/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Alan Somers         (Spectra Logic Corporation)
 *          John Suykerbuyk     (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
#include <netinet/tcp.h>
/**
 * \file netback_unit_tests.c
 *
 * \brief Unit tests for the Xen netback driver.
 *
 * Due to the driver's use of static functions, these tests cannot be compiled
 * standalone; they must be #include'd from the driver's .c file.
 */

/** Helper macro used to snprintf to a buffer and update the buffer pointer */
#define	SNCATF(buffer, buflen, ...) do {				\
	size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);	\
	buffer += new_chars;						\
	/* be careful; snprintf's return value can be > buflen */	\
	buflen -= MIN(buflen, new_chars);				\
} while (0)
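
/*
 * Illustrative sketch (not part of the driver): appending two lines with
 * SNCATF.  Each call advances the cursor and shrinks the remaining length,
 * so the second call writes immediately after the first.
 */
#if 0
	char msgbuf[128], *cursor = msgbuf;
	size_t remaining = sizeof(msgbuf);

	SNCATF(cursor, remaining, "%d Tests Passed\n", 52);
	SNCATF(cursor, remaining, "%d Tests FAILED\n", 1);
	/* msgbuf now holds both lines; remaining has shrunk accordingly */
#endif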

/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
#define	STRINGIFY(x) #x
#define	TOSTRING(x) STRINGIFY(x)
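
/*
 * For example (illustrative): TOSTRING(__LINE__) on line 123 of a file
 * expands to the string literal "123"; XNB_ASSERT below uses this to tag
 * its error messages with the failing line.
 */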

/**
 * Writes an error message to buffer if cond is false.
 * Note the implied parameters buffer and buflen.
 */
#define	XNB_ASSERT(cond) ({						\
	int passed = (cond);						\
	char *_buffer = (buffer);					\
	size_t _buflen = (buflen);					\
	if (! passed) {							\
		strlcat(_buffer, __func__, _buflen);			\
		strlcat(_buffer, ":" TOSTRING(__LINE__)			\
		  " Assertion Error: " #cond "\n", _buflen);		\
	}								\
	})
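
/*
 * A failing assertion appends one line to the result buffer, e.g.
 * (function name and line number are illustrative):
 *
 *   xnb_ring2pkt_1req:423 Assertion Error: num_consumed == 1
 */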

/**
 * The signature used by all testcases.  If the test writes anything
 * to buffer, then it will be considered a failure.
 * \param buffer	Return storage for error messages
 * \param buflen	The space available in the buffer
 */
typedef void testcase_t(char *buffer, size_t buflen);

/**
 * Signature used by setup functions
 * \return nonzero on error
 */
typedef int setup_t(void);

typedef void teardown_t(void);

/** A simple test fixture comprising setup, teardown, and test */
struct test_fixture {
	/** Will be run before the test to allocate and initialize variables */
	setup_t *setup;

	/** Will be run if setup succeeds */
	testcase_t *test;

	/** Cleans up test data whether or not the setup succeeded */
	teardown_t *teardown;
};

typedef struct test_fixture test_fixture_t;
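
/*
 * Illustrative sketch (hypothetical names, compiled out): a minimal fixture
 * wiring a testcase to the no-op setup/teardown helpers defined below.
 */
#if 0
static void
example_testcase(char *buffer, size_t buflen)
{
	XNB_ASSERT(1 + 1 == 2);	/* passes, so nothing is written to buffer */
}

static test_fixture_t const example_fixture =
    {null_setup, example_testcase, null_teardown};
#endif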

static int	xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
static int	xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
				     char *buffer, size_t buflen);

static int __unused
null_setup(void) { return 0; }

static void __unused
null_teardown(void) { }

static setup_t setup_pvt_data;
static teardown_t teardown_pvt_data;
static testcase_t xnb_ring2pkt_emptyring;
static testcase_t xnb_ring2pkt_1req;
static testcase_t xnb_ring2pkt_2req;
static testcase_t xnb_ring2pkt_3req;
static testcase_t xnb_ring2pkt_extra;
static testcase_t xnb_ring2pkt_partial;
static testcase_t xnb_ring2pkt_wraps;
static testcase_t xnb_txpkt2rsp_emptypkt;
static testcase_t xnb_txpkt2rsp_1req;
static testcase_t xnb_txpkt2rsp_extra;
static testcase_t xnb_txpkt2rsp_long;
static testcase_t xnb_txpkt2rsp_invalid;
static testcase_t xnb_txpkt2rsp_error;
static testcase_t xnb_txpkt2rsp_wraps;
static testcase_t xnb_pkt2mbufc_empty;
static testcase_t xnb_pkt2mbufc_short;
static testcase_t xnb_pkt2mbufc_csum;
static testcase_t xnb_pkt2mbufc_1cluster;
static testcase_t xnb_pkt2mbufc_largecluster;
static testcase_t xnb_pkt2mbufc_2cluster;
static testcase_t xnb_txpkt2gnttab_empty;
static testcase_t xnb_txpkt2gnttab_short;
static testcase_t xnb_txpkt2gnttab_2req;
static testcase_t xnb_txpkt2gnttab_2cluster;
static testcase_t xnb_update_mbufc_short;
static testcase_t xnb_update_mbufc_2req;
static testcase_t xnb_update_mbufc_2cluster;
static testcase_t xnb_mbufc2pkt_empty;
static testcase_t xnb_mbufc2pkt_short;
static testcase_t xnb_mbufc2pkt_1cluster;
static testcase_t xnb_mbufc2pkt_2short;
static testcase_t xnb_mbufc2pkt_long;
static testcase_t xnb_mbufc2pkt_extra;
static testcase_t xnb_mbufc2pkt_nospace;
static testcase_t xnb_rxpkt2gnttab_empty;
static testcase_t xnb_rxpkt2gnttab_short;
static testcase_t xnb_rxpkt2gnttab_2req;
static testcase_t xnb_rxpkt2rsp_empty;
static testcase_t xnb_rxpkt2rsp_short;
static testcase_t xnb_rxpkt2rsp_extra;
static testcase_t xnb_rxpkt2rsp_2short;
static testcase_t xnb_rxpkt2rsp_2slots;
static testcase_t xnb_rxpkt2rsp_copyerror;
static testcase_t xnb_sscanf_llu;
static testcase_t xnb_sscanf_lld;
static testcase_t xnb_sscanf_hhu;
static testcase_t xnb_sscanf_hhd;
static testcase_t xnb_sscanf_hhn;

#if defined(INET) || defined(INET6)
/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
static testcase_t xnb_add_mbuf_cksum_arp;
static testcase_t xnb_add_mbuf_cksum_tcp;
static testcase_t xnb_add_mbuf_cksum_udp;
static testcase_t xnb_add_mbuf_cksum_icmp;
static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
static void	xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
				   uint16_t ip_id, uint16_t ip_p,
				   uint16_t ip_off, uint16_t ip_sum);
static void	xnb_fill_tcp(struct mbuf *m);
#endif /* INET || INET6 */

/** Private data used by unit tests */
static struct {
	gnttab_copy_table	gnttab;
	netif_rx_back_ring_t	rxb;
	netif_rx_front_ring_t	rxf;
	netif_tx_back_ring_t	txb;
	netif_tx_front_ring_t	txf;
	struct ifnet*		ifp;
	netif_rx_sring_t*	rxs;
	netif_tx_sring_t*	txs;
} xnb_unit_pvt;

static inline void safe_m_freem(struct mbuf **ppMbuf) {
	if (*ppMbuf != NULL) {
		m_freem(*ppMbuf);
		*ppMbuf = NULL;
	}
}

/**
 * The unit test runner.  It will run every supplied test and return an
 * output message as a string
 * \param tests		An array of tests.  Every test will be attempted.
 * \param ntests	The length of tests
 * \param buffer	Return storage for the result string
 * \param buflen	The length of buffer
 * \return		The number of tests that failed
 */
static int
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
		     size_t buflen)
{
	int i;
	int n_passes;
	int n_failures = 0;

	for (i = 0; i < ntests; i++) {
		int error = tests[i].setup();
		if (error != 0) {
			SNCATF(buffer, buflen,
			    "Setup failed for test idx %d\n", i);
			n_failures++;
		} else {
			size_t new_chars;

			tests[i].test(buffer, buflen);
			new_chars = strnlen(buffer, buflen);
			buffer += new_chars;
			buflen -= new_chars;

			if (new_chars > 0) {
				n_failures++;
			}
		}
		tests[i].teardown();
	}

	n_passes = ntests - n_failures;
	if (n_passes > 0) {
		SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
	}
	if (n_failures > 0) {
		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
	}

	return n_failures;
}

/** Number of unit tests.  Must match the length of the tests array below */
#define	TOTAL_TESTS	(53)
/**
 * Max memory available for returning results.  400 chars/test should give
 * enough space for a five line error message for every test
 */
#define	TOTAL_BUFLEN	(400 * TOTAL_TESTS + 2)
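/* For scale (simple arithmetic): 400 * 53 + 2 = 21202 bytes of results. */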

/**
 * Called from userspace by a sysctl.  Runs all internal unit tests, and
 * returns the results to userspace as a string
 * \param oidp	unused
 * \param arg1	pointer to an xnb_softc for a specific xnb device
 * \param arg2	unused
 * \param req	sysctl access structure
 * \return a string via the special SYSCTL_OUT macro.
 */
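/*
 * Illustrative usage from userspace; the exact OID under which this handler
 * is registered is defined elsewhere in the driver, so the node name below
 * is hypothetical:
 *
 *   # sysctl dev.xnb.0.unit_test
 */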

static int
xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
	test_fixture_t const tests[TOTAL_TESTS] = {
		{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
#if defined(INET) || defined(INET6)
		{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
#endif
		{null_setup, xnb_sscanf_hhd, null_teardown},
		{null_setup, xnb_sscanf_hhu, null_teardown},
		{null_setup, xnb_sscanf_lld, null_teardown},
		{null_setup, xnb_sscanf_llu, null_teardown},
		{null_setup, xnb_sscanf_hhn, null_teardown},
	};
	/**
	 * results is static so that the data will persist after this function
	 * returns.  The sysctl code expects us to return a constant string.
	 * \todo: the static variable is not thread safe.  Put a mutex around
	 * it.
	 */
	static char results[TOTAL_BUFLEN];

	/* empty the result strings */
	results[0] = 0;
	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);

	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
}

static int
setup_pvt_data(void)
{
	int error = 0;

	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.txs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.txs);
		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
	} else {
		error = 1;
	}

	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
	if (xnb_unit_pvt.ifp == NULL) {
		error = 1;
	}

	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.rxs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.rxs);
		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
	} else {
		error = 1;
	}

	return error;
}

static void
teardown_pvt_data(void)
{
	if (xnb_unit_pvt.txs != NULL) {
		free(xnb_unit_pvt.txs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.rxs != NULL) {
		free(xnb_unit_pvt.rxs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.ifp != NULL) {
		if_free(xnb_unit_pvt.ifp);
	}
}

/**
 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
 */
static void
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a single request packet correctly
 */
static void
xnb_ring2pkt_1req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);

	req->flags = 0;
	req->size = 69;	/* arbitrary number for test */
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 1);
	XNB_ASSERT(pkt.size == 69);
	XNB_ASSERT(pkt.car_size == 69);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a two request packet correctly.
 * This tests handling of the MORE_DATA flag and cdr
 */
static void
xnb_ring2pkt_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 2);
	XNB_ASSERT(pkt.size == 100);
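	/* car_size = 100 total - 40 in the cdr request = 60 */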
	XNB_ASSERT(pkt.car_size == 60);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
}

/**
 * Verify that xnb_ring2pkt can convert a three request packet correctly
 */
static void
xnb_ring2pkt_3req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 200;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.size == 200);
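	/* car_size = 200 total - (40 + 50) in the later requests = 110 */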
	XNB_ASSERT(pkt.car_size == 110);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * Verify that xnb_ring2pkt can read extra info
 */
static void
xnb_ring2pkt_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	struct netif_extra_info *ext;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->flags = 0;
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->u.gso.size = 250;
	ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
	ext->u.gso.features = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.extra.flags == 0);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(pkt.extra.u.gso.size == 250);
	XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
	XNB_ASSERT(pkt.size == 150);
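	/* car_size = 150 total - 50 in the cdr request = 100 */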
	XNB_ASSERT(pkt.car_size == 100);
	XNB_ASSERT(pkt.flags == NETTXF_extra_info);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 2);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
}

/**
 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
 * not yet in the ring
 */
static void
xnb_ring2pkt_partial(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
}

/**
 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
 * the end of the ring
 */
static void
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;
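	/*
	 * Note: the standard Xen ring macros mask ring indices with
	 * (RING_SIZE - 1), so the three requests produced below at indices
	 * rsize - 2, rsize - 1, and rsize land in slots rsize - 2,
	 * rsize - 1, and 0; the third wraps to the start of the shared ring.
	 */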

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * xnb_txpkt2rsp should do nothing for an empty packet
 */
static void
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	XNB_ASSERT(
	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
	XNB_ASSERT(
	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
}

/**
 * xnb_txpkt2rsp responding to one request
 */
static void
xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
 */
static void
xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
 */
static void
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->id = 254;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 300;
	req->flags = NETTXF_more_data;
	req->id = 1034;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 400;
	req->flags = 0;
	req->id = 34;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 3);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to an invalid packet.
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
 */
static void
xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = 0xFF;	/* Invalid extra type */
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to one request which caused an error
 */
static void
xnb_txpkt2rsp_error(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
}

/**
 * xnb_txpkt2rsp's responses wrap around the end of the ring
 */
static void
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	req->id = 1;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	req->id = 2;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	req->id = 3;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * Helper function used to set up pkt2mbufc tests
 * \param size		size in bytes of the single request to push to the ring
 * \param flags		optional flags to put in the netif request
 * \param[out] pkt	the returned packet object
 * \return		number of requests consumed from the ring
 */
static int
xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
{
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = flags;
	req->size = size;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
}

/**
 * xnb_pkt2mbufc on an empty packet
 */
static void
xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a short packet that can fit in an mbuf internal buffer
 */
static void
xnb_pkt2mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a short packet whose checksum was validated by the netfront
 */
static void
xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, NETTXF_data_validated);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that can fit in one cluster
 */
static void
xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that cannot fit in one regular cluster
 */
static void
xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
{
	const size_t size = MCLBYTES + 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on a packet that cannot fit in two clusters
 */
static void
xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
{
	const size_t size = 2 * MCLBYTES + 1;
	size_t space = 0;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	struct mbuf *m;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);

	for (m = pMbuf; m != NULL; m = m->m_next) {
		space += M_TRAILINGSPACE(m);
	}
	XNB_ASSERT(space >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on an empty packet.  Should return empty gnttab
 */
static void
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 0);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
 * and has one request
 */
static void
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for source */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
		virt_to_mfn(mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(n_entries == 2);
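	/*
	 * Arithmetic check: the first entry copies the car's share,
	 * 1900 total - 500 in the second request = 1400 bytes.
	 */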
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));

	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t) + 1400));
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
 */
static void
xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(pMbuf != NULL);
	if (pMbuf == NULL)
		return;

	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(n_entries == 3);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		      mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);

		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
			    MCLBYTES);
	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		      mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);

	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	m_freem(pMbuf);
}

/**
 * xnb_update_mbufc on a short packet that only has one gnttab entry
 */
static void
xnb_update_mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(pMbuf->m_len == size);
	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_update_mbufc_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
	xnb_unit_pvt.gnttab[1].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
	XNB_ASSERT(pMbuf->m_len == 1900);

	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a single request that spans two mbuf clusters
 */
static void
xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
{
	int i;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update every grant table entry's status field */
	for (i = 0; i < n_entries; i++) {
		xnb_unit_pvt.gnttab[i].status = GNTST_okay;
	}
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);

	if (n_entries == 3) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
	} else if (n_entries == 2) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == 1);
	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	safe_m_freem(&pMbuf);
}

/** xnb_mbufc2pkt on an empty mbufc */
static void
xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	int free_slots = 64;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);
	/*
	 * note: it is illegal to set M_PKTHDR on an mbuf with no data.  Doing
	 * so will cause m_freem to segfault
	 */
	XNB_ASSERT(mbuf->m_len == 0);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a short mbufc */
static void
xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 128;
	int free_slots = 64;
	RING_IDX start = 9;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
static void
xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = MCLBYTES;
	int free_slots = 32;
	RING_IDX start = 12;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
static void
xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	struct mbuf *mbufc, *mbufc2;

	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc2 != NULL);
	if (mbufc2 == NULL) {
		safe_m_freem(&mbufc);
		return;
	}
	mbufc2->m_pkthdr.len = size1 + size2;
	mbufc2->m_len = size1;

	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size1 + size2);
	XNB_ASSERT(pkt.car == start);
	/*
	 * The second m_getm may allocate a new mbuf and append
	 * it to the chain, or it may simply extend the first mbuf.
	 */
	if (mbufc2->m_next != NULL) {
		XNB_ASSERT(pkt.car_size == size1);
		XNB_ASSERT(pkt.list_len == 1);
		XNB_ASSERT(pkt.cdr == start + 1);
	}

	safe_m_freem(&mbufc2);
}

/** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster */
static void
xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/*
	 * There should be >1 response in the packet, and there is no
	 * extra info.
	 */
	XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
	XNB_ASSERT(pkt.cdr == pkt.car + 1);

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster and extra info */
static void
xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;

	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/* There should be >1 response in the packet, there is extra info */
	XNB_ASSERT(pkt.flags & NETRXF_extra_info);
	XNB_ASSERT(pkt.flags & NETRXF_data_validated);
	XNB_ASSERT(pkt.cdr == pkt.car + 2);
	XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt with insufficient space in the ring */
static void
xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 2;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;
	int error;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(error == EAGAIN);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbufc);
}
1630 
1631 /**
1632  * xnb_rxpkt2gnttab on an empty packet.  Should return an empty gnttab
1633  */
1634 static void
1635 xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
1636 {
1637 	struct xnb_pkt pkt;
1638 	int nr_entries;
1639 	int free_slots = 60;
1640 	struct mbuf *mbuf;
1641 
1642 	mbuf = m_get(M_WAITOK, MT_DATA);
1643 
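	/*
	 * The mbuf deliberately lacks M_PKTHDR, so xnb_mbufc2pkt should
	 * produce an invalid packet and no grant-table entries.
	 */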
1644 	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1645 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1646 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1647 
1648 	XNB_ASSERT(nr_entries == 0);
1649 
1650 	safe_m_freem(&mbuf);
1651 }
1652 
1653 /** xnb_rxpkt2gnttab on a short packet without extra data */
1654 static void
1655 xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
1656 	struct xnb_pkt pkt;
1657 	int nr_entries;
1658 	size_t size = 128;
1659 	int free_slots = 60;
1660 	RING_IDX start = 9;
1661 	struct netif_rx_request *req;
1662 	struct mbuf *mbuf;
1663 
1664 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1665 	mbuf->m_flags |= M_PKTHDR;
1666 	mbuf->m_pkthdr.len = size;
1667 	mbuf->m_len = size;
1668 
1669 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1670 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1671 			       xnb_unit_pvt.rxf.req_prod_pvt);
1672 	req->gref = 7;
1673 
1674 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1675 				      &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1676 
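	/* A 128-byte payload fits in one page, so expect exactly one entry */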
1677 	XNB_ASSERT(nr_entries == 1);
1678 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1679 	/* flags should indicate gref's for dest */
1680 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
1681 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
1682 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1683 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
1684 		   mtod(mbuf, vm_offset_t)));
1685 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
1686 		   virt_to_mfn(mtod(mbuf, vm_offset_t)));
1687 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1688 
1689 	safe_m_freem(&mbuf);
1690 }
1691 
1692 /**
1693  * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
1694  */
1695 static void
1696 xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1697 {
1698 	struct xnb_pkt pkt;
1699 	int nr_entries;
1700 	int i, num_mbufs;
1701 	size_t total_granted_size = 0;
1702 	size_t size = MJUMPAGESIZE + 1;
1703 	int free_slots = 60;
1704 	RING_IDX start = 11;
1705 	struct netif_rx_request *req;
1706 	struct mbuf *mbuf, *m;
1707 
1708 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1709 	mbuf->m_flags |= M_PKTHDR;
1710 	mbuf->m_pkthdr.len = size;
1711 	mbuf->m_len = size;
1712 
1713 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1714 
1715 	for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
1716 		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1717 		    xnb_unit_pvt.rxf.req_prod_pvt);
1718 		req->gref = i;
1719 		req->id = 5;
1720 	}
1721 	num_mbufs = i;
1722 
1723 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1724 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1725 
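	/*
	 * Copy entries may outnumber mbufs because an entry that would
	 * cross a page boundary must be split; each entry must stay within
	 * one page and the entry lengths must sum to the payload size.
	 */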
1726 	XNB_ASSERT(nr_entries >= num_mbufs);
1727 	for (i = 0; i < nr_entries; i++) {
1728 		int end_offset = xnb_unit_pvt.gnttab[i].len +
1729 			xnb_unit_pvt.gnttab[i].dest.offset;
1730 		XNB_ASSERT(end_offset <= PAGE_SIZE);
1731 		total_granted_size += xnb_unit_pvt.gnttab[i].len;
1732 	}
1733 	XNB_ASSERT(total_granted_size == size);

	safe_m_freem(&mbuf);
1734 }
1735 
1736 /**
1737  * xnb_rxpkt2rsp on an empty packet.  Shouldn't generate any responses
1738  */
1739 static void
1740 xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1741 {
1742 	struct xnb_pkt pkt;
1743 	int nr_entries;
1744 	int nr_reqs;
1745 	int free_slots = 60;
1746 	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1747 	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1748 	struct mbuf *mbuf;
1749 
1750 	mbuf = m_get(M_WAITOK, MT_DATA);
1751 
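	/*
	 * The header-less mbuf converts to an invalid packet, so no
	 * responses should be generated; the ring and shared ring are
	 * compared byte-for-byte against the backups below to prove it.
	 */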
1752 	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1753 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1754 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1755 
1756 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1757 	    &xnb_unit_pvt.rxb);
1758 	XNB_ASSERT(nr_reqs == 0);
1759 	XNB_ASSERT(
1760 	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1761 	XNB_ASSERT(
1762 	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1763 
1764 	safe_m_freem(&mbuf);
1765 }
1766 
1767 /**
1768  * xnb_rxpkt2rsp on a short packet with no extras
1769  */
1770 static void
1771 xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1772 {
1773 	struct xnb_pkt pkt;
1774 	int nr_entries, nr_reqs;
1775 	size_t size = 128;
1776 	int free_slots = 60;
1777 	RING_IDX start = 5;
1778 	struct netif_rx_request *req;
1779 	struct netif_rx_response *rsp;
1780 	struct mbuf *mbuf;
1781 
1782 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1783 	mbuf->m_flags |= M_PKTHDR;
1784 	mbuf->m_pkthdr.len = size;
1785 	mbuf->m_len = size;
1786 
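	/*
	 * Convert the mbuf, then stage the shared ring as if the frontend
	 * had posted exactly one RX request at index 'start'.
	 */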
1787 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1788 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1789 	req->gref = 7;
1790 	xnb_unit_pvt.rxb.req_cons = start;
1791 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1792 	xnb_unit_pvt.rxs->req_prod = start + 1;
1793 	xnb_unit_pvt.rxs->rsp_prod = start;
1794 
1795 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1796 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1797 
1798 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1799 	    &xnb_unit_pvt.rxb);
1800 
1801 	XNB_ASSERT(nr_reqs == 1);
1802 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1803 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1804 	XNB_ASSERT(rsp->id == req->id);
1805 	XNB_ASSERT(rsp->offset == 0);
1806 	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1807 	XNB_ASSERT(rsp->status == size);
1808 
1809 	safe_m_freem(&mbuf);
1810 }
1811 
1812 /**
1813  * xnb_rxpkt2rsp with extra data
1814  */
1815 static void
1816 xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1817 {
1818 	struct xnb_pkt pkt;
1819 	int nr_entries, nr_reqs;
1820 	size_t size = 14;
1821 	int free_slots = 15;
1822 	RING_IDX start = 3;
1823 	uint16_t id = 49;
1824 	uint16_t gref = 65;
1825 	uint16_t mss = TCP_MSS - 40;
1826 	struct mbuf *mbufc;
1827 	struct netif_rx_request *req;
1828 	struct netif_rx_response *rsp;
1829 	struct netif_extra_info *ext;
1830 
1831 	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1832 	XNB_ASSERT(mbufc != NULL);
1833 	if (mbufc == NULL)
1834 		return;
1835 
1836 	mbufc->m_flags |= M_PKTHDR;
1837 	mbufc->m_pkthdr.len = size;
1838 	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1839 	mbufc->m_pkthdr.tso_segsz = mss;
1840 	mbufc->m_len = size;
1841 
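	/*
	 * A TSO packet should consume two ring slots: the data response at
	 * 'start' and a GSO extra-info slot at 'start + 1', so stage two
	 * frontend requests.
	 */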
1842 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1843 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1844 	req->id = id;
1845 	req->gref = gref;
1846 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1847 	req->id = id + 1;
1848 	req->gref = gref + 1;
1849 	xnb_unit_pvt.rxb.req_cons = start;
1850 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1851 	xnb_unit_pvt.rxs->req_prod = start + 2;
1852 	xnb_unit_pvt.rxs->rsp_prod = start;
1853 
1854 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1855 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1856 
1857 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1858 	    &xnb_unit_pvt.rxb);
1859 
1860 	XNB_ASSERT(nr_reqs == 2);
1861 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1862 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1863 	XNB_ASSERT(rsp->id == id);
1864 	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1865 	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1866 	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1867 	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1868 	XNB_ASSERT(rsp->status == size);
1869 
1870 	ext = (struct netif_extra_info*)
1871 		RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1872 	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1873 	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1874 	XNB_ASSERT(ext->u.gso.size == mss);
1875 	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_EXTRA_TYPE_GSO);
1876 
1877 	safe_m_freem(&mbufc);
1878 }
1879 
1880 /**
1881  * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
1882  * generate two response slots.
1883  */
1884 static void
1885 xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1886 {
1887 	struct xnb_pkt pkt;
1888 	int nr_entries, nr_reqs;
1889 	size_t size = PAGE_SIZE + 100;
1890 	int free_slots = 3;
1891 	uint16_t id1 = 17;
1892 	uint16_t id2 = 37;
1893 	uint16_t gref1 = 24;
1894 	uint16_t gref2 = 34;
1895 	RING_IDX start = 15;
1896 	struct netif_rx_request *req;
1897 	struct netif_rx_response *rsp;
1898 	struct mbuf *mbuf;
1899 
1900 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1901 	mbuf->m_flags |= M_PKTHDR;
1902 	mbuf->m_pkthdr.len = size;
1903 	if (mbuf->m_next != NULL) {
1904 		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
1905 		mbuf->m_len = first_len;
1906 		mbuf->m_next->m_len = size - first_len;
1907 
1908 	} else {
1909 		mbuf->m_len = size;
1910 	}
1911 
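	/*
	 * Responses are generated per page: the first should carry
	 * PAGE_SIZE bytes and NETRXF_more_data, the second the remainder.
	 */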
1912 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1913 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1914 	req->gref = gref1;
1915 	req->id = id1;
1916 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1917 	req->gref = gref2;
1918 	req->id = id2;
1919 	xnb_unit_pvt.rxb.req_cons = start;
1920 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1921 	xnb_unit_pvt.rxs->req_prod = start + 2;
1922 	xnb_unit_pvt.rxs->rsp_prod = start;
1923 
1924 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1925 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1926 
1927 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1928 	    &xnb_unit_pvt.rxb);
1929 
1930 	XNB_ASSERT(nr_reqs == 2);
1931 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1932 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1933 	XNB_ASSERT(rsp->id == id1);
1934 	XNB_ASSERT(rsp->offset == 0);
1935 	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1936 	XNB_ASSERT(rsp->flags & NETRXF_more_data);
1937 	XNB_ASSERT(rsp->status == PAGE_SIZE);
1938 
1939 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1940 	XNB_ASSERT(rsp->id == id2);
1941 	XNB_ASSERT(rsp->offset == 0);
1942 	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1943 	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1944 	XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1945 
1946 	safe_m_freem(&mbuf);
1947 }
1948 
1949 /** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1950 static void
1951 xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
1952 	struct xnb_pkt pkt;
1953 	int nr_reqs, nr_entries;
1954 	size_t size1 = MHLEN - 5;
1955 	size_t size2 = MHLEN - 15;
1956 	int free_slots = 32;
1957 	RING_IDX start = 14;
1958 	uint16_t id = 47;
1959 	uint16_t gref = 54;
1960 	struct netif_rx_request *req;
1961 	struct netif_rx_response *rsp;
1962 	struct mbuf *mbufc;
1963 
1964 	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1965 	XNB_ASSERT(mbufc != NULL);
1966 	if (mbufc == NULL)
1967 		return;
1968 	mbufc->m_flags |= M_PKTHDR;
1969 
1970 	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1971 	XNB_ASSERT(mbufc->m_next != NULL);
1972 	mbufc->m_pkthdr.len = size1 + size2;
1973 	mbufc->m_len = size1;
1974 	mbufc->m_next->m_len = size2;
1975 
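	/*
	 * Both small mbufs fit within a single frontend page, so the two
	 * grant-table entries should collapse into a single response.
	 */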
1976 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1977 
1978 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1979 	req->gref = gref;
1980 	req->id = id;
1981 	xnb_unit_pvt.rxb.req_cons = start;
1982 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1983 	xnb_unit_pvt.rxs->req_prod = start + 1;
1984 	xnb_unit_pvt.rxs->rsp_prod = start;
1985 
1986 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1987 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1988 
1989 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1990 	    &xnb_unit_pvt.rxb);
1991 
1992 	XNB_ASSERT(nr_entries == 2);
1993 	XNB_ASSERT(nr_reqs == 1);
1994 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1995 	XNB_ASSERT(rsp->id == id);
1996 	XNB_ASSERT(rsp->status == size1 + size2);
1997 	XNB_ASSERT(rsp->offset == 0);
1998 	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
1999 
2000 	safe_m_freem(&mbufc);
2001 }
2002 
2003 /**
2004  * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2005  * Note: this test will result in an error message being printed to the console
2006  * such as:
2007  * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2008  */
2009 static void
2010 xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2011 {
2012 	struct xnb_pkt pkt;
2013 	int nr_entries, nr_reqs;
2014 	int id = 7;
2015 	int gref = 42;
2016 	uint16_t canary = 6859;
2017 	size_t size = 7 * MCLBYTES;
2018 	int free_slots = 9;
2019 	RING_IDX start = 2;
2020 	struct netif_rx_request *req;
2021 	struct netif_rx_response *rsp;
2022 	struct mbuf *mbuf;
2023 
2024 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2025 	mbuf->m_flags |= M_PKTHDR;
2026 	mbuf->m_pkthdr.len = size;
2027 	mbuf->m_len = size;
2028 
2029 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2030 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2031 	req->gref = gref;
2032 	req->id = id;
2033 	xnb_unit_pvt.rxb.req_cons = start;
2034 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2035 	xnb_unit_pvt.rxs->req_prod = start + 1;
2036 	xnb_unit_pvt.rxs->rsp_prod = start;
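	/*
	 * Seed the slot after our packet with canary values; the error
	 * path must consume only its own request and leave them untouched.
	 */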
2037 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2038 	req->gref = canary;
2039 	req->id = canary;
2040 
2041 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2042 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2043 	/* Inject the error */
2044 	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2045 
2046 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2047 	    &xnb_unit_pvt.rxb);
2048 
2049 	XNB_ASSERT(nr_reqs == 1);
2050 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2051 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2052 	XNB_ASSERT(rsp->id == id);
2053 	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2054 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2055 	XNB_ASSERT(req->gref == canary);
2056 	XNB_ASSERT(req->id == canary);
2057 
2058 	safe_m_freem(&mbuf);
2059 }
2060 
2061 #if defined(INET) || defined(INET6)
2062 /**
2063  * xnb_add_mbuf_cksum on an ARP request packet
2064  */
2065 static void
2066 xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
2067 {
2068 	const size_t pkt_len = sizeof(struct ether_header) +
2069 		sizeof(struct ether_arp);
2070 	struct mbuf *mbufc;
2071 	struct ether_header *eh;
2072 	struct ether_arp *ep;
2073 	unsigned char pkt_orig[pkt_len];
2074 
2075 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2076 	/* Fill in an example arp request */
2077 	eh = mtod(mbufc, struct ether_header*);
2078 	eh->ether_dhost[0] = 0xff;
2079 	eh->ether_dhost[1] = 0xff;
2080 	eh->ether_dhost[2] = 0xff;
2081 	eh->ether_dhost[3] = 0xff;
2082 	eh->ether_dhost[4] = 0xff;
2083 	eh->ether_dhost[5] = 0xff;
2084 	eh->ether_shost[0] = 0x00;
2085 	eh->ether_shost[1] = 0x15;
2086 	eh->ether_shost[2] = 0x17;
2087 	eh->ether_shost[3] = 0xe9;
2088 	eh->ether_shost[4] = 0x30;
2089 	eh->ether_shost[5] = 0x68;
2090 	eh->ether_type = htons(ETHERTYPE_ARP);
2091 	ep = (struct ether_arp*)(eh + 1);
2092 	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
2093 	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
2094 	ep->ea_hdr.ar_hln = 6;
2095 	ep->ea_hdr.ar_pln = 4;
2096 	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
2097 	ep->arp_sha[0] = 0x00;
2098 	ep->arp_sha[1] = 0x15;
2099 	ep->arp_sha[2] = 0x17;
2100 	ep->arp_sha[3] = 0xe9;
2101 	ep->arp_sha[4] = 0x30;
2102 	ep->arp_sha[5] = 0x68;
2103 	ep->arp_spa[0] = 0xc0;
2104 	ep->arp_spa[1] = 0xa8;
2105 	ep->arp_spa[2] = 0x0a;
2106 	ep->arp_spa[3] = 0x04;
2107 	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
2108 	ep->arp_tpa[0] = 0xc0;
2109 	ep->arp_tpa[1] = 0xa8;
2110 	ep->arp_tpa[2] = 0x0a;
2111 	ep->arp_tpa[3] = 0x06;
2112 
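	/*
	 * ARP carries no IP or TCP/UDP checksum, so even with offload
	 * flags set the frame should pass through unmodified.
	 */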
2113 	/* fill in the length field */
2114 	mbufc->m_len = pkt_len;
2115 	mbufc->m_pkthdr.len = pkt_len;
2116 	/* indicate that the netfront uses hw-assisted checksums */
2117 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2118 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2119 
2120 	/* Make a backup copy of the packet */
2121 	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
2122 
2123 	/* Function under test */
2124 	xnb_add_mbuf_cksum(mbufc);
2125 
2126 	/* Verify that the packet's data did not change */
2127 	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
2128 	m_freem(mbufc);
2129 }
2130 
2131 /**
2132  * Helper function that populates the ethernet header and IP header used by
2133  * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
2134  * and must be large enough
2135  */
2136 static void
2137 xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
2138 		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
2139 {
2140 	struct ether_header *eh;
2141 	struct ip *iph;
2142 
2143 	eh = mtod(m, struct ether_header*);
2144 	eh->ether_dhost[0] = 0x00;
2145 	eh->ether_dhost[1] = 0x16;
2146 	eh->ether_dhost[2] = 0x3e;
2147 	eh->ether_dhost[3] = 0x23;
2148 	eh->ether_dhost[4] = 0x50;
2149 	eh->ether_dhost[5] = 0x0b;
2150 	eh->ether_shost[0] = 0x00;
2151 	eh->ether_shost[1] = 0x16;
2152 	eh->ether_shost[2] = 0x30;
2153 	eh->ether_shost[3] = 0x00;
2154 	eh->ether_shost[4] = 0x00;
2155 	eh->ether_shost[5] = 0x00;
2156 	eh->ether_type = htons(ETHERTYPE_IP);
2157 	iph = (struct ip*)(eh + 1);
2158 	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
2159 	iph->ip_v = 4;		/* IP v4 */
2160 	iph->ip_tos = 0;
2161 	iph->ip_len = htons(ip_len);
2162 	iph->ip_id = htons(ip_id);
2163 	iph->ip_off = htons(ip_off);
2164 	iph->ip_ttl = 64;
2165 	iph->ip_p = ip_p;
2166 	iph->ip_sum = htons(ip_sum);
2167 	iph->ip_src.s_addr = htonl(0xc0a80a04);
2168 	iph->ip_dst.s_addr = htonl(0xc0a80a05);
2169 }
2170 
2171 /**
2172  * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
2173  * ICMP packet
2174  */
2175 static void
2176 xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
2177 {
2178 	const size_t icmp_len = 64;	/* set by ping(1) */
2179 	const size_t pkt_len = sizeof(struct ether_header) +
2180 		sizeof(struct ip) + icmp_len;
2181 	struct mbuf *mbufc;
2182 	struct ether_header *eh;
2183 	struct ip *iph;
2184 	struct icmp *icmph;
2185 	unsigned char pkt_orig[icmp_len];
2186 	uint32_t *tv_field;
2187 	uint8_t *data_payload;
2188 	int i;
2189 	const uint16_t ICMP_CSUM = 0xaed7;
2190 	const uint16_t IP_CSUM = 0xe533;
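	/*
	 * The expected checksums above were taken from the captured ping
	 * packet; only the IP header sum should be recomputed here.
	 */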
2191 
2192 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2193 	/* Fill in an example ICMP ping request */
2194 	eh = mtod(mbufc, struct ether_header*);
2195 	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
2196 	iph = (struct ip*)(eh + 1);
2197 	icmph = (struct icmp*)(iph + 1);
2198 	icmph->icmp_type = ICMP_ECHO;
2199 	icmph->icmp_code = 0;
2200 	icmph->icmp_cksum = htons(ICMP_CSUM);
2201 	icmph->icmp_id = htons(31492);
2202 	icmph->icmp_seq = htons(0);
2203 	/*
2204 	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
2205 	 * For this test, we will set the bytes individually for portability.
2206 	 */
2207 	tv_field = (uint32_t*)(&(icmph->icmp_hun));
2208 	tv_field[0] = 0x4f02cfac;
2209 	tv_field[1] = 0x0007c46a;
2210 	/*
2211 	 * Remainder of the packet is an incrementing 8-bit integer, starting at 8
2212 	 */
2213 	data_payload = (uint8_t*)(&tv_field[2]);
2214 	for (i = 8; i < 37; i++) {
2215 		*data_payload++ = i;
2216 	}
2217 
2218 	/* fill in the length field */
2219 	mbufc->m_len = pkt_len;
2220 	mbufc->m_pkthdr.len = pkt_len;
2221 	/* indicate that the netfront uses hw-assisted checksums */
2222 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2223 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2224 
2225 	bcopy(icmph, pkt_orig, icmp_len);
2226 	/* Function under test */
2227 	xnb_add_mbuf_cksum(mbufc);
2228 
2229 	/* Check the IP checksum */
2230 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2231 
2232 	/* Check that the ICMP packet did not change */
2233 	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
2234 	m_freem(mbufc);
2235 }
2236 
2237 /**
2238  * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
2239  * UDP packet
2240  */
2241 static void
2242 xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
2243 {
2244 	const size_t udp_len = 16;
2245 	const size_t pkt_len = sizeof(struct ether_header) +
2246 		sizeof(struct ip) + udp_len;
2247 	struct mbuf *mbufc;
2248 	struct ether_header *eh;
2249 	struct ip *iph;
2250 	struct udphdr *udp;
2251 	uint8_t *data_payload;
2252 	const uint16_t IP_CSUM = 0xe56b;
2253 	const uint16_t UDP_CSUM = 0xdde2;
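	/*
	 * Expected sums come from the captured packet; both the IP header
	 * checksum and the UDP checksum should be filled in below.
	 */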
2254 
2255 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2256 	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
2257 	eh = mtod(mbufc, struct ether_header*);
2258 	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
2259 	iph = (struct ip*)(eh + 1);
2260 	udp = (struct udphdr*)(iph + 1);
2261 	udp->uh_sport = htons(0x51ae);
2262 	udp->uh_dport = htons(0x08ae);
2263 	udp->uh_ulen = htons(udp_len);
2264 	udp->uh_sum = htons(0xbaad);  /* xnb_add_mbuf_cksum will fill this in */
2265 	data_payload = (uint8_t*)(udp + 1);
2266 	data_payload[0] = 'F';
2267 	data_payload[1] = 'r';
2268 	data_payload[2] = 'e';
2269 	data_payload[3] = 'e';
2270 	data_payload[4] = 'B';
2271 	data_payload[5] = 'S';
2272 	data_payload[6] = 'D';
2273 	data_payload[7] = '\n';
2274 
2275 	/* fill in the length field */
2276 	mbufc->m_len = pkt_len;
2277 	mbufc->m_pkthdr.len = pkt_len;
2278 	/* indicate that the netfront uses hw-assisted checksums */
2279 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2280 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2281 
2282 	/* Function under test */
2283 	xnb_add_mbuf_cksum(mbufc);
2284 
2285 	/* Check the checksums */
2286 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2287 	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
2288 
2289 	m_freem(mbufc);
2290 }
2291 
2292 /**
2293  * Helper function that populates a TCP packet used by all of the
2294  * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
2295  * large enough
2296  */
2297 static void
2298 xnb_fill_tcp(struct mbuf *m)
2299 {
2300 	struct ether_header *eh;
2301 	struct ip *iph;
2302 	struct tcphdr *tcp;
2303 	uint32_t *options;
2304 	uint8_t *data_payload;
2305 
2306 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2307 	eh = mtod(m, struct ether_header*);
2308 	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
2309 	iph = (struct ip*)(eh + 1);
2310 	tcp = (struct tcphdr*)(iph + 1);
2311 	tcp->th_sport = htons(0x9cd9);
2312 	tcp->th_dport = htons(2222);
2313 	tcp->th_seq = htonl(0x00f72b10);
2314 	tcp->th_ack = htonl(0x7f37ba6c);
2315 	tcp_set_flags(tcp, TH_ACK | TH_PUSH);
2316 	tcp->th_off = 8;
2317 	tcp->th_win = htons(0x410);
2318 	/* th_sum is incorrect; will be inserted by function under test */
2319 	tcp->th_sum = htons(0xbaad);
2320 	tcp->th_urp = htons(0);
2321 	/*
2322 	 * The following 12 bytes of options encode:
2323 	 * [nop, nop, TS val 33247 ecr 3457687679]
2324 	 */
2325 	options = (uint32_t*)(tcp + 1);
2326 	options[0] = htonl(0x0101080a);
2327 	options[1] = htonl(0x000081df);
2328 	options[2] = htonl(0xce18207f);
2329 	data_payload = (uint8_t*)(&options[3]);
2330 	data_payload[0] = 'F';
2331 	data_payload[1] = 'r';
2332 	data_payload[2] = 'e';
2333 	data_payload[3] = 'e';
2334 	data_payload[4] = 'B';
2335 	data_payload[5] = 'S';
2336 	data_payload[6] = 'D';
2337 	data_payload[7] = '\n';
2338 }
2339 
2340 /**
2341  * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
2342  * packet
2343  */
2344 static void
2345 xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
2346 {
2347 	const size_t payload_len = 8;
2348 	const size_t tcp_options_len = 12;
2349 	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2350 	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2351 	struct mbuf *mbufc;
2352 	struct ether_header *eh;
2353 	struct ip *iph;
2354 	struct tcphdr *tcp;
2355 	const uint16_t IP_CSUM = 0xa55a;
2356 	const uint16_t TCP_CSUM = 0x2f64;
2357 
2358 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2359 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2360 	xnb_fill_tcp(mbufc);
2361 	eh = mtod(mbufc, struct ether_header*);
2362 	iph = (struct ip*)(eh + 1);
2363 	tcp = (struct tcphdr*)(iph + 1);
2364 
2365 	/* fill in the length field */
2366 	mbufc->m_len = pkt_len;
2367 	mbufc->m_pkthdr.len = pkt_len;
2368 	/* indicate that the netfront uses hw-assisted checksums */
2369 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2370 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2371 
2372 	/* Function under test */
2373 	xnb_add_mbuf_cksum(mbufc);
2374 
2375 	/* Check the checksums */
2376 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2377 	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2378 
2379 	m_freem(mbufc);
2380 }
2381 
2382 /**
2383  * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
2384  */
2385 static void
2386 xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
2387 {
2388 	const size_t payload_len = 8;
2389 	const size_t tcp_options_len = 12;
2390 	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2391 	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2392 	struct mbuf *mbufc;
2393 	struct ether_header *eh;
2394 	struct ip *iph;
2395 	struct tcphdr *tcp;
2396 	/* Use deliberately bad checksums, and verify that they don't get */
2397 	/* corrected by xnb_add_mbuf_cksum */
2398 	const uint16_t IP_CSUM = 0xdead;
2399 	const uint16_t TCP_CSUM = 0xbeef;
2400 
2401 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2402 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2403 	xnb_fill_tcp(mbufc);
2404 	eh = mtod(mbufc, struct ether_header*);
2405 	iph = (struct ip*)(eh + 1);
2406 	iph->ip_sum = htons(IP_CSUM);
2407 	tcp = (struct tcphdr*)(iph + 1);
2408 	tcp->th_sum = htons(TCP_CSUM);
2409 
2410 	/* fill in the length field */
2411 	mbufc->m_len = pkt_len;
2412 	mbufc->m_pkthdr.len = pkt_len;
2413 	/* indicate that the netfront does not use hw-assisted checksums */
2414 	mbufc->m_pkthdr.csum_flags = 0;
2415 
2416 	/* Function under test */
2417 	xnb_add_mbuf_cksum(mbufc);
2418 
2419 	/* Check that the checksums didn't change */
2420 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2421 	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2422 
2423 	m_freem(mbufc);
2424 }
2425 #endif /* INET || INET6 */
2426 
2427 /**
2428  * sscanf on unsigned chars
2429  */
2430 static void
2431 xnb_sscanf_hhu(char *buffer, size_t buflen)
2432 {
2433 	const char mystr[] = "137";
2434 	uint8_t dest[12];
2435 	int i;
2436 
2437 	for (i = 0; i < 12; i++)
2438 		dest[i] = 'X';
2439 
2440 	XNB_ASSERT(sscanf(mystr, "%hhu", &dest[4]) == 1);
2441 	for (i = 0; i < 12; i++)
2442 		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
2443 }
2444 
2445 /**
2446  * sscanf on signed chars
2447  */
2448 static void
2449 xnb_sscanf_hhd(char *buffer, size_t buflen)
2450 {
2451 	const char mystr[] = "-27";
2452 	int8_t dest[12];
2453 	int i;
2454 
2455 	for (i = 0; i < 12; i++)
2456 		dest[i] = 'X';
2457 
2458 	XNB_ASSERT(sscanf(mystr, "%hhd", &dest[4]) == 1);
2459 	for (i = 0; i < 12; i++)
2460 		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
2461 }
2462 
2463 /**
2464  * sscanf on signed long longs
2465  */
2466 static void
2467 xnb_sscanf_lld(char *buffer, size_t buflen)
2468 {
2469 	const char mystr[] = "-123456789012345";	/* about -2**47 */
2470 	long long dest[3];
2471 	int i;
2472 
2473 	for (i = 0; i < 3; i++)
2474 		dest[i] = (long long)0xdeadbeefdeadbeef;
2475 
2476 	XNB_ASSERT(sscanf(mystr, "%lld", &dest[1]) == 1);
2477 	for (i = 0; i < 3; i++)
2478 		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2479 		    -123456789012345));
2480 }
2481 
2482 /**
2483  * sscanf on unsigned long longs
2484  */
2485 static void
2486 xnb_sscanf_llu(char *buffer, size_t buflen)
2487 {
2488 	const char mystr[] = "12802747070103273189";
2489 	unsigned long long dest[3];
2490 	int i;
2491 
2492 	for (i = 0; i < 3; i++)
2493 		dest[i] = (long long)0xdeadbeefdeadbeef;
2494 
2495 	XNB_ASSERT(sscanf(mystr, "%llu", &dest[1]) == 1);
2496 	for (i = 0; i < 3; i++)
2497 		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2498 		    12802747070103273189ull));
2499 }
2500 
2501 /**
2502  * sscanf with %hhn (stores the count of consumed characters in an unsigned char)
2503  */
2504 static void
2505 xnb_sscanf_hhn(char *buffer, size_t buflen)
2506 {
2507 	const char mystr[] =
2508 	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2509 	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2510 	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
2511 	unsigned char dest[12];
2512 	int i;
2513 
2514 	for (i = 0; i < 12; i++)
2515 		dest[i] = (unsigned char)'X';
2516 
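	/*
	 * The format consumes 160 characters; %hhn stores that count but
	 * does not count as an assignment, hence the return value of 0.
	 */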
2517 	XNB_ASSERT(sscanf(mystr,
2518 	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2519 	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2520 	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]) == 0);
2521 	for (i = 0; i < 12; i++)
2522 		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
2523 }
2524