/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Alan Somers         (Spectra Logic Corporation)
 *          John Suykerbuyk     (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \file netback_unit_tests.c
 *
 * \brief Unit tests for the Xen netback driver.
 *
 * Due to the driver's use of static functions, these tests cannot be compiled
 * standalone; they must be #include'd from the driver's .c file.
 */

/** Helper macro used to snprintf to a buffer and update the buffer pointer */
#define	SNCATF(buffer, buflen, ...) do {				\
	size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);	\
	buffer += new_chars;						\
	/* be careful; snprintf's return value can be > buflen */	\
	buflen -= MIN(buflen, new_chars);				\
} while (0)
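
/*
 * Illustrative SNCATF usage (a sketch, not part of the driver).  Both
 * arguments must be lvalues, because the macro advances the pointer and
 * shrinks the remaining length:
 *
 *	char msg[80] = "";
 *	char *cursor = msg;
 *	size_t space = sizeof(msg);
 *	SNCATF(cursor, space, "%d Tests Passed\n", 5);
 *	SNCATF(cursor, space, "%d Tests FAILED\n", 2);
 */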

/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
#define	STRINGIFY(x) #x
#define	TOSTRING(x) STRINGIFY(x)
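
/*
 * Example (illustrative): on line 123, TOSTRING(__LINE__) expands __LINE__
 * first and yields "123", whereas STRINGIFY(__LINE__) would stringify the
 * unexpanded token and yield "__LINE__".
 */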

/**
 * Writes an error message to buffer if cond is false.
 * Note the implied parameters buffer and buflen.
 */
#define	XNB_ASSERT(cond) ({						\
	int passed = (cond);						\
	char *_buffer = (buffer);					\
	size_t _buflen = (buflen);					\
	if (! passed) {							\
		strlcat(_buffer, __func__, _buflen);			\
		strlcat(_buffer, ":" TOSTRING(__LINE__)			\
		  " Assertion Error: " #cond "\n", _buflen);		\
	}								\
	})

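/*
 * Minimal usage sketch (illustrative only; "example_testcase" is a
 * hypothetical name).  The macro relies on the buffer/buflen parameters
 * that every testcase_t receives:
 *
 *	static void
 *	example_testcase(char *buffer, size_t buflen)
 *	{
 *		XNB_ASSERT(1 + 1 == 2);	(passes; writes nothing)
 *		XNB_ASSERT(1 + 1 == 3);	(fails; appends
 *			"example_testcase:NN Assertion Error: 1 + 1 == 3")
 *	}
 */
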
/**
 * The signature used by all testcases.  If the test writes anything
 * to buffer, then it will be considered a failure
 * \param buffer	Return storage for error messages
 * \param buflen	The space available in the buffer
 */
typedef void testcase_t(char *buffer, size_t buflen);

/**
 * Signature used by setup functions
 * \return nonzero on error
 */
typedef int setup_t(void);

typedef void teardown_t(void);

/** A simple test fixture comprising setup, teardown, and test */
struct test_fixture {
	/** Will be run before the test to allocate and initialize variables */
	setup_t *setup;

	/** Will be run if setup succeeds */
	testcase_t *test;

	/** Cleans up test data whether or not the setup succeeded */
	teardown_t *teardown;
};

typedef struct test_fixture test_fixture_t;

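/*
 * Illustrative fixture table and run (a sketch; "my_setup", "my_test", and
 * "my_teardown" are hypothetical names):
 *
 *	static test_fixture_t const demo[] = {
 *		{my_setup, my_test, my_teardown},
 *		{null_setup, my_test, null_teardown},
 *	};
 *	int failed = xnb_unit_test_runner(demo, nitems(demo), buf, buflen);
 */
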
static int	xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
static int	xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
				     char *buffer, size_t buflen);

static int __unused
null_setup(void) { return 0; }

static void __unused
null_teardown(void) { }

static setup_t setup_pvt_data;
static teardown_t teardown_pvt_data;
static testcase_t xnb_ring2pkt_emptyring;
static testcase_t xnb_ring2pkt_1req;
static testcase_t xnb_ring2pkt_2req;
static testcase_t xnb_ring2pkt_3req;
static testcase_t xnb_ring2pkt_extra;
static testcase_t xnb_ring2pkt_partial;
static testcase_t xnb_ring2pkt_wraps;
static testcase_t xnb_txpkt2rsp_emptypkt;
static testcase_t xnb_txpkt2rsp_1req;
static testcase_t xnb_txpkt2rsp_extra;
static testcase_t xnb_txpkt2rsp_long;
static testcase_t xnb_txpkt2rsp_invalid;
static testcase_t xnb_txpkt2rsp_error;
static testcase_t xnb_txpkt2rsp_wraps;
static testcase_t xnb_pkt2mbufc_empty;
static testcase_t xnb_pkt2mbufc_short;
static testcase_t xnb_pkt2mbufc_csum;
static testcase_t xnb_pkt2mbufc_1cluster;
static testcase_t xnb_pkt2mbufc_largecluster;
static testcase_t xnb_pkt2mbufc_2cluster;
static testcase_t xnb_txpkt2gnttab_empty;
static testcase_t xnb_txpkt2gnttab_short;
static testcase_t xnb_txpkt2gnttab_2req;
static testcase_t xnb_txpkt2gnttab_2cluster;
static testcase_t xnb_update_mbufc_short;
static testcase_t xnb_update_mbufc_2req;
static testcase_t xnb_update_mbufc_2cluster;
static testcase_t xnb_mbufc2pkt_empty;
static testcase_t xnb_mbufc2pkt_short;
static testcase_t xnb_mbufc2pkt_1cluster;
static testcase_t xnb_mbufc2pkt_2short;
static testcase_t xnb_mbufc2pkt_long;
static testcase_t xnb_mbufc2pkt_extra;
static testcase_t xnb_mbufc2pkt_nospace;
static testcase_t xnb_rxpkt2gnttab_empty;
static testcase_t xnb_rxpkt2gnttab_short;
static testcase_t xnb_rxpkt2gnttab_2req;
static testcase_t xnb_rxpkt2rsp_empty;
static testcase_t xnb_rxpkt2rsp_short;
static testcase_t xnb_rxpkt2rsp_extra;
static testcase_t xnb_rxpkt2rsp_2short;
static testcase_t xnb_rxpkt2rsp_2slots;
static testcase_t xnb_rxpkt2rsp_copyerror;
static testcase_t xnb_sscanf_llu;
static testcase_t xnb_sscanf_lld;
static testcase_t xnb_sscanf_hhu;
static testcase_t xnb_sscanf_hhd;
static testcase_t xnb_sscanf_hhn;

#if defined(INET) || defined(INET6)
/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
static testcase_t xnb_add_mbuf_cksum_arp;
static testcase_t xnb_add_mbuf_cksum_tcp;
static testcase_t xnb_add_mbuf_cksum_udp;
static testcase_t xnb_add_mbuf_cksum_icmp;
static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
static void	xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
				   uint16_t ip_id, uint16_t ip_p,
				   uint16_t ip_off, uint16_t ip_sum);
static void	xnb_fill_tcp(struct mbuf *m);
#endif /* INET || INET6 */

/** Private data used by unit tests */
static struct {
	gnttab_copy_table	gnttab;
	netif_rx_back_ring_t	rxb;
	netif_rx_front_ring_t	rxf;
	netif_tx_back_ring_t	txb;
	netif_tx_front_ring_t	txf;
	struct ifnet*		ifp;
	netif_rx_sring_t*	rxs;
	netif_tx_sring_t*	txs;
} xnb_unit_pvt;

static inline void safe_m_freem(struct mbuf **ppMbuf) {
	if (*ppMbuf != NULL) {
		m_freem(*ppMbuf);
		*ppMbuf = NULL;
	}
}

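/*
 * Design note: taking a struct mbuf ** rather than a struct mbuf * lets
 * safe_m_freem NULL out the caller's pointer, so a test that frees the
 * same chain twice becomes a harmless no-op instead of a double free.
 */
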
/**
 * The unit test runner.  It will run every supplied test and return an
 * output message as a string
 * \param tests		An array of tests.  Every test will be attempted.
 * \param ntests	The length of tests
 * \param buffer	Return storage for the result string
 * \param buflen	The length of buffer
 * \return		The number of tests that failed
 */
static int
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
    size_t buflen)
{
	int i;
	int n_passes;
	int n_failures = 0;

	for (i = 0; i < ntests; i++) {
		int error = tests[i].setup();
		if (error != 0) {
			SNCATF(buffer, buflen,
			    "Setup failed for test idx %d\n", i);
			n_failures++;
		} else {
			size_t new_chars;

			tests[i].test(buffer, buflen);
			new_chars = strnlen(buffer, buflen);
			buffer += new_chars;
			buflen -= new_chars;

			if (new_chars > 0) {
				n_failures++;
			}
		}
		tests[i].teardown();
	}

	n_passes = ntests - n_failures;
	if (n_passes > 0) {
		SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
	}
	if (n_failures > 0) {
		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
	}

	return n_failures;
}

/** Number of unit tests.  Must match the length of the tests array below */
#define	TOTAL_TESTS	(53)
/**
 * Max memory available for returning results.  400 chars/test should give
 * enough space for a five line error message for every test
 */
#define	TOTAL_BUFLEN	(400 * TOTAL_TESTS + 2)

/**
 * Called from userspace by a sysctl.  Runs all internal unit tests, and
 * returns the results to userspace as a string
 * \param oidp	unused
 * \param arg1	pointer to an xnb_softc for a specific xnb device
 * \param arg2	unused
 * \param req	sysctl access structure
 * \return a string via the special SYSCTL_OUT macro.
 */
static int
xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
	test_fixture_t const tests[TOTAL_TESTS] = {
		{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
		{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
		{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
		{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
#if defined(INET) || defined(INET6)
		{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
		{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
#endif
		{null_setup, xnb_sscanf_hhd, null_teardown},
		{null_setup, xnb_sscanf_hhu, null_teardown},
		{null_setup, xnb_sscanf_lld, null_teardown},
		{null_setup, xnb_sscanf_llu, null_teardown},
		{null_setup, xnb_sscanf_hhn, null_teardown},
	};
	/**
	 * results is static so that the data will persist after this function
	 * returns.  The sysctl code expects us to return a constant string.
	 * \todo: the static variable is not thread safe.  Put a mutex around
	 * it.
	 */
	static char results[TOTAL_BUFLEN];

	/* empty the result strings */
	results[0] = 0;
	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);

	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
}
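
/*
 * A sketch of how the handler above might be attached to the sysctl tree
 * from the driver's attach routine.  The OID name and parent node are
 * assumptions for illustration; the actual registration lives in the main
 * driver source, not in this file:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "unit_test_results", CTLTYPE_STRING | CTLFLAG_RD, xnb, 0,
 *	    xnb_unit_test_main, "A", "Xen netback unit test results");
 *
 * The accumulated result string can then be read with sysctl(8).
 */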
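/**
 * Initialize the private data shared by most tests.  Note that the frontend
 * and backend views of each ring are initialized over the same shared ring
 * page, which lets one kernel play both domains: the tests produce requests
 * through txf/rxf and consume them through txb/rxb.
 */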
static int
setup_pvt_data(void)
{
	int error = 0;

	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.txs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.txs);
		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
	} else {
		error = 1;
	}

	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
	if (xnb_unit_pvt.ifp == NULL) {
		error = 1;
	}

	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.rxs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.rxs);
		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
	} else {
		error = 1;
	}

	return error;
}

static void
teardown_pvt_data(void)
{
	if (xnb_unit_pvt.txs != NULL) {
		free(xnb_unit_pvt.txs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.rxs != NULL) {
		free(xnb_unit_pvt.rxs, M_XENNETBACK);
	}
	if (xnb_unit_pvt.ifp != NULL) {
		if_free(xnb_unit_pvt.ifp);
	}
}

/**
 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
 */
static void
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a single request packet correctly
 */
static void
xnb_ring2pkt_1req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);

	req->flags = 0;
	req->size = 69;	/* arbitrary number for test */
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 1);
	XNB_ASSERT(pkt.size == 69);
	XNB_ASSERT(pkt.car_size == 69);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a two request packet correctly.
 * This tests handling of the MORE_DATA flag and cdr
 */
static void
xnb_ring2pkt_2req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 2);
	XNB_ASSERT(pkt.size == 100);
	XNB_ASSERT(pkt.car_size == 60);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
}

/**
 * Verify that xnb_ring2pkt can convert a three request packet correctly
 */
static void
xnb_ring2pkt_3req(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 200;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 40;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.size == 200);
	XNB_ASSERT(pkt.car_size == 110);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * Verify that xnb_ring2pkt can read extra info
 */
static void
xnb_ring2pkt_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	struct netif_extra_info *ext;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->flags = 0;
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->u.gso.size = 250;
	ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
	ext->u.gso.features = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.extra.flags == 0);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(pkt.extra.u.gso.size == 250);
	XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
	XNB_ASSERT(pkt.size == 150);
	XNB_ASSERT(pkt.car_size == 100);
	XNB_ASSERT(pkt.flags == NETTXF_extra_info);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 2);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
}

/**
 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
 * not yet in the ring
 */
static void
xnb_ring2pkt_partial(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 150;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
}

/**
 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
 * the end of the ring
 */
static void
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int num_consumed;
	struct netif_tx_request *req;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * xnb_txpkt2rsp should do nothing for an empty packet
 */
static void
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	XNB_ASSERT(
	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
	XNB_ASSERT(
	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
}

/**
 * xnb_txpkt2rsp responding to one request
 */
static void
xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
 */
static void
xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
 */
static void
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	req->id = 254;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 300;
	req->flags = NETTXF_more_data;
	req->id = 1034;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 400;
	req->flags = 0;
	req->id = 34;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 3);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to an invalid packet.
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
 */
static void
xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = NETTXF_extra_info;
	req->id = 69;
	xnb_unit_pvt.txf.req_prod_pvt++;

	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = 0xFF;	/* Invalid extra type */
	ext->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to one request which caused an error
 */
static void
xnb_txpkt2rsp_error(char *buffer, size_t buflen)
{
	uint16_t num_consumed;
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 1000;
	req->flags = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
}

/**
 * xnb_txpkt2rsp's responses wrap around the end of the ring
 */
static void
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	unsigned int rsize;

	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 550;
	req->id = 1;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 100;
	req->id = 2;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 50;
	req->id = 3;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

	XNB_ASSERT(
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * Helper function used to set up pkt2mbufc tests
 * \param[out] pkt	the returned packet object
 * \param size		size in bytes of the single request to push to the ring
 * \param flags		optional flags to put in the netif request
 * \return number of requests consumed from the ring
 */
static int
xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
{
	struct netif_tx_request *req;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = flags;
	req->size = size;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
	                            xnb_unit_pvt.txb.req_cons);
}

/**
 * xnb_pkt2mbufc on an empty packet
 */
static void
xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
 */
static void
xnb_pkt2mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on short packet whose checksum was validated by the netfront
 */
static void
xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, NETTXF_data_validated);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that can fit in one cluster
 */
static void
xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
 */
static void
xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
{
	const size_t size = MCLBYTES + 1;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that cannot fit in two regular clusters
 */
static void
xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
{
	const size_t size = 2 * MCLBYTES + 1;
	size_t space = 0;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	struct mbuf *m;

	xnb_get1pkt(&pkt, size, 0);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);

	for (m = pMbuf; m != NULL; m = m->m_next) {
		space += M_TRAILINGSPACE(m);
	}
	XNB_ASSERT(space >= size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on an empty packet.  Should return empty gnttab
 */
static void
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	pkt.list_len = 0;

	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pkt.size = 0;
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 0);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
 * and has one request
 */
static void
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	XNB_ASSERT(n_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for source */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
		virt_to_mfn(mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t)));

	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
	      mtod(pMbuf, vm_offset_t) + 1400));
	safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
 */
static void
xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(pMbuf != NULL);
	if (pMbuf == NULL)
		return;

	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(n_entries == 3);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		      mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);

		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
			    MCLBYTES);
	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		      mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		      mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(
		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);

	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	m_freem(pMbuf);
}

/**
 * xnb_update_mbufc on a short packet that only has one gnttab entry
 */
static void
xnb_update_mbufc_short(char *buffer, size_t buflen)
{
	const size_t size = MINCLSIZE - 1;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = size;
	req->gref = 7;
	req->offset = 17;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(pMbuf->m_len == size);
	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_update_mbufc_2req(char *buffer, size_t buflen)
{
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	req->size = 1900;
	req->gref = 7;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = 500;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
	xnb_unit_pvt.gnttab[1].status = GNTST_okay;

	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
	XNB_ASSERT(pMbuf->m_len == 1900);

	safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a single request that spans two mbuf clusters
 */
static void
xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
{
	int i;
	int n_entries;
	struct xnb_pkt pkt;
	struct mbuf *pMbuf;
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = 0;
	req->size = data_this_transaction;
	req->gref = 8;
	req->offset = 0;
	xnb_unit_pvt.txf.req_prod_pvt++;

	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

	/* Update grant table's status fields */
	for (i = 0; i < n_entries; i++) {
		xnb_unit_pvt.gnttab[i].status = GNTST_okay;
	}
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);

	if (n_entries == 3) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
	} else if (n_entries == 2) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
		XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
		XNB_ASSERT(pMbuf->m_next->m_len == 1);
	} else {
		/* should never get here */
		XNB_ASSERT(0);
	}
	safe_m_freem(&pMbuf);
}

/** xnb_mbufc2pkt on an empty mbufc */
static void
xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	int free_slots = 64;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);
	/*
	 * note: it is illegal to set M_PKTHDR on an mbuf with no data.  Doing
	 * so will cause m_freem to segfault
	 */
	XNB_ASSERT(mbuf->m_len == 0);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a short mbufc */
static void
xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 128;
	int free_slots = 64;
	RING_IDX start = 9;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
static void
xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = MCLBYTES;
	int free_slots = 32;
	RING_IDX start = 12;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);

	safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
static void
xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	struct mbuf *mbufc, *mbufc2;

	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc2 != NULL);
	if (mbufc2 == NULL) {
		safe_m_freem(&mbufc);
		return;
	}
	mbufc2->m_pkthdr.len = size1 + size2;
	mbufc2->m_len = size1;

	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size1 + size2);
	XNB_ASSERT(pkt.car == start);
	/*
	 * The second m_getm may allocate a new mbuf and append
	 * it to the chain, or it may simply extend the first mbuf.
	 */
	if (mbufc2->m_next != NULL) {
		XNB_ASSERT(pkt.car_size == size1);
		XNB_ASSERT(pkt.list_len == 1);
		XNB_ASSERT(pkt.cdr == start + 1);
	}

	safe_m_freem(&mbufc2);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
static void
xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/*
	 * There should be >1 response in the packet, and there is no
	 * extra info.
	 */
	XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
	XNB_ASSERT(pkt.cdr == pkt.car + 1);

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
static void
xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 15;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;

	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	XNB_ASSERT(pkt.car == start);
	XNB_ASSERT(pkt.car_size == mbufc->m_len);
	/* There should be >1 response in the packet, there is extra info */
	XNB_ASSERT(pkt.flags & NETRXF_extra_info);
	XNB_ASSERT(pkt.flags & NETRXF_data_validated);
	XNB_ASSERT(pkt.cdr == pkt.car + 2);
	XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));

	safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt with insufficient space in the ring */
static void
xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	size_t size = 14 * MCLBYTES / 3;
	size_t size_remaining;
	int free_slots = 2;
	RING_IDX start = 3;
	struct mbuf *mbufc, *m;
	int error;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	mbufc->m_pkthdr.len = size;
	size_remaining = size;
	for (m = mbufc; m != NULL; m = m->m_next) {
		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
		size_remaining -= m->m_len;
	}

	error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	XNB_ASSERT(error == EAGAIN);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

	safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2gnttab on an empty packet.  Should return empty gnttab
 */
static void
xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries;
	int free_slots = 60;
	struct mbuf *mbuf;

	mbuf = m_get(M_WAITOK, MT_DATA);

	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries == 0);

	safe_m_freem(&mbuf);
}

/** xnb_rxpkt2gnttab on a short packet without extra data */
static void
xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	int nr_entries;
	size_t size = 128;
	int free_slots = 60;
	RING_IDX start = 9;
	struct netif_rx_request *req;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
			       xnb_unit_pvt.txf.req_prod_pvt);
	req->gref = 7;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
				      &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	XNB_ASSERT(nr_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for dest */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
		   mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
		   virt_to_mfn(mtod(mbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);

	safe_m_freem(&mbuf);
}

1693 /**
 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
1695  */
1696 static void
1697 xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1698 {
1699 	struct xnb_pkt pkt;
1700 	int nr_entries;
1701 	int i, num_mbufs;
1702 	size_t total_granted_size = 0;
1703 	size_t size = MJUMPAGESIZE + 1;
1704 	int free_slots = 60;
1705 	RING_IDX start = 11;
1706 	struct netif_rx_request *req;
1707 	struct mbuf *mbuf, *m;
1708 
1709 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1710 	mbuf->m_flags |= M_PKTHDR;
1711 	mbuf->m_pkthdr.len = size;
1712 	mbuf->m_len = size;
1713 
1714 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1715 
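	/*
	 * Post one fake frontend request per mbuf in the chain; the gref
	 * values are arbitrary and only need to be distinct.
	 */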
	for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + i);
1719 		req->gref = i;
1720 		req->id = 5;
1721 	}
1722 	num_mbufs = i;
1723 
1724 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1725 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1726 
1727 	XNB_ASSERT(nr_entries >= num_mbufs);
1728 	for (i = 0; i < nr_entries; i++) {
1729 		int end_offset = xnb_unit_pvt.gnttab[i].len +
1730 			xnb_unit_pvt.gnttab[i].dest.offset;
1731 		XNB_ASSERT(end_offset <= PAGE_SIZE);
1732 		total_granted_size += xnb_unit_pvt.gnttab[i].len;
1733 	}
1734 	XNB_ASSERT(total_granted_size == size);

	safe_m_freem(&mbuf);
}
1736 
1737 /**
 * xnb_rxpkt2rsp on an empty packet.  Should not generate any responses
1739  */
1740 static void
1741 xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1742 {
1743 	struct xnb_pkt pkt;
1744 	int nr_entries;
1745 	int nr_reqs;
1746 	int free_slots = 60;
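	/* Snapshot the ring state so we can verify it is left untouched */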
1747 	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1748 	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1749 	struct mbuf *mbuf;
1750 
1751 	mbuf = m_get(M_WAITOK, MT_DATA);
1752 
1753 	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1754 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1755 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1756 
1757 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1758 	    &xnb_unit_pvt.rxb);
1759 	XNB_ASSERT(nr_reqs == 0);
1760 	XNB_ASSERT(
1761 	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1762 	XNB_ASSERT(
1763 	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1764 
1765 	safe_m_freem(&mbuf);
1766 }
1767 
1768 /**
1769  * xnb_rxpkt2rsp on a short packet with no extras
1770  */
1771 static void
1772 xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1773 {
1774 	struct xnb_pkt pkt;
1775 	int nr_entries, nr_reqs;
1776 	size_t size = 128;
1777 	int free_slots = 60;
1778 	RING_IDX start = 5;
1779 	struct netif_rx_request *req;
1780 	struct netif_rx_response *rsp;
1781 	struct mbuf *mbuf;
1782 
1783 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1784 	mbuf->m_flags |= M_PKTHDR;
1785 	mbuf->m_pkthdr.len = size;
1786 	mbuf->m_len = size;
1787 
1788 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1789 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1790 	req->gref = 7;
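	/*
	 * Make the shared ring look as if the frontend had posted exactly
	 * one request and consumed no responses yet.
	 */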
1791 	xnb_unit_pvt.rxb.req_cons = start;
1792 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1793 	xnb_unit_pvt.rxs->req_prod = start + 1;
1794 	xnb_unit_pvt.rxs->rsp_prod = start;
1795 
1796 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1797 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1798 
1799 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1800 	    &xnb_unit_pvt.rxb);
1801 
1802 	XNB_ASSERT(nr_reqs == 1);
1803 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1804 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1805 	XNB_ASSERT(rsp->id == req->id);
1806 	XNB_ASSERT(rsp->offset == 0);
1807 	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1808 	XNB_ASSERT(rsp->status == size);
1809 
1810 	safe_m_freem(&mbuf);
1811 }
1812 
1813 /**
1814  * xnb_rxpkt2rsp with extra data
1815  */
1816 static void
1817 xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1818 {
1819 	struct xnb_pkt pkt;
1820 	int nr_entries, nr_reqs;
1821 	size_t size = 14;
1822 	int free_slots = 15;
1823 	RING_IDX start = 3;
1824 	uint16_t id = 49;
1825 	uint16_t gref = 65;
1826 	uint16_t mss = TCP_MSS - 40;
1827 	struct mbuf *mbufc;
1828 	struct netif_rx_request *req;
1829 	struct netif_rx_response *rsp;
1830 	struct netif_extra_info *ext;
1831 
1832 	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1833 	XNB_ASSERT(mbufc != NULL);
1834 	if (mbufc == NULL)
1835 		return;
1836 
1837 	mbufc->m_flags |= M_PKTHDR;
1838 	mbufc->m_pkthdr.len = size;
1839 	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1840 	mbufc->m_pkthdr.tso_segsz = mss;
1841 	mbufc->m_len = size;
1842 
1843 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1844 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1845 	req->id = id;
1846 	req->gref = gref;
1847 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1848 	req->id = id + 1;
1849 	req->gref = gref + 1;
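	/*
	 * Advertise two outstanding requests: one for the data and one to
	 * be consumed by the GSO extra info.
	 */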
1850 	xnb_unit_pvt.rxb.req_cons = start;
1851 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1852 	xnb_unit_pvt.rxs->req_prod = start + 2;
1853 	xnb_unit_pvt.rxs->rsp_prod = start;
1854 
1855 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1856 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1857 
1858 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1859 	    &xnb_unit_pvt.rxb);
1860 
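	/*
	 * Expect two response slots: a normal netif_rx_response for the
	 * data, and a second slot overlaid with a struct netif_extra_info
	 * describing the GSO parameters.
	 */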
1861 	XNB_ASSERT(nr_reqs == 2);
1862 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1863 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1864 	XNB_ASSERT(rsp->id == id);
1865 	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1866 	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1867 	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1868 	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1869 	XNB_ASSERT(rsp->status == size);
1870 
1871 	ext = (struct netif_extra_info*)
1872 		RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1873 	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1874 	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1875 	XNB_ASSERT(ext->u.gso.size == mss);
1876 	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_EXTRA_TYPE_GSO);
1877 
1878 	safe_m_freem(&mbufc);
1879 }
1880 
1881 /**
 * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
 * generate two response slots.
1884  */
1885 static void
1886 xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1887 {
1888 	struct xnb_pkt pkt;
1889 	int nr_entries, nr_reqs;
1890 	size_t size = PAGE_SIZE + 100;
1891 	int free_slots = 3;
1892 	uint16_t id1 = 17;
1893 	uint16_t id2 = 37;
1894 	uint16_t gref1 = 24;
1895 	uint16_t gref2 = 34;
1896 	RING_IDX start = 15;
1897 	struct netif_rx_request *req;
1898 	struct netif_rx_response *rsp;
1899 	struct mbuf *mbuf;
1900 
1901 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1902 	mbuf->m_flags |= M_PKTHDR;
1903 	mbuf->m_pkthdr.len = size;
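	/* Split the payload across the chain if m_getm(9) returned one */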
	if (mbuf->m_next != NULL) {
		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
		mbuf->m_len = first_len;
		mbuf->m_next->m_len = size - first_len;
	} else {
		mbuf->m_len = size;
	}
1912 
1913 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1914 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1915 	req->gref = gref1;
1916 	req->id = id1;
1917 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1918 	req->gref = gref2;
1919 	req->id = id2;
1920 	xnb_unit_pvt.rxb.req_cons = start;
1921 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1922 	xnb_unit_pvt.rxs->req_prod = start + 2;
1923 	xnb_unit_pvt.rxs->rsp_prod = start;
1924 
1925 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1926 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1927 
1928 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1929 	    &xnb_unit_pvt.rxb);
1930 
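	/*
	 * PAGE_SIZE + 100 bytes cannot be granted into a single foreign
	 * page, so the packet should occupy two response slots: the first
	 * reports PAGE_SIZE bytes with NETRXF_more_data set, the second
	 * reports the remaining 100 bytes.
	 */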
1931 	XNB_ASSERT(nr_reqs == 2);
1932 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1933 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1934 	XNB_ASSERT(rsp->id == id1);
1935 	XNB_ASSERT(rsp->offset == 0);
1936 	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1937 	XNB_ASSERT(rsp->flags & NETRXF_more_data);
1938 	XNB_ASSERT(rsp->status == PAGE_SIZE);
1939 
1940 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1941 	XNB_ASSERT(rsp->id == id2);
1942 	XNB_ASSERT(rsp->offset == 0);
1943 	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1944 	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1945 	XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1946 
1947 	safe_m_freem(&mbuf);
1948 }
1949 
1950 /** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1951 static void
1952 xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
1953 	struct xnb_pkt pkt;
1954 	int nr_reqs, nr_entries;
1955 	size_t size1 = MHLEN - 5;
1956 	size_t size2 = MHLEN - 15;
1957 	int free_slots = 32;
1958 	RING_IDX start = 14;
1959 	uint16_t id = 47;
1960 	uint16_t gref = 54;
1961 	struct netif_rx_request *req;
1962 	struct netif_rx_response *rsp;
1963 	struct mbuf *mbufc;
1964 
1965 	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1966 	XNB_ASSERT(mbufc != NULL);
1967 	if (mbufc == NULL)
1968 		return;
1969 	mbufc->m_flags |= M_PKTHDR;
1970 
1971 	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1972 	XNB_ASSERT(mbufc->m_next != NULL);
1973 	mbufc->m_pkthdr.len = size1 + size2;
1974 	mbufc->m_len = size1;
1975 	mbufc->m_next->m_len = size2;
1976 
1977 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1978 
1979 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1980 	req->gref = gref;
1981 	req->id = id;
1982 	xnb_unit_pvt.rxb.req_cons = start;
1983 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1984 	xnb_unit_pvt.rxs->req_prod = start + 1;
1985 	xnb_unit_pvt.rxs->rsp_prod = start;
1986 
1987 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1988 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1989 
1990 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1991 	    &xnb_unit_pvt.rxb);
1992 
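	/*
	 * The two sub-page mbufs need two copy operations, but both fit in
	 * the same foreign page, so only a single response is generated.
	 */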
1993 	XNB_ASSERT(nr_entries == 2);
1994 	XNB_ASSERT(nr_reqs == 1);
1995 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1996 	XNB_ASSERT(rsp->id == id);
1997 	XNB_ASSERT(rsp->status == size1 + size2);
1998 	XNB_ASSERT(rsp->offset == 0);
1999 	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
2000 
2001 	safe_m_freem(&mbufc);
2002 }
2003 
2004 /**
2005  * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2006  * Note: this test will result in an error message being printed to the console
2007  * such as:
2008  * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2009  */
2010 static void
2011 xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2012 {
2013 	struct xnb_pkt pkt;
2014 	int nr_entries, nr_reqs;
2015 	int id = 7;
2016 	int gref = 42;
2017 	uint16_t canary = 6859;
2018 	size_t size = 7 * MCLBYTES;
2019 	int free_slots = 9;
2020 	RING_IDX start = 2;
2021 	struct netif_rx_request *req;
2022 	struct netif_rx_response *rsp;
2023 	struct mbuf *mbuf;
2024 
2025 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2026 	mbuf->m_flags |= M_PKTHDR;
2027 	mbuf->m_pkthdr.len = size;
2028 	mbuf->m_len = size;
2029 
2030 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2031 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2032 	req->gref = gref;
2033 	req->id = id;
2034 	xnb_unit_pvt.rxb.req_cons = start;
2035 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2036 	xnb_unit_pvt.rxs->req_prod = start + 1;
2037 	xnb_unit_pvt.rxs->rsp_prod = start;
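	/*
	 * Plant canary values in the next request so we can detect whether
	 * the error path consumes more of the ring than it should.
	 */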
2038 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2039 	req->gref = canary;
2040 	req->id = canary;
2041 
2042 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2043 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	/* Inject the error */
2045 	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2046 
2047 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2048 	    &xnb_unit_pvt.rxb);
2049 
2050 	XNB_ASSERT(nr_reqs == 1);
2051 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2052 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2053 	XNB_ASSERT(rsp->id == id);
2054 	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2055 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2056 	XNB_ASSERT(req->gref == canary);
2057 	XNB_ASSERT(req->id == canary);
2058 
2059 	safe_m_freem(&mbuf);
2060 }
2061 
2062 #if defined(INET) || defined(INET6)
2063 /**
2064  * xnb_add_mbuf_cksum on an ARP request packet
2065  */
2066 static void
2067 xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
2068 {
2069 	const size_t pkt_len = sizeof(struct ether_header) +
2070 		sizeof(struct ether_arp);
2071 	struct mbuf *mbufc;
2072 	struct ether_header *eh;
2073 	struct ether_arp *ep;
2074 	unsigned char pkt_orig[pkt_len];
2075 
2076 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2077 	/* Fill in an example arp request */
2078 	eh = mtod(mbufc, struct ether_header*);
2079 	eh->ether_dhost[0] = 0xff;
2080 	eh->ether_dhost[1] = 0xff;
2081 	eh->ether_dhost[2] = 0xff;
2082 	eh->ether_dhost[3] = 0xff;
2083 	eh->ether_dhost[4] = 0xff;
2084 	eh->ether_dhost[5] = 0xff;
2085 	eh->ether_shost[0] = 0x00;
2086 	eh->ether_shost[1] = 0x15;
2087 	eh->ether_shost[2] = 0x17;
2088 	eh->ether_shost[3] = 0xe9;
2089 	eh->ether_shost[4] = 0x30;
2090 	eh->ether_shost[5] = 0x68;
2091 	eh->ether_type = htons(ETHERTYPE_ARP);
2092 	ep = (struct ether_arp*)(eh + 1);
2093 	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
2094 	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
2095 	ep->ea_hdr.ar_hln = 6;
2096 	ep->ea_hdr.ar_pln = 4;
2097 	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
2098 	ep->arp_sha[0] = 0x00;
2099 	ep->arp_sha[1] = 0x15;
2100 	ep->arp_sha[2] = 0x17;
2101 	ep->arp_sha[3] = 0xe9;
2102 	ep->arp_sha[4] = 0x30;
2103 	ep->arp_sha[5] = 0x68;
2104 	ep->arp_spa[0] = 0xc0;
2105 	ep->arp_spa[1] = 0xa8;
2106 	ep->arp_spa[2] = 0x0a;
2107 	ep->arp_spa[3] = 0x04;
2108 	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
2109 	ep->arp_tpa[0] = 0xc0;
2110 	ep->arp_tpa[1] = 0xa8;
2111 	ep->arp_tpa[2] = 0x0a;
2112 	ep->arp_tpa[3] = 0x06;
2113 
2114 	/* fill in the length field */
2115 	mbufc->m_len = pkt_len;
2116 	mbufc->m_pkthdr.len = pkt_len;
2117 	/* indicate that the netfront uses hw-assisted checksums */
2118 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2119 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2120 
2121 	/* Make a backup copy of the packet */
2122 	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
2123 
2124 	/* Function under test */
2125 	xnb_add_mbuf_cksum(mbufc);
2126 
2127 	/* Verify that the packet's data did not change */
2128 	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
2129 	m_freem(mbufc);
2130 }
2131 
2132 /**
2133  * Helper function that populates the ethernet header and IP header used by
2134  * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
2135  * and must be large enough
2136  */
2137 static void
2138 xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
2139 		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
2140 {
2141 	struct ether_header *eh;
2142 	struct ip *iph;
2143 
2144 	eh = mtod(m, struct ether_header*);
2145 	eh->ether_dhost[0] = 0x00;
2146 	eh->ether_dhost[1] = 0x16;
2147 	eh->ether_dhost[2] = 0x3e;
2148 	eh->ether_dhost[3] = 0x23;
2149 	eh->ether_dhost[4] = 0x50;
2150 	eh->ether_dhost[5] = 0x0b;
2151 	eh->ether_shost[0] = 0x00;
2152 	eh->ether_shost[1] = 0x16;
2153 	eh->ether_shost[2] = 0x30;
2154 	eh->ether_shost[3] = 0x00;
2155 	eh->ether_shost[4] = 0x00;
2156 	eh->ether_shost[5] = 0x00;
2157 	eh->ether_type = htons(ETHERTYPE_IP);
2158 	iph = (struct ip*)(eh + 1);
2159 	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
2160 	iph->ip_v = 4;		/* IP v4 */
2161 	iph->ip_tos = 0;
2162 	iph->ip_len = htons(ip_len);
2163 	iph->ip_id = htons(ip_id);
2164 	iph->ip_off = htons(ip_off);
2165 	iph->ip_ttl = 64;
2166 	iph->ip_p = ip_p;
2167 	iph->ip_sum = htons(ip_sum);
2168 	iph->ip_src.s_addr = htonl(0xc0a80a04);
2169 	iph->ip_dst.s_addr = htonl(0xc0a80a05);
2170 }
2171 
2172 /**
2173  * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
2174  * ICMP packet
2175  */
2176 static void
2177 xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
2178 {
2179 	const size_t icmp_len = 64;	/* set by ping(1) */
2180 	const size_t pkt_len = sizeof(struct ether_header) +
2181 		sizeof(struct ip) + icmp_len;
2182 	struct mbuf *mbufc;
2183 	struct ether_header *eh;
2184 	struct ip *iph;
2185 	struct icmp *icmph;
2186 	unsigned char pkt_orig[icmp_len];
2187 	uint32_t *tv_field;
2188 	uint8_t *data_payload;
2189 	int i;
2190 	const uint16_t ICMP_CSUM = 0xaed7;
2191 	const uint16_t IP_CSUM = 0xe533;
2192 
2193 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2194 	/* Fill in an example ICMP ping request */
2195 	eh = mtod(mbufc, struct ether_header*);
2196 	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
2197 	iph = (struct ip*)(eh + 1);
2198 	icmph = (struct icmp*)(iph + 1);
2199 	icmph->icmp_type = ICMP_ECHO;
2200 	icmph->icmp_code = 0;
2201 	icmph->icmp_cksum = htons(ICMP_CSUM);
2202 	icmph->icmp_id = htons(31492);
2203 	icmph->icmp_seq = htons(0);
2204 	/*
2205 	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
	 * For this test, we set the two 32-bit words directly instead.
2207 	 */
2208 	tv_field = (uint32_t*)(&(icmph->icmp_hun));
2209 	tv_field[0] = 0x4f02cfac;
2210 	tv_field[1] = 0x0007c46a;
	/*
	 * The following payload bytes are an incrementing 8-bit pattern,
	 * starting at 8
	 */
2214 	data_payload = (uint8_t*)(&tv_field[2]);
2215 	for (i = 8; i < 37; i++) {
2216 		*data_payload++ = i;
2217 	}
2218 
2219 	/* fill in the length field */
2220 	mbufc->m_len = pkt_len;
2221 	mbufc->m_pkthdr.len = pkt_len;
2222 	/* indicate that the netfront uses hw-assisted checksums */
2223 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2224 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2225 
	bcopy(icmph, pkt_orig, icmp_len);
2227 	/* Function under test */
2228 	xnb_add_mbuf_cksum(mbufc);
2229 
2230 	/* Check the IP checksum */
2231 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2232 
2233 	/* Check that the ICMP packet did not change */
	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
2235 	m_freem(mbufc);
2236 }
2237 
2238 /**
2239  * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
2240  * UDP packet
2241  */
2242 static void
2243 xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
2244 {
2245 	const size_t udp_len = 16;
2246 	const size_t pkt_len = sizeof(struct ether_header) +
2247 		sizeof(struct ip) + udp_len;
2248 	struct mbuf *mbufc;
2249 	struct ether_header *eh;
2250 	struct ip *iph;
2251 	struct udphdr *udp;
2252 	uint8_t *data_payload;
2253 	const uint16_t IP_CSUM = 0xe56b;
2254 	const uint16_t UDP_CSUM = 0xdde2;
2255 
2256 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
2258 	eh = mtod(mbufc, struct ether_header*);
2259 	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
2260 	iph = (struct ip*)(eh + 1);
2261 	udp = (struct udphdr*)(iph + 1);
2262 	udp->uh_sport = htons(0x51ae);
2263 	udp->uh_dport = htons(0x08ae);
2264 	udp->uh_ulen = htons(udp_len);
2265 	udp->uh_sum = htons(0xbaad);  /* xnb_add_mbuf_cksum will fill this in */
2266 	data_payload = (uint8_t*)(udp + 1);
2267 	data_payload[0] = 'F';
2268 	data_payload[1] = 'r';
2269 	data_payload[2] = 'e';
2270 	data_payload[3] = 'e';
2271 	data_payload[4] = 'B';
2272 	data_payload[5] = 'S';
2273 	data_payload[6] = 'D';
2274 	data_payload[7] = '\n';
2275 
2276 	/* fill in the length field */
2277 	mbufc->m_len = pkt_len;
2278 	mbufc->m_pkthdr.len = pkt_len;
2279 	/* indicate that the netfront uses hw-assisted checksums */
2280 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2281 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2282 
2283 	/* Function under test */
2284 	xnb_add_mbuf_cksum(mbufc);
2285 
2286 	/* Check the checksums */
2287 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2288 	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
2289 
2290 	m_freem(mbufc);
2291 }
2292 
2293 /**
2294  * Helper function that populates a TCP packet used by all of the
2295  * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
2296  * large enough
2297  */
2298 static void
2299 xnb_fill_tcp(struct mbuf *m)
2300 {
2301 	struct ether_header *eh;
2302 	struct ip *iph;
2303 	struct tcphdr *tcp;
2304 	uint32_t *options;
2305 	uint8_t *data_payload;
2306 
2307 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2308 	eh = mtod(m, struct ether_header*);
2309 	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
2310 	iph = (struct ip*)(eh + 1);
2311 	tcp = (struct tcphdr*)(iph + 1);
2312 	tcp->th_sport = htons(0x9cd9);
2313 	tcp->th_dport = htons(2222);
2314 	tcp->th_seq = htonl(0x00f72b10);
2315 	tcp->th_ack = htonl(0x7f37ba6c);
2316 	tcp->th_x2 = 0;
2317 	tcp->th_off = 8;
2318 	tcp->th_flags = 0x18;
2319 	tcp->th_win = htons(0x410);
2320 	/* th_sum is incorrect; will be inserted by function under test */
2321 	tcp->th_sum = htons(0xbaad);
2322 	tcp->th_urp = htons(0);
2323 	/*
2324 	 * The following 12 bytes of options encode:
2325 	 * [nop, nop, TS val 33247 ecr 3457687679]
2326 	 */
2327 	options = (uint32_t*)(tcp + 1);
2328 	options[0] = htonl(0x0101080a);
2329 	options[1] = htonl(0x000081df);
2330 	options[2] = htonl(0xce18207f);
2331 	data_payload = (uint8_t*)(&options[3]);
2332 	data_payload[0] = 'F';
2333 	data_payload[1] = 'r';
2334 	data_payload[2] = 'e';
2335 	data_payload[3] = 'e';
2336 	data_payload[4] = 'B';
2337 	data_payload[5] = 'S';
2338 	data_payload[6] = 'D';
2339 	data_payload[7] = '\n';
2340 }
2341 
2342 /**
2343  * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
2344  * packet
2345  */
2346 static void
2347 xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
2348 {
2349 	const size_t payload_len = 8;
2350 	const size_t tcp_options_len = 12;
2351 	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2352 	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2353 	struct mbuf *mbufc;
2354 	struct ether_header *eh;
2355 	struct ip *iph;
2356 	struct tcphdr *tcp;
2357 	const uint16_t IP_CSUM = 0xa55a;
2358 	const uint16_t TCP_CSUM = 0x2f64;
2359 
2360 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2361 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2362 	xnb_fill_tcp(mbufc);
2363 	eh = mtod(mbufc, struct ether_header*);
2364 	iph = (struct ip*)(eh + 1);
2365 	tcp = (struct tcphdr*)(iph + 1);
2366 
2367 	/* fill in the length field */
2368 	mbufc->m_len = pkt_len;
2369 	mbufc->m_pkthdr.len = pkt_len;
2370 	/* indicate that the netfront uses hw-assisted checksums */
2371 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2372 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2373 
2374 	/* Function under test */
2375 	xnb_add_mbuf_cksum(mbufc);
2376 
2377 	/* Check the checksums */
2378 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2379 	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2380 
2381 	m_freem(mbufc);
2382 }
2383 
2384 /**
2385  * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
2386  */
2387 static void
2388 xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
2389 {
2390 	const size_t payload_len = 8;
2391 	const size_t tcp_options_len = 12;
2392 	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2393 	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2394 	struct mbuf *mbufc;
2395 	struct ether_header *eh;
2396 	struct ip *iph;
2397 	struct tcphdr *tcp;
	/*
	 * Use deliberately bad checksums and verify that they don't get
	 * corrected by xnb_add_mbuf_cksum.
	 */
2400 	const uint16_t IP_CSUM = 0xdead;
2401 	const uint16_t TCP_CSUM = 0xbeef;
2402 
2403 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2404 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2405 	xnb_fill_tcp(mbufc);
2406 	eh = mtod(mbufc, struct ether_header*);
2407 	iph = (struct ip*)(eh + 1);
2408 	iph->ip_sum = htons(IP_CSUM);
2409 	tcp = (struct tcphdr*)(iph + 1);
2410 	tcp->th_sum = htons(TCP_CSUM);
2411 
2412 	/* fill in the length field */
2413 	mbufc->m_len = pkt_len;
2414 	mbufc->m_pkthdr.len = pkt_len;
2415 	/* indicate that the netfront does not use hw-assisted checksums */
2416 	mbufc->m_pkthdr.csum_flags = 0;
2417 
2418 	/* Function under test */
2419 	xnb_add_mbuf_cksum(mbufc);
2420 
2421 	/* Check that the checksums didn't change */
2422 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2423 	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2424 
2425 	m_freem(mbufc);
2426 }
2427 #endif /* INET || INET6 */
2428 
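/*
 * The remaining tests exercise the kernel sscanf(9) conversions that the
 * driver relies on, presumably for parsing values read from the XenStore.
 * Each test scribbles a sentinel pattern around the destination to catch
 * out-of-bounds stores.
 */
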
2429 /**
2430  * sscanf on unsigned chars
2431  */
2432 static void
2433 xnb_sscanf_hhu(char *buffer, size_t buflen)
2434 {
2435 	const char mystr[] = "137";
2436 	uint8_t dest[12];
2437 	int i;
2438 
2439 	for (i = 0; i < 12; i++)
2440 		dest[i] = 'X';
2441 
2442 	XNB_ASSERT(sscanf(mystr, "%hhu", &dest[4]) == 1);
2443 	for (i = 0; i < 12; i++)
2444 		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
2445 }
2446 
2447 /**
2448  * sscanf on signed chars
2449  */
2450 static void
2451 xnb_sscanf_hhd(char *buffer, size_t buflen)
2452 {
2453 	const char mystr[] = "-27";
2454 	int8_t dest[12];
2455 	int i;
2456 
2457 	for (i = 0; i < 12; i++)
2458 		dest[i] = 'X';
2459 
2460 	XNB_ASSERT(sscanf(mystr, "%hhd", &dest[4]) == 1);
2461 	for (i = 0; i < 12; i++)
2462 		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
2463 }
2464 
2465 /**
2466  * sscanf on signed long longs
2467  */
2468 static void
2469 xnb_sscanf_lld(char *buffer, size_t buflen)
2470 {
2471 	const char mystr[] = "-123456789012345";	/* about -2**47 */
2472 	long long dest[3];
2473 	int i;
2474 
2475 	for (i = 0; i < 3; i++)
2476 		dest[i] = (long long)0xdeadbeefdeadbeef;
2477 
2478 	XNB_ASSERT(sscanf(mystr, "%lld", &dest[1]) == 1);
2479 	for (i = 0; i < 3; i++)
2480 		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2481 		    -123456789012345));
2482 }
2483 
2484 /**
2485  * sscanf on unsigned long longs
2486  */
2487 static void
2488 xnb_sscanf_llu(char *buffer, size_t buflen)
2489 {
2490 	const char mystr[] = "12802747070103273189";
2491 	unsigned long long dest[3];
2492 	int i;
2493 
2494 	for (i = 0; i < 3; i++)
2495 		dest[i] = (long long)0xdeadbeefdeadbeef;
2496 
2497 	XNB_ASSERT(sscanf(mystr, "%llu", &dest[1]) == 1);
2498 	for (i = 0; i < 3; i++)
2499 		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2500 		    12802747070103273189ull));
2501 }
2502 
2503 /**
 * sscanf storing the consumed-character count into an unsigned char via %hhn
2505  */
2506 static void
2507 xnb_sscanf_hhn(char *buffer, size_t buflen)
2508 {
2509 	const char mystr[] =
2510 	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2511 	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2512 	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
2513 	unsigned char dest[12];
2514 	int i;
2515 
2516 	for (i = 0; i < 12; i++)
2517 		dest[i] = (unsigned char)'X';
2518 
2519 	XNB_ASSERT(sscanf(mystr,
2520 	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2521 	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2522 	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]) == 0);
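	/*
	 * The literal part of the format consumes 64 + 64 + 32 = 160
	 * characters, so %hhn should store 160 in dest[4] while returning
	 * zero conversions.
	 */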
2523 	for (i = 0; i < 12; i++)
2524 		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
2525 }
2526