xref: /freebsd/sys/dev/xen/netback/netback_unit_tests.c (revision f4b37ed0f8b307b1f3f0f630ca725d68f1dff30d)
1 /*-
2  * Copyright (c) 2009-2011 Spectra Logic Corporation
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions, and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    substantially similar to the "NO WARRANTY" disclaimer below
13  *    ("Disclaimer") and any redistribution must be conditioned upon
14  *    including a substantially similar Disclaimer requirement for further
15  *    binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGES.
29  *
30  * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
31  *          Alan Somers         (Spectra Logic Corporation)
32  *          John Suykerbuyk     (Spectra Logic Corporation)
33  */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 /**
39  * \file netback_unit_tests.c
40  *
41  * \brief Unit tests for the Xen netback driver.
42  *
43  * Due to the driver's use of static functions, these tests cannot be compiled
44  * standalone; they must be #include'd from the driver's .c file.
45  */
46 
47 
48 /** Helper macro used to snprintf to a buffer and update the buffer pointer */
49 #define	SNCATF(buffer, buflen, ...) do {				\
50 	size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);	\
51 	buffer += new_chars;						\
52 	/* be careful; snprintf's return value can be  > buflen */	\
53 	buflen -= MIN(buflen, new_chars);				\
54 } while (0)
55 
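/*
 * Illustrative use of SNCATF (a hypothetical sketch; nothing below calls
 * this function).  Each invocation appends formatted text and advances
 * the (buffer, buflen) pair, so the pair always describes the remaining
 * free space and buflen can never underflow.
 */
static void __unused
sncatf_example(char *buffer, size_t buflen)
{
	SNCATF(buffer, buflen, "%d tests ", 3);	/* writes "3 tests " */
	SNCATF(buffer, buflen, "failed\n");	/* appends "failed\n" */
}
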
56 /* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
57 #define	STRINGIFY(x) #x
58 #define	TOSTRING(x) STRINGIFY(x)
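/*
 * For example, STRINGIFY(__LINE__) would yield the literal "__LINE__",
 * whereas TOSTRING(__LINE__) expands the macro first and yields the
 * current line number as a string.
 */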
59 
60 /**
61  * Writes an error message to buffer if cond is false.
62  * Note the implied parameters buffer and buflen, which must be in
63  * scope wherever the macro is used.
64  */
65 #define	XNB_ASSERT(cond) ({						\
66 	int passed = (cond);						\
67 	char *_buffer = (buffer);					\
68 	size_t _buflen = (buflen);					\
69 	if (! passed) {							\
70 		strlcat(_buffer, __func__, _buflen);			\
71 		strlcat(_buffer, ":" TOSTRING(__LINE__) 		\
72 		  " Assertion Error: " #cond "\n", _buflen);		\
73 	}								\
74 	})
75 
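/*
 * A minimal illustrative testcase (hypothetical; not part of the suite
 * below) showing the XNB_ASSERT convention: a passing condition writes
 * nothing, while a failing one appends a "func:line Assertion Error"
 * message to the implied buffer, which the runner counts as a failure.
 */
static void __unused
xnb_assert_example(char *buffer, size_t buflen)
{
	XNB_ASSERT(0 == 0);	/* passes; buffer is left untouched */
	XNB_ASSERT(0 == 1);	/* fails; appends an error message */
}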
76 
77 /**
78  * The signature used by all testcases.  If the test writes anything
79  * to buffer, then it will be considered a failure
80  * \param buffer	Return storage for error messages
81  * \param buflen	The space available in the buffer
82  */
83 typedef void testcase_t(char *buffer, size_t buflen);
84 
85 /**
86  * Signature used by setup functions
87  * \return nonzero on error
88  */
89 typedef int setup_t(void);
90 
91 typedef void teardown_t(void);
92 
93 /** A simple test fixture comprising setup, teardown, and test */
94 struct test_fixture {
95 	/** Will be run before the test to allocate and initialize variables */
96 	setup_t *setup;
97 
98 	/** Will be run if setup succeeds */
99 	testcase_t *test;
100 
101 	/** Cleans up test data whether or not the setup succeeded */
102 	teardown_t *teardown;
103 };
104 
105 typedef struct test_fixture test_fixture_t;
106 
107 static int	xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
108 static int	xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
109 				     char *buffer, size_t buflen);
110 
111 static int __unused
112 null_setup(void) { return 0; }
113 
114 static void __unused
115 null_teardown(void) { }
116 
117 static setup_t setup_pvt_data;
118 static teardown_t teardown_pvt_data;
119 static testcase_t xnb_ring2pkt_emptyring;
120 static testcase_t xnb_ring2pkt_1req;
121 static testcase_t xnb_ring2pkt_2req;
122 static testcase_t xnb_ring2pkt_3req;
123 static testcase_t xnb_ring2pkt_extra;
124 static testcase_t xnb_ring2pkt_partial;
125 static testcase_t xnb_ring2pkt_wraps;
126 static testcase_t xnb_txpkt2rsp_emptypkt;
127 static testcase_t xnb_txpkt2rsp_1req;
128 static testcase_t xnb_txpkt2rsp_extra;
129 static testcase_t xnb_txpkt2rsp_long;
130 static testcase_t xnb_txpkt2rsp_invalid;
131 static testcase_t xnb_txpkt2rsp_error;
132 static testcase_t xnb_txpkt2rsp_wraps;
133 static testcase_t xnb_pkt2mbufc_empty;
134 static testcase_t xnb_pkt2mbufc_short;
135 static testcase_t xnb_pkt2mbufc_csum;
136 static testcase_t xnb_pkt2mbufc_1cluster;
137 static testcase_t xnb_pkt2mbufc_largecluster;
138 static testcase_t xnb_pkt2mbufc_2cluster;
139 static testcase_t xnb_txpkt2gnttab_empty;
140 static testcase_t xnb_txpkt2gnttab_short;
141 static testcase_t xnb_txpkt2gnttab_2req;
142 static testcase_t xnb_txpkt2gnttab_2cluster;
143 static testcase_t xnb_update_mbufc_short;
144 static testcase_t xnb_update_mbufc_2req;
145 static testcase_t xnb_update_mbufc_2cluster;
146 static testcase_t xnb_mbufc2pkt_empty;
147 static testcase_t xnb_mbufc2pkt_short;
148 static testcase_t xnb_mbufc2pkt_1cluster;
149 static testcase_t xnb_mbufc2pkt_2short;
150 static testcase_t xnb_mbufc2pkt_long;
151 static testcase_t xnb_mbufc2pkt_extra;
152 static testcase_t xnb_mbufc2pkt_nospace;
153 static testcase_t xnb_rxpkt2gnttab_empty;
154 static testcase_t xnb_rxpkt2gnttab_short;
155 static testcase_t xnb_rxpkt2gnttab_2req;
156 static testcase_t xnb_rxpkt2rsp_empty;
157 static testcase_t xnb_rxpkt2rsp_short;
158 static testcase_t xnb_rxpkt2rsp_extra;
159 static testcase_t xnb_rxpkt2rsp_2short;
160 static testcase_t xnb_rxpkt2rsp_2slots;
161 static testcase_t xnb_rxpkt2rsp_copyerror;
162 static testcase_t xnb_sscanf_llu;
163 static testcase_t xnb_sscanf_lld;
164 static testcase_t xnb_sscanf_hhu;
165 static testcase_t xnb_sscanf_hhd;
166 static testcase_t xnb_sscanf_hhn;
167 
168 #if defined(INET) || defined(INET6)
169 /* TODO: add test cases for xnb_add_mbuf_cksum for IPv6 TCP and UDP */
170 static testcase_t xnb_add_mbuf_cksum_arp;
171 static testcase_t xnb_add_mbuf_cksum_tcp;
172 static testcase_t xnb_add_mbuf_cksum_udp;
173 static testcase_t xnb_add_mbuf_cksum_icmp;
174 static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
175 static void	xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
176 				   uint16_t ip_id, uint16_t ip_p,
177 				   uint16_t ip_off, uint16_t ip_sum);
178 static void	xnb_fill_tcp(struct mbuf *m);
179 #endif /* INET || INET6 */
180 
181 /** Private data used by unit tests */
182 static struct {
183 	gnttab_copy_table 	gnttab;
184 	netif_rx_back_ring_t	rxb;
185 	netif_rx_front_ring_t	rxf;
186 	netif_tx_back_ring_t	txb;
187 	netif_tx_front_ring_t	txf;
188 	struct ifnet*		ifp;
189 	netif_rx_sring_t*	rxs;
190 	netif_tx_sring_t*	txs;
191 } xnb_unit_pvt;
192 
193 static inline void safe_m_freem(struct mbuf **ppMbuf) {
194 	if (*ppMbuf != NULL) {
195 		m_freem(*ppMbuf);
196 		*ppMbuf = NULL;
197 	}
198 }
199 
200 /**
201  * The unit test runner.  It will run every supplied test and return an
202  * output message as a string
203  * \param tests		An array of tests.  Every test will be attempted.
204  * \param ntests	The length of tests
205  * \param buffer	Return storage for the result string
206  * \param buflen	The length of buffer
207  * \return		The number of tests that failed
208  */
209 static int
210 xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
211     		     size_t buflen)
212 {
213 	int i;
214 	int n_passes;
215 	int n_failures = 0;
216 
217 	for (i = 0; i < ntests; i++) {
218 		int error = tests[i].setup();
219 		if (error != 0) {
220 			SNCATF(buffer, buflen,
221 			    "Setup failed for test idx %d\n", i);
222 			n_failures++;
223 		} else {
224 			size_t new_chars;
225 
226 			tests[i].test(buffer, buflen);
227 			new_chars = strnlen(buffer, buflen);
228 			buffer += new_chars;
229 			buflen -= new_chars;
230 
231 			if (new_chars > 0) {
232 				n_failures++;
233 			}
234 		}
235 		tests[i].teardown();
236 	}
237 
238 	n_passes = ntests - n_failures;
239 	if (n_passes > 0) {
240 		SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
241 	}
242 	if (n_failures > 0) {
243 		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
244 	}
245 
246 	return n_failures;
247 }
248 
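/*
 * Illustrative invocation of the runner (hypothetical; the real entry
 * point is the sysctl handler xnb_unit_test_main() below):
 *
 *	static char results[1024];
 *	test_fixture_t const suite[] = {
 *		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
 *		{null_setup, xnb_sscanf_llu, null_teardown},
 *	};
 *	results[0] = '\0';
 *	xnb_unit_test_runner(suite, nitems(suite), results, sizeof(results));
 */
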
249 /** Number of unit tests.  Must match the length of the tests array below */
250 #define	TOTAL_TESTS	(53)
251 /**
252  * Max memory available for returning results.  400 chars/test should give
253  * enough space for a five line error message for every test
254  */
255 #define	TOTAL_BUFLEN	(400 * TOTAL_TESTS + 2)
256 
257 /**
258  * Called from userspace by a sysctl.  Runs all internal unit tests, and
259  * returns the results to userspace as a string
260  * \param oidp	unused
261  * \param arg1	pointer to an xnb_softc for a specific xnb device
262  * \param arg2	unused
263  * \param req	sysctl access structure
264  * \return a string via the special SYSCTL_OUT macro.
265  */
266 
267 static int
268 xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
269 	test_fixture_t const tests[TOTAL_TESTS] = {
270 		{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
271 		{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
272 		{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
273 		{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
274 		{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
275 		{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
276 		{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
277 		{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
278 		{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
279 		{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
280 		{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
281 		{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
282 		{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
283 		{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
284 		{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
285 		{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
286 		{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
287 		{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
288 		{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
289 		{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
290 		{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
291 		{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
292 		{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
293 		{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
294 		{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
295 		{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
296 		{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
297 		{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
298 		{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
299 		{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
300 		{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
301 		{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
302 		{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
303 		{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
304 		{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
305 		{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
306 		{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
307 		{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
308 		{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
309 		{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
310 		{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
311 		{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
312 		{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
313 #if defined(INET) || defined(INET6)
314 		{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
315 		{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
316 		{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
317 		{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
318 		{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
319 #endif
320 		{null_setup, xnb_sscanf_hhd, null_teardown},
321 		{null_setup, xnb_sscanf_hhu, null_teardown},
322 		{null_setup, xnb_sscanf_lld, null_teardown},
323 		{null_setup, xnb_sscanf_llu, null_teardown},
324 		{null_setup, xnb_sscanf_hhn, null_teardown},
325 	};
326 	/**
327 	 * results is static so that the data will persist after this function
328 	 * returns.  The sysctl code expects us to return a constant string.
329 	 * \todo: the static variable is not thread safe.  Put a mutex around
330 	 * it.
331 	 */
332 	static char results[TOTAL_BUFLEN];
333 
334 	/* empty the result strings */
335 	results[0] = 0;
336 	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);
337 
338 	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
339 }
340 
341 static int
342 setup_pvt_data(void)
343 {
344 	int error = 0;
345 
346 	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));
347 
348 	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
349 	if (xnb_unit_pvt.txs != NULL) {
350 		SHARED_RING_INIT(xnb_unit_pvt.txs);
351 		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
352 		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
353 	} else {
354 		error = 1;
355 	}
356 
357 	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
358 	if (xnb_unit_pvt.ifp == NULL) {
359 		error = 1;
360 	}
361 
362 	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
363 	if (xnb_unit_pvt.rxs != NULL) {
364 		SHARED_RING_INIT(xnb_unit_pvt.rxs);
365 		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
366 		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
367 	} else {
368 		error = 1;
369 	}
370 
371 	return error;
372 }
373 
374 static void
375 teardown_pvt_data(void)
376 {
377 	if (xnb_unit_pvt.txs != NULL) {
378 		free(xnb_unit_pvt.txs, M_XENNETBACK);
379 	}
380 	if (xnb_unit_pvt.rxs != NULL) {
381 		free(xnb_unit_pvt.rxs, M_XENNETBACK);
382 	}
383 	if (xnb_unit_pvt.ifp != NULL) {
384 		if_free(xnb_unit_pvt.ifp);
385 	}
386 }
387 
388 /**
389  * Verify that xnb_ring2pkt will not consume any requests from an empty ring
390  */
391 static void
392 xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
393 {
394 	struct xnb_pkt pkt;
395 	int num_consumed;
396 
397 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
398 	                            xnb_unit_pvt.txb.req_cons);
399 	XNB_ASSERT(num_consumed == 0);
400 }
401 
402 /**
403  * Verify that xnb_ring2pkt can convert a single request packet correctly
404  */
405 static void
406 xnb_ring2pkt_1req(char *buffer, size_t buflen)
407 {
408 	struct xnb_pkt pkt;
409 	int num_consumed;
410 	struct netif_tx_request *req;
411 
412 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
413 	    xnb_unit_pvt.txf.req_prod_pvt);
414 
415 	req->flags = 0;
416 	req->size = 69;	/* arbitrary number for test */
417 	xnb_unit_pvt.txf.req_prod_pvt++;
418 
419 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
420 
421 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
422 	                            xnb_unit_pvt.txb.req_cons);
423 	XNB_ASSERT(num_consumed == 1);
424 	XNB_ASSERT(pkt.size == 69);
425 	XNB_ASSERT(pkt.car_size == 69);
426 	XNB_ASSERT(pkt.flags == 0);
427 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
428 	XNB_ASSERT(pkt.list_len == 1);
429 	XNB_ASSERT(pkt.car == 0);
430 }
431 
432 /**
433  * Verify that xnb_ring2pkt can convert a two request packet correctly.
434  * This tests handling of the MORE_DATA flag and cdr
435  */
436 static void
437 xnb_ring2pkt_2req(char *buffer, size_t buflen)
438 {
439 	struct xnb_pkt pkt;
440 	int num_consumed;
441 	struct netif_tx_request *req;
442 	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
443 
444 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
445 	    xnb_unit_pvt.txf.req_prod_pvt);
446 	req->flags = NETTXF_more_data;
447 	req->size = 100;
448 	xnb_unit_pvt.txf.req_prod_pvt++;
449 
450 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
451 	    xnb_unit_pvt.txf.req_prod_pvt);
452 	req->flags = 0;
453 	req->size = 40;
454 	xnb_unit_pvt.txf.req_prod_pvt++;
455 
456 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
457 
458 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
459 	                            xnb_unit_pvt.txb.req_cons);
460 	XNB_ASSERT(num_consumed == 2);
461 	XNB_ASSERT(pkt.size == 100);
462 	XNB_ASSERT(pkt.car_size == 60);
463 	XNB_ASSERT(pkt.flags == 0);
464 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
465 	XNB_ASSERT(pkt.list_len == 2);
466 	XNB_ASSERT(pkt.car == start_idx);
467 	XNB_ASSERT(pkt.cdr == start_idx + 1);
468 }
469 
470 /**
471  * Verify that xnb_ring2pkt can convert a three request packet correctly
472  */
473 static void
474 xnb_ring2pkt_3req(char *buffer, size_t buflen)
475 {
476 	struct xnb_pkt pkt;
477 	int num_consumed;
478 	struct netif_tx_request *req;
479 	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
480 
481 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
482 	    xnb_unit_pvt.txf.req_prod_pvt);
483 	req->flags = NETTXF_more_data;
484 	req->size = 200;
485 	xnb_unit_pvt.txf.req_prod_pvt++;
486 
487 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
488 	    xnb_unit_pvt.txf.req_prod_pvt);
489 	req->flags = NETTXF_more_data;
490 	req->size = 40;
491 	xnb_unit_pvt.txf.req_prod_pvt++;
492 
493 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
494 	    xnb_unit_pvt.txf.req_prod_pvt);
495 	req->flags = 0;
496 	req->size = 50;
497 	xnb_unit_pvt.txf.req_prod_pvt++;
498 
499 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
500 
501 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
502 	                            xnb_unit_pvt.txb.req_cons);
503 	XNB_ASSERT(num_consumed == 3);
504 	XNB_ASSERT(pkt.size == 200);
505 	XNB_ASSERT(pkt.car_size == 110);
506 	XNB_ASSERT(pkt.flags == 0);
507 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
508 	XNB_ASSERT(pkt.list_len == 3);
509 	XNB_ASSERT(pkt.car == start_idx);
510 	XNB_ASSERT(pkt.cdr == start_idx + 1);
511 	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
512 }
513 
514 /**
515  * Verify that xnb_ring2pkt can read extra info
516  */
517 static void
518 xnb_ring2pkt_extra(char *buffer, size_t buflen)
519 {
520 	struct xnb_pkt pkt;
521 	int num_consumed;
522 	struct netif_tx_request *req;
523 	struct netif_extra_info *ext;
524 	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
525 
526 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
527 	    xnb_unit_pvt.txf.req_prod_pvt);
528 	req->flags = NETTXF_extra_info | NETTXF_more_data;
529 	req->size = 150;
530 	xnb_unit_pvt.txf.req_prod_pvt++;
531 
532 	ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
533 	    xnb_unit_pvt.txf.req_prod_pvt);
534 	ext->flags = 0;
535 	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
536 	ext->u.gso.size = 250;
537 	ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
538 	ext->u.gso.features = 0;
539 	xnb_unit_pvt.txf.req_prod_pvt++;
540 
541 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
542 	    xnb_unit_pvt.txf.req_prod_pvt);
543 	req->flags = 0;
544 	req->size = 50;
545 	xnb_unit_pvt.txf.req_prod_pvt++;
546 
547 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
548 
549 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
550 	                            xnb_unit_pvt.txb.req_cons);
551 	XNB_ASSERT(num_consumed == 3);
552 	XNB_ASSERT(pkt.extra.flags == 0);
553 	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
554 	XNB_ASSERT(pkt.extra.u.gso.size == 250);
555 	XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
556 	XNB_ASSERT(pkt.size == 150);
557 	XNB_ASSERT(pkt.car_size == 100);
558 	XNB_ASSERT(pkt.flags == NETTXF_extra_info);
559 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
560 	XNB_ASSERT(pkt.list_len == 2);
561 	XNB_ASSERT(pkt.car == start_idx);
562 	XNB_ASSERT(pkt.cdr == start_idx + 2);
563 	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
564 }
565 
566 /**
567  * Verify that xnb_ring2pkt will consume no requests if the entire packet is
568  * not yet in the ring
569  */
570 static void
571 xnb_ring2pkt_partial(char *buffer, size_t buflen)
572 {
573 	struct xnb_pkt pkt;
574 	int num_consumed;
575 	struct netif_tx_request *req;
576 
577 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
578 	    xnb_unit_pvt.txf.req_prod_pvt);
579 	req->flags = NETTXF_more_data;
580 	req->size = 150;
581 	xnb_unit_pvt.txf.req_prod_pvt++;
582 
583 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
584 
585 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
586 	                            xnb_unit_pvt.txb.req_cons);
587 	XNB_ASSERT(num_consumed == 0);
588 	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
589 }
590 
591 /**
592  * Verify that xnb_ring2pkt can read a packet whose requests wrap around
593  * the end of the ring
594  */
595 static void
596 xnb_ring2pkt_wraps(char *buffer, size_t buflen)
597 {
598 	struct xnb_pkt pkt;
599 	int num_consumed;
600 	struct netif_tx_request *req;
601 	unsigned int rsize;
602 
603 	/*
604 	 * Manually tweak the ring indices to create a ring with no responses
605 	 * and the next request slot at position 2 from the end
606 	 */
607 	rsize = RING_SIZE(&xnb_unit_pvt.txf);
608 	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
609 	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
610 	xnb_unit_pvt.txs->req_prod = rsize - 2;
611 	xnb_unit_pvt.txs->req_event = rsize - 1;
612 	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
613 	xnb_unit_pvt.txs->rsp_event = rsize - 1;
614 	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
615 	xnb_unit_pvt.txb.req_cons = rsize - 2;
616 
617 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
618 	    xnb_unit_pvt.txf.req_prod_pvt);
619 	req->flags = NETTXF_more_data;
620 	req->size = 550;
621 	xnb_unit_pvt.txf.req_prod_pvt++;
622 
623 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
624 	    xnb_unit_pvt.txf.req_prod_pvt);
625 	req->flags = NETTXF_more_data;
626 	req->size = 100;
627 	xnb_unit_pvt.txf.req_prod_pvt++;
628 
629 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
630 	    xnb_unit_pvt.txf.req_prod_pvt);
631 	req->flags = 0;
632 	req->size = 50;
633 	xnb_unit_pvt.txf.req_prod_pvt++;
634 
635 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
636 
637 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
638 	                            xnb_unit_pvt.txb.req_cons);
639 	XNB_ASSERT(num_consumed == 3);
640 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
641 	XNB_ASSERT(pkt.list_len == 3);
642 	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
643 }
644 
645 
646 /**
647  * xnb_txpkt2rsp should do nothing for an empty packet
648  */
649 static void
650 xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
651 {
652 	int num_consumed;
653 	struct xnb_pkt pkt;
654 	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
655 	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
656 	pkt.list_len = 0;
657 
658 	/* must call xnb_ring2pkt just to initialize pkt */
659 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
660 	                            xnb_unit_pvt.txb.req_cons);
661 	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
662 	XNB_ASSERT(
663 	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
664 	XNB_ASSERT(
665 	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
666 }
667 
668 /**
669  * xnb_txpkt2rsp responding to one request
670  */
671 static void
672 xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
673 {
674 	uint16_t num_consumed;
675 	struct xnb_pkt pkt;
676 	struct netif_tx_request *req;
677 	struct netif_tx_response *rsp;
678 
679 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
680 	    xnb_unit_pvt.txf.req_prod_pvt);
681 	req->size = 1000;
682 	req->flags = 0;
683 	xnb_unit_pvt.txf.req_prod_pvt++;
684 
685 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
686 
687 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
688 	                            xnb_unit_pvt.txb.req_cons);
689 	xnb_unit_pvt.txb.req_cons += num_consumed;
690 
691 	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
692 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
693 
694 	XNB_ASSERT(
695 	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
696 	XNB_ASSERT(rsp->id == req->id);
697 	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
698 }
699 
700 /**
701  * xnb_txpkt2rsp responding to 1 data request and 1 extra info
702  */
703 static void
704 xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
705 {
706 	uint16_t num_consumed;
707 	struct xnb_pkt pkt;
708 	struct netif_tx_request *req;
709 	netif_extra_info_t *ext;
710 	struct netif_tx_response *rsp;
711 
712 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
713 	    xnb_unit_pvt.txf.req_prod_pvt);
714 	req->size = 1000;
715 	req->flags = NETTXF_extra_info;
716 	req->id = 69;
717 	xnb_unit_pvt.txf.req_prod_pvt++;
718 
719 	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
720 	    xnb_unit_pvt.txf.req_prod_pvt);
721 	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
722 	ext->flags = 0;
723 	xnb_unit_pvt.txf.req_prod_pvt++;
724 
725 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
726 
727 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
728 	                            xnb_unit_pvt.txb.req_cons);
729 	xnb_unit_pvt.txb.req_cons += num_consumed;
730 
731 	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
732 
733 	XNB_ASSERT(
734 	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
735 
736 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
737 	XNB_ASSERT(rsp->id == req->id);
738 	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
739 
740 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
741 	    xnb_unit_pvt.txf.rsp_cons + 1);
742 	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
743 }
744 
745 /**
746  * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
747  */
748 static void
749 xnb_txpkt2rsp_long(char *buffer, size_t buflen)
750 {
751 	uint16_t num_consumed;
752 	struct xnb_pkt pkt;
753 	struct netif_tx_request *req;
754 	netif_extra_info_t *ext;
755 	struct netif_tx_response *rsp;
756 
757 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
758 	    xnb_unit_pvt.txf.req_prod_pvt);
759 	req->size = 1000;
760 	req->flags = NETTXF_extra_info | NETTXF_more_data;
761 	req->id = 254;
762 	xnb_unit_pvt.txf.req_prod_pvt++;
763 
764 	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
765 	    xnb_unit_pvt.txf.req_prod_pvt);
766 	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
767 	ext->flags = 0;
768 	xnb_unit_pvt.txf.req_prod_pvt++;
769 
770 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
771 	    xnb_unit_pvt.txf.req_prod_pvt);
772 	req->size = 300;
773 	req->flags = NETTXF_more_data;
774 	req->id = 1034;
775 	xnb_unit_pvt.txf.req_prod_pvt++;
776 
777 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
778 	    xnb_unit_pvt.txf.req_prod_pvt);
779 	req->size = 400;
780 	req->flags = 0;
781 	req->id = 34;
782 	xnb_unit_pvt.txf.req_prod_pvt++;
783 
784 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
785 
786 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
787 	                            xnb_unit_pvt.txb.req_cons);
788 	xnb_unit_pvt.txb.req_cons += num_consumed;
789 
790 	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
791 
792 	XNB_ASSERT(
793 	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
794 
795 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
796 	XNB_ASSERT(rsp->id ==
797 	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
798 	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
799 
800 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
801 	    xnb_unit_pvt.txf.rsp_cons + 1);
802 	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
803 
804 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
805 	    xnb_unit_pvt.txf.rsp_cons + 2);
806 	XNB_ASSERT(rsp->id ==
807 	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
808 	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
809 
810 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
811 	    xnb_unit_pvt.txf.rsp_cons + 3);
812 	XNB_ASSERT(rsp->id ==
813 	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
814 	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
815 }
816 
817 /**
818  * xnb_txpkt2rsp responding to an invalid packet.
819  * Note: this test will result in an error message being printed to the console
820  * such as:
821  * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
822  */
823 static void
824 xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
825 {
826 	uint16_t num_consumed;
827 	struct xnb_pkt pkt;
828 	struct netif_tx_request *req;
829 	netif_extra_info_t *ext;
830 	struct netif_tx_response *rsp;
831 
832 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
833 	    xnb_unit_pvt.txf.req_prod_pvt);
834 	req->size = 1000;
835 	req->flags = NETTXF_extra_info;
836 	req->id = 69;
837 	xnb_unit_pvt.txf.req_prod_pvt++;
838 
839 	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
840 	    xnb_unit_pvt.txf.req_prod_pvt);
841 	ext->type = 0xFF;	/* Invalid extra type */
842 	ext->flags = 0;
843 	xnb_unit_pvt.txf.req_prod_pvt++;
844 
845 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
846 
847 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
848 	                            xnb_unit_pvt.txb.req_cons);
849 	xnb_unit_pvt.txb.req_cons += num_consumed;
850 	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
851 
852 	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
853 
854 	XNB_ASSERT(
855 	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
856 
857 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
858 	XNB_ASSERT(rsp->id == req->id);
859 	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
860 
861 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
862 	    xnb_unit_pvt.txf.rsp_cons + 1);
863 	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
864 }
865 
866 /**
867  * xnb_txpkt2rsp responding to one request which caused an error
868  */
869 static void
870 xnb_txpkt2rsp_error(char *buffer, size_t buflen)
871 {
872 	uint16_t num_consumed;
873 	struct xnb_pkt pkt;
874 	struct netif_tx_request *req;
875 	struct netif_tx_response *rsp;
876 
877 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
878 	    xnb_unit_pvt.txf.req_prod_pvt);
879 	req->size = 1000;
880 	req->flags = 0;
881 	xnb_unit_pvt.txf.req_prod_pvt++;
882 
883 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
884 
885 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
886 	                            xnb_unit_pvt.txb.req_cons);
887 	xnb_unit_pvt.txb.req_cons += num_consumed;
888 
889 	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
890 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
891 
892 	XNB_ASSERT(
893 	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
894 	XNB_ASSERT(rsp->id == req->id);
895 	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
896 }
897 
898 /**
899  * xnb_txpkt2rsp's responses wrap around the end of the ring
900  */
901 static void
902 xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
903 {
904 	struct xnb_pkt pkt;
905 	int num_consumed;
906 	struct netif_tx_request *req;
907 	struct netif_tx_response *rsp;
908 	unsigned int rsize;
909 
910 	/*
911 	 * Manually tweak the ring indices to create a ring with no responses
912 	 * and the next request slot at position 2 from the end
913 	 */
914 	rsize = RING_SIZE(&xnb_unit_pvt.txf);
915 	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
916 	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
917 	xnb_unit_pvt.txs->req_prod = rsize - 2;
918 	xnb_unit_pvt.txs->req_event = rsize - 1;
919 	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
920 	xnb_unit_pvt.txs->rsp_event = rsize - 1;
921 	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
922 	xnb_unit_pvt.txb.req_cons = rsize - 2;
923 
924 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
925 	    xnb_unit_pvt.txf.req_prod_pvt);
926 	req->flags = NETTXF_more_data;
927 	req->size = 550;
928 	req->id = 1;
929 	xnb_unit_pvt.txf.req_prod_pvt++;
930 
931 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
932 	    xnb_unit_pvt.txf.req_prod_pvt);
933 	req->flags = NETTXF_more_data;
934 	req->size = 100;
935 	req->id = 2;
936 	xnb_unit_pvt.txf.req_prod_pvt++;
937 
938 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
939 	    xnb_unit_pvt.txf.req_prod_pvt);
940 	req->flags = 0;
941 	req->size = 50;
942 	req->id = 3;
943 	xnb_unit_pvt.txf.req_prod_pvt++;
944 
945 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
946 
947 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
948 	                            xnb_unit_pvt.txb.req_cons);
949 
950 	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
951 
952 	XNB_ASSERT(
953 	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
954 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
955 	    xnb_unit_pvt.txf.rsp_cons + 2);
956 	XNB_ASSERT(rsp->id == req->id);
957 	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
958 }
959 
960 
961 /**
962  * Helper function used to set up pkt2mbufc tests
963  * \param size     size in bytes of the single request to push to the ring
964  * \param flags		optional flags to put in the netif request
965  * \param[out] pkt the returned packet object
966  * \return number of requests consumed from the ring
967  */
968 static int
969 xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
970 {
971 	struct netif_tx_request *req;
972 
973 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
974 	    xnb_unit_pvt.txf.req_prod_pvt);
975 	req->flags = flags;
976 	req->size = size;
977 	xnb_unit_pvt.txf.req_prod_pvt++;
978 
979 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
980 
981 	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
982 	                            xnb_unit_pvt.txb.req_cons);
983 }
984 
985 /**
986  * xnb_pkt2mbufc on an empty packet
987  */
988 static void
989 xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
990 {
991 	int num_consumed;
992 	struct xnb_pkt pkt;
993 	struct mbuf *pMbuf;
994 	pkt.list_len = 0;
995 
996 	/* must call xnb_ring2pkt just to initialize pkt */
997 	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
998 	                            xnb_unit_pvt.txb.req_cons);
999 	pkt.size = 0;
1000 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1001 	safe_m_freem(&pMbuf);
1002 }
1003 
1004 /**
1005  * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
1006  */
1007 static void
1008 xnb_pkt2mbufc_short(char *buffer, size_t buflen)
1009 {
1010 	const size_t size = MINCLSIZE - 1;
1011 	struct xnb_pkt pkt;
1012 	struct mbuf *pMbuf;
1013 
1014 	xnb_get1pkt(&pkt, size, 0);
1015 
1016 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1017 	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1018 	safe_m_freem(&pMbuf);
1019 }
1020 
1021 /**
1022  * xnb_pkt2mbufc on a short packet whose checksum was validated by the netfront
1023  */
1024 static void
1025 xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
1026 {
1027 	const size_t size = MINCLSIZE - 1;
1028 	struct xnb_pkt pkt;
1029 	struct mbuf *pMbuf;
1030 
1031 	xnb_get1pkt(&pkt, size, NETTXF_data_validated);
1032 
1033 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1034 	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1035 	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
1036 	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
1037 	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
1038 	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
1039 	safe_m_freem(&pMbuf);
1040 }
1041 
1042 /**
1043  * xnb_pkt2mbufc on packet that can fit in one cluster
1044  */
1045 static void
1046 xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
1047 {
1048 	const size_t size = MINCLSIZE;
1049 	struct xnb_pkt pkt;
1050 	struct mbuf *pMbuf;
1051 
1052 	xnb_get1pkt(&pkt, size, 0);
1053 
1054 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1055 	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1056 	safe_m_freem(&pMbuf);
1057 }
1058 
1059 /**
1060  * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
1061  */
1062 static void
1063 xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
1064 {
1065 	const size_t size = MCLBYTES + 1;
1066 	struct xnb_pkt pkt;
1067 	struct mbuf *pMbuf;
1068 
1069 	xnb_get1pkt(&pkt, size, 0);
1070 
1071 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1072 	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1073 	safe_m_freem(&pMbuf);
1074 }
1075 
1076 /**
1077  * xnb_pkt2mbufc on a packet that cannot fit in two clusters
1078  */
1079 static void
1080 xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
1081 {
1082 	const size_t size = 2 * MCLBYTES + 1;
1083 	size_t space = 0;
1084 	struct xnb_pkt pkt;
1085 	struct mbuf *pMbuf;
1086 	struct mbuf *m;
1087 
1088 	xnb_get1pkt(&pkt, size, 0);
1089 
1090 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1091 
1092 	for (m = pMbuf; m != NULL; m = m->m_next) {
1093 		space += M_TRAILINGSPACE(m);
1094 	}
1095 	XNB_ASSERT(space >= size);
1096 	safe_m_freem(&pMbuf);
1097 }
1098 
1099 /**
1100  * xnb_txpkt2gnttab on an empty packet.  Should return empty gnttab
1101  */
1102 static void
1103 xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
1104 {
1105 	int n_entries;
1106 	struct xnb_pkt pkt;
1107 	struct mbuf *pMbuf;
1108 	pkt.list_len = 0;
1109 
1110 	/* must call xnb_ring2pkt just to initialize pkt */
1111 	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1112 	pkt.size = 0;
1113 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1114 	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1115 	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1116 	XNB_ASSERT(n_entries == 0);
1117 	safe_m_freem(&pMbuf);
1118 }
1119 
1120 /**
1121  * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
1122  * and has one request
1123  */
1124 static void
1125 xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
1126 {
1127 	const size_t size = MINCLSIZE - 1;
1128 	int n_entries;
1129 	struct xnb_pkt pkt;
1130 	struct mbuf *pMbuf;
1131 
1132 	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1133 	    xnb_unit_pvt.txf.req_prod_pvt);
1134 	req->flags = 0;
1135 	req->size = size;
1136 	req->gref = 7;
1137 	req->offset = 17;
1138 	xnb_unit_pvt.txf.req_prod_pvt++;
1139 
1140 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1141 
1142 	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1143 
1144 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1145 	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1146 	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1147 	XNB_ASSERT(n_entries == 1);
1148 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1149 	/* flags should indicate gref's for source */
1150 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
1151 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
1152 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1153 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1154 	      mtod(pMbuf, vm_offset_t)));
1155 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
1156 		virt_to_mfn(mtod(pMbuf, vm_offset_t)));
1157 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1158 	safe_m_freem(&pMbuf);
1159 }
1160 
1161 /**
1162  * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
1163  * mbuf cluster
1164  */
1165 static void
1166 xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
1167 {
1168 	int n_entries;
1169 	struct xnb_pkt pkt;
1170 	struct mbuf *pMbuf;
1171 
1172 	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1173 	    xnb_unit_pvt.txf.req_prod_pvt);
1174 	req->flags = NETTXF_more_data;
1175 	req->size = 1900;
1176 	req->gref = 7;
1177 	req->offset = 0;
1178 	xnb_unit_pvt.txf.req_prod_pvt++;
1179 
1180 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1181 	    xnb_unit_pvt.txf.req_prod_pvt);
1182 	req->flags = 0;
1183 	req->size = 500;
1184 	req->gref = 8;
1185 	req->offset = 0;
1186 	xnb_unit_pvt.txf.req_prod_pvt++;
1187 
1188 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1189 
1190 	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1191 
1192 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1193 	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1194 	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1195 
1196 	XNB_ASSERT(n_entries == 2);
1197 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
1198 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1199 	      mtod(pMbuf, vm_offset_t)));
1200 
1201 	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
1202 	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1203 	      mtod(pMbuf, vm_offset_t) + 1400));
1204 	safe_m_freem(&pMbuf);
1205 }
1206 
1207 /**
1208  * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
1209  */
1210 static void
1211 xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
1212 {
1213 	int n_entries;
1214 	struct xnb_pkt pkt;
1215 	struct mbuf *pMbuf;
1216 	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1217 
1218 	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1219 	    xnb_unit_pvt.txf.req_prod_pvt);
1220 	req->flags = 0;
1221 	req->size = data_this_transaction;
1222 	req->gref = 8;
1223 	req->offset = 0;
1224 	xnb_unit_pvt.txf.req_prod_pvt++;
1225 
1226 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1227 	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1228 
1229 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1230 	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1231 	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1232 
1233 	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
1234 		/* there should be three mbufs and three gnttab entries */
1235 		XNB_ASSERT(n_entries == 3);
1236 		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
1237 		XNB_ASSERT(
1238 		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1239 		      mtod(pMbuf, vm_offset_t)));
1240 		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1241 
1242 		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
1243 		XNB_ASSERT(
1244 		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1245 		      mtod(pMbuf->m_next, vm_offset_t)));
1246 		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);
1247 
1248 		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
1249 		XNB_ASSERT(
1250 		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
1251 		      mtod(pMbuf->m_next, vm_offset_t)));
1252 		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
1253 			    MCLBYTES);
1254 	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
1255 		/* there should be two mbufs and two gnttab entries */
1256 		XNB_ASSERT(n_entries == 2);
1257 		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
1258 		XNB_ASSERT(
1259 		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1260 		      mtod(pMbuf, vm_offset_t)));
1261 		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1262 
1263 		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
1264 		XNB_ASSERT(
1265 		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1266 		      mtod(pMbuf->m_next, vm_offset_t)));
1267 		XNB_ASSERT(
1268 		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);
1269 
1270 	} else {
1271 		/* should never get here */
1272 		XNB_ASSERT(0);
1273 	}
1274 	if (pMbuf != NULL)
1275 		m_freem(pMbuf);
1276 }
1277 
1278 
1279 /**
1280  * xnb_update_mbufc on a short packet that only has one gnttab entry
1281  */
1282 static void
1283 xnb_update_mbufc_short(char *buffer, size_t buflen)
1284 {
1285 	const size_t size = MINCLSIZE - 1;
1286 	int n_entries;
1287 	struct xnb_pkt pkt;
1288 	struct mbuf *pMbuf;
1289 
1290 	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1291 	    xnb_unit_pvt.txf.req_prod_pvt);
1292 	req->flags = 0;
1293 	req->size = size;
1294 	req->gref = 7;
1295 	req->offset = 17;
1296 	xnb_unit_pvt.txf.req_prod_pvt++;
1297 
1298 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1299 
1300 	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1301 
1302 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1303 	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1304 	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1305 
1306 	/* Update grant table's status fields as the hypervisor call would */
1307 	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1308 
1309 	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1310 	XNB_ASSERT(pMbuf->m_len == size);
1311 	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
1312 	safe_m_freem(&pMbuf);
1313 }
1314 
1315 /**
1316  * xnb_update_mbufc on a packet with two requests, that can fit into a single
1317  * mbuf cluster
1318  */
1319 static void
1320 xnb_update_mbufc_2req(char *buffer, size_t buflen)
1321 {
1322 	int n_entries;
1323 	struct xnb_pkt pkt;
1324 	struct mbuf *pMbuf;
1325 
1326 	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1327 	    xnb_unit_pvt.txf.req_prod_pvt);
1328 	req->flags = NETTXF_more_data;
1329 	req->size = 1900;
1330 	req->gref = 7;
1331 	req->offset = 0;
1332 	xnb_unit_pvt.txf.req_prod_pvt++;
1333 
1334 	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1335 	    xnb_unit_pvt.txf.req_prod_pvt);
1336 	req->flags = 0;
1337 	req->size = 500;
1338 	req->gref = 8;
1339 	req->offset = 0;
1340 	xnb_unit_pvt.txf.req_prod_pvt++;
1341 
1342 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1343 
1344 	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1345 
1346 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1347 	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1348 	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1349 
1350 	/* Update grant table's status fields as the hypervisor call would */
1351 	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1352 	xnb_unit_pvt.gnttab[1].status = GNTST_okay;
1353 
1354 	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1355 	XNB_ASSERT(n_entries == 2);
1356 	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
1357 	XNB_ASSERT(pMbuf->m_len == 1900);
1358 
1359 	safe_m_freem(&pMbuf);
1360 }
1361 
1362 /**
1363  * xnb_update_mbufc on a single request that spans two mbuf clusters
1364  */
1365 static void
1366 xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
1367 {
1368 	int i;
1369 	int n_entries;
1370 	struct xnb_pkt pkt;
1371 	struct mbuf *pMbuf;
1372 	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1373 
1374 	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1375 	    xnb_unit_pvt.txf.req_prod_pvt);
1376 	req->flags = 0;
1377 	req->size = data_this_transaction;
1378 	req->gref = 8;
1379 	req->offset = 0;
1380 	xnb_unit_pvt.txf.req_prod_pvt++;
1381 
1382 	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1383 	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1384 
1385 	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1386 	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1387 	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1388 
1389 	/* Update grant table's status fields */
1390 	for (i = 0; i < n_entries; i++) {
1391 		xnb_unit_pvt.gnttab[i].status = GNTST_okay;
1392 	}
1393 	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1394 
1395 	if (n_entries == 3) {
1396 		/* there should be three mbufs and three gnttab entries */
1397 		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1398 		XNB_ASSERT(pMbuf->m_len == MCLBYTES);
1399 		XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
1400 		XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
1401 	} else if (n_entries == 2) {
1402 		/* there should be two mbufs and two gnttab entries */
1403 		XNB_ASSERT(n_entries == 2);
1404 		XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1405 		XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
1406 		XNB_ASSERT(pMbuf->m_next->m_len == 1);
1407 	} else {
1408 		/* should never get here */
1409 		XNB_ASSERT(0);
1410 	}
1411 	safe_m_freem(&pMbuf);
1412 }
1413 
1414 /** xnb_mbufc2pkt on an empty mbufc */
1415 static void
1416 xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
1417 	struct xnb_pkt pkt;
1418 	int free_slots = 64;
1419 	struct mbuf *mbuf;
1420 
1421 	mbuf = m_get(M_WAITOK, MT_DATA);
1422 	/*
1423 	 * note: it is illegal to set M_PKTHDR on a mbuf with no data.  Doing so
1424 	 * will cause m_freem to segfault
1425 	 */
1426 	XNB_ASSERT(mbuf->m_len == 0);
1427 
1428 	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1429 	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1430 
1431 	safe_m_freem(&mbuf);
1432 }
1433 
1434 /** xnb_mbufc2pkt on a short mbufc */
1435 static void
1436 xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
1437 	struct xnb_pkt pkt;
1438 	size_t size = 128;
1439 	int free_slots = 64;
1440 	RING_IDX start = 9;
1441 	struct mbuf *mbuf;
1442 
1443 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1444 	mbuf->m_flags |= M_PKTHDR;
1445 	mbuf->m_pkthdr.len = size;
1446 	mbuf->m_len = size;
1447 
1448 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1449 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1450 	XNB_ASSERT(pkt.size == size);
1451 	XNB_ASSERT(pkt.car_size == size);
1452 	XNB_ASSERT(! (pkt.flags &
1453 	      (NETRXF_more_data | NETRXF_extra_info)));
1454 	XNB_ASSERT(pkt.list_len == 1);
1455 	XNB_ASSERT(pkt.car == start);
1456 
1457 	safe_m_freem(&mbuf);
1458 }
1459 
1460 /** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
1461 static void
1462 xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
1463 	struct xnb_pkt pkt;
1464 	size_t size = MCLBYTES;
1465 	int free_slots = 32;
1466 	RING_IDX start = 12;
1467 	struct mbuf *mbuf;
1468 
1469 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1470 	mbuf->m_flags |= M_PKTHDR;
1471 	mbuf->m_pkthdr.len = size;
1472 	mbuf->m_len = size;
1473 
1474 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1475 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1476 	XNB_ASSERT(pkt.size == size);
1477 	XNB_ASSERT(pkt.car_size == size);
1478 	XNB_ASSERT(! (pkt.flags &
1479 	      (NETRXF_more_data | NETRXF_extra_info)));
1480 	XNB_ASSERT(pkt.list_len == 1);
1481 	XNB_ASSERT(pkt.car == start);
1482 
1483 	safe_m_freem(&mbuf);
1484 }
1485 
1486 /** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
1487 static void
1488 xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
1489 	struct xnb_pkt pkt;
1490 	size_t size1 = MHLEN - 5;
1491 	size_t size2 = MHLEN - 15;
1492 	int free_slots = 32;
1493 	RING_IDX start = 14;
1494 	struct mbuf *mbufc, *mbufc2;
1495 
1496 	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1497 	if (mbufc == NULL) {
1498 		XNB_ASSERT(mbufc != NULL);
1499 		return;
1500 	}
1501 	mbufc->m_flags |= M_PKTHDR;
1502 
1503 	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1504 	if (mbufc2 == NULL) {
1505 		XNB_ASSERT(mbufc2 != NULL);
1506 		safe_m_freem(&mbufc);
1507 		return;
1508 	}
1509 	mbufc2->m_pkthdr.len = size1 + size2;
1510 	mbufc2->m_len = size1;
1511 
1512 	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
1513 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1514 	XNB_ASSERT(pkt.size == size1 + size2);
1515 	XNB_ASSERT(pkt.car == start);
1516 	/*
1517 	 * The second m_getm may allocate a new mbuf and append
1518 	 * it to the chain, or it may simply extend the first mbuf.
1519 	 */
1520 	if (mbufc2->m_next != NULL) {
1521 		XNB_ASSERT(pkt.car_size == size1);
1522 		XNB_ASSERT(pkt.list_len == 1);
1523 		XNB_ASSERT(pkt.cdr == start + 1);
1524 	}
1525 
1526 	safe_m_freem(&mbufc2);
1527 }
1528 
1529 /** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
1530 static void
1531 xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
1532 	struct xnb_pkt pkt;
1533 	size_t size = 14 * MCLBYTES / 3;
1534 	size_t size_remaining;
1535 	int free_slots = 15;
1536 	RING_IDX start = 3;
1537 	struct mbuf *mbufc, *m;
1538 
1539 	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1540 	if (mbufc == NULL) {
1541 		XNB_ASSERT(mbufc != NULL);
1542 		return;
1543 	}
1544 	mbufc->m_flags |= M_PKTHDR;
1545 
1546 	mbufc->m_pkthdr.len = size;
1547 	size_remaining = size;
1548 	for (m = mbufc; m != NULL; m = m->m_next) {
1549 		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1550 		size_remaining -= m->m_len;
1551 	}
1552 
1553 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1554 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1555 	XNB_ASSERT(pkt.size == size);
1556 	XNB_ASSERT(pkt.car == start);
1557 	XNB_ASSERT(pkt.car_size == mbufc->m_len);
1558 	/*
1559 	 * There should be >1 response in the packet, and there is no
1560 	 * extra info.
1561 	 */
1562 	XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
1563 	XNB_ASSERT(pkt.cdr == pkt.car + 1);
1564 
1565 	safe_m_freem(&mbufc);
1566 }
1567 
1568 /** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
1569 static void
1570 xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
1571 	struct xnb_pkt pkt;
1572 	size_t size = 14 * MCLBYTES / 3;
1573 	size_t size_remaining;
1574 	int free_slots = 15;
1575 	RING_IDX start = 3;
1576 	struct mbuf *mbufc, *m;
1577 
1578 	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1579 	if (mbufc == NULL) {
1580 		XNB_ASSERT(mbufc != NULL);
1581 		return;
1582 	}
1583 
1584 	mbufc->m_flags |= M_PKTHDR;
1585 	mbufc->m_pkthdr.len = size;
1586 	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1587 	mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
1588 	size_remaining = size;
1589 	for (m = mbufc; m != NULL; m = m->m_next) {
1590 		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1591 		size_remaining -= m->m_len;
1592 	}
1593 
1594 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1595 	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1596 	XNB_ASSERT(pkt.size == size);
1597 	XNB_ASSERT(pkt.car == start);
1598 	XNB_ASSERT(pkt.car_size == mbufc->m_len);
1599 	/* There should be >1 response in the packet, there is extra info */
1600 	XNB_ASSERT(pkt.flags & NETRXF_extra_info);
1601 	XNB_ASSERT(pkt.flags & NETRXF_data_validated);
1602 	XNB_ASSERT(pkt.cdr == pkt.car + 2);
1603 	XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
1604 	XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
1605 	XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));
1606 
1607 	safe_m_freem(&mbufc);
1608 }
1609 
1610 /** xnb_mbufc2pkt with insufficient space in the ring */
1611 static void
1612 xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
1613 	struct xnb_pkt pkt;
1614 	size_t size = 14 * MCLBYTES / 3;
1615 	size_t size_remaining;
1616 	int free_slots = 2;
1617 	RING_IDX start = 3;
1618 	struct mbuf *mbufc, *m;
1619 	int error;
1620 
1621 	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1622 	if (mbufc == NULL) {
1623 		XNB_ASSERT(mbufc != NULL);
1624 		return;
1625 	}
1626 	mbufc->m_flags |= M_PKTHDR;
1627 
1628 	mbufc->m_pkthdr.len = size;
1629 	size_remaining = size;
1630 	for (m = mbufc; m != NULL; m = m->m_next) {
1631 		m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1632 		size_remaining -= m->m_len;
1633 	}
1634 
1635 	error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1636 	XNB_ASSERT(error == EAGAIN);
1637 	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1638 
1639 	safe_m_freem(&mbufc);
1640 }
1641 
1642 /**
1643  * xnb_rxpkt2gnttab on an empty packet.  Should return empty gnttab
1644  */
1645 static void
1646 xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
1647 {
1648 	struct xnb_pkt pkt;
1649 	int nr_entries;
1650 	int free_slots = 60;
1651 	struct mbuf *mbuf;
1652 
1653 	mbuf = m_get(M_WAITOK, MT_DATA);
1654 
1655 	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1656 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1657 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1658 
1659 	XNB_ASSERT(nr_entries == 0);
1660 
1661 	safe_m_freem(&mbuf);
1662 }
1663 
1664 /** xnb_rxpkt2gnttab on a short packet without extra data */
1665 static void
1666 xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
1667 	struct xnb_pkt pkt;
1668 	int nr_entries;
1669 	size_t size = 128;
1670 	int free_slots = 60;
1671 	RING_IDX start = 9;
1672 	struct netif_rx_request *req;
1673 	struct mbuf *mbuf;
1674 
1675 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1676 	mbuf->m_flags |= M_PKTHDR;
1677 	mbuf->m_pkthdr.len = size;
1678 	mbuf->m_len = size;
1679 
1680 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1681 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1682 			       xnb_unit_pvt.rxf.req_prod_pvt);
1683 	req->gref = 7;
1684 
1685 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1686 				      &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1687 
1688 	XNB_ASSERT(nr_entries == 1);
1689 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1690 	/* flags should indicate gref's for dest */
1691 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
1692 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
1693 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1694 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
1695 		   mtod(mbuf, vm_offset_t)));
1696 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
1697 		   virt_to_mfn(mtod(mbuf, vm_offset_t)));
1698 	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1699 
1700 	safe_m_freem(&mbuf);
1701 }
1702 
1703 /**
1704  * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
1705  */
1706 static void
1707 xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1708 {
1709 	struct xnb_pkt pkt;
1710 	int nr_entries;
1711 	int i, num_mbufs;
1712 	size_t total_granted_size = 0;
1713 	size_t size = MJUMPAGESIZE + 1;
1714 	int free_slots = 60;
1715 	RING_IDX start = 11;
1716 	struct netif_rx_request *req;
1717 	struct mbuf *mbuf, *m;
1718 
1719 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1720 	mbuf->m_flags |= M_PKTHDR;
1721 	mbuf->m_pkthdr.len = size;
1722 	mbuf->m_len = size;
1723 
1724 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1725 
1726 	for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
1727 		req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1728 		    xnb_unit_pvt.rxf.req_prod_pvt);
1729 		req->gref = i;
1730 		req->id = 5;
1731 	}
1732 	num_mbufs = i;
1733 
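	/* A cluster larger than a page needs one grant-copy entry per page
	 * touched, so nr_entries may exceed num_mbufs */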
1734 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1735 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1736 
1737 	XNB_ASSERT(nr_entries >= num_mbufs);
1738 	for (i = 0; i < nr_entries; i++) {
1739 		int end_offset = xnb_unit_pvt.gnttab[i].len +
1740 			xnb_unit_pvt.gnttab[i].dest.offset;
1741 		XNB_ASSERT(end_offset <= PAGE_SIZE);
1742 		total_granted_size += xnb_unit_pvt.gnttab[i].len;
1743 	}
1744 	XNB_ASSERT(total_granted_size == size);

	safe_m_freem(&mbuf);
1745 }
1746 
1747 /**
1748  * xnb_rxpkt2rsp on an empty packet.  Shouldn't make any response
1749  */
1750 static void
1751 xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1752 {
1753 	struct xnb_pkt pkt;
1754 	int nr_entries;
1755 	int nr_reqs;
1756 	int free_slots = 60;
1757 	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1758 	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1759 	struct mbuf *mbuf;
1760 
1761 	mbuf = m_get(M_WAITOK, MT_DATA);
1762 
1763 	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1764 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1765 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1766 
1767 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1768 	    &xnb_unit_pvt.rxb);
1769 	XNB_ASSERT(nr_reqs == 0);
1770 	XNB_ASSERT(
1771 	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1772 	XNB_ASSERT(
1773 	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1774 
1775 	safe_m_freem(&mbuf);
1776 }
1777 
1778 /**
1779  * xnb_rxpkt2rsp on a short packet with no extras
1780  */
1781 static void
1782 xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1783 {
1784 	struct xnb_pkt pkt;
1785 	int nr_entries, nr_reqs;
1786 	size_t size = 128;
1787 	int free_slots = 60;
1788 	RING_IDX start = 5;
1789 	struct netif_rx_request *req;
1790 	struct netif_rx_response *rsp;
1791 	struct mbuf *mbuf;
1792 
1793 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1794 	mbuf->m_flags |= M_PKTHDR;
1795 	mbuf->m_pkthdr.len = size;
1796 	mbuf->m_len = size;
1797 
1798 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1799 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1800 	req->gref = 7;
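	/* Prime the ring: one request posted, no responses produced yet */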
1801 	xnb_unit_pvt.rxb.req_cons = start;
1802 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1803 	xnb_unit_pvt.rxs->req_prod = start + 1;
1804 	xnb_unit_pvt.rxs->rsp_prod = start;
1805 
1806 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1807 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1808 
1809 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1810 	    &xnb_unit_pvt.rxb);
1811 
1812 	XNB_ASSERT(nr_reqs == 1);
1813 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1814 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1815 	XNB_ASSERT(rsp->id == req->id);
1816 	XNB_ASSERT(rsp->offset == 0);
1817 	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1818 	XNB_ASSERT(rsp->status == size);
1819 
1820 	safe_m_freem(&mbuf);
1821 }
1822 
1823 /**
1824  * xnb_rxpkt2rsp with extra data
1825  */
1826 static void
1827 xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1828 {
1829 	struct xnb_pkt pkt;
1830 	int nr_entries, nr_reqs;
1831 	size_t size = 14;
1832 	int free_slots = 15;
1833 	RING_IDX start = 3;
1834 	uint16_t id = 49;
1835 	uint16_t gref = 65;
1836 	uint16_t mss = TCP_MSS - 40;
1837 	struct mbuf *mbufc;
1838 	struct netif_rx_request *req;
1839 	struct netif_rx_response *rsp;
1840 	struct netif_extra_info *ext;
1841 
1842 	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1843 	if (mbufc == NULL) {
1844 		XNB_ASSERT(mbufc != NULL);
1845 		return;
1846 	}
1847 
1848 	mbufc->m_flags |= M_PKTHDR;
1849 	mbufc->m_pkthdr.len = size;
1850 	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1851 	mbufc->m_pkthdr.tso_segsz = mss;
1852 	mbufc->m_len = size;
1853 
1854 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
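	/* Post two requests: one for the data, one for the GSO extra info */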
1855 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1856 	req->id = id;
1857 	req->gref = gref;
1858 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1859 	req->id = id + 1;
1860 	req->gref = gref + 1;
1861 	xnb_unit_pvt.rxb.req_cons = start;
1862 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1863 	xnb_unit_pvt.rxs->req_prod = start + 2;
1864 	xnb_unit_pvt.rxs->rsp_prod = start;
1865 
1866 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1867 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1868 
1869 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1870 	    &xnb_unit_pvt.rxb);
1871 
1872 	XNB_ASSERT(nr_reqs == 2);
1873 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1874 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1875 	XNB_ASSERT(rsp->id == id);
1876 	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1877 	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1878 	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1879 	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1880 	XNB_ASSERT(rsp->status == size);
1881 
1882 	ext = (struct netif_extra_info*)
1883 		RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1884 	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1885 	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1886 	XNB_ASSERT(ext->u.gso.size == mss);
1887 	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_EXTRA_TYPE_GSO);
1888 
1889 	safe_m_freem(&mbufc);
1890 }
1891 
1892 /**
1893  * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
1894  * generate two response slots.
1895  */
1896 static void
1897 xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1898 {
1899 	struct xnb_pkt pkt;
1900 	int nr_entries, nr_reqs;
1901 	size_t size = PAGE_SIZE + 100;
1902 	int free_slots = 3;
1903 	uint16_t id1 = 17;
1904 	uint16_t id2 = 37;
1905 	uint16_t gref1 = 24;
1906 	uint16_t gref2 = 34;
1907 	RING_IDX start = 15;
1908 	struct netif_rx_request *req;
1909 	struct netif_rx_response *rsp;
1910 	struct mbuf *mbuf;
1911 
1912 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1913 	mbuf->m_flags |= M_PKTHDR;
1914 	mbuf->m_pkthdr.len = size;
1915 	if (mbuf->m_next != NULL) {
1916 		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
1917 		mbuf->m_len = first_len;
1918 		mbuf->m_next->m_len = size - first_len;
1920 	} else {
1921 		mbuf->m_len = size;
1922 	}
1923 
1924 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1925 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1926 	req->gref = gref1;
1927 	req->id = id1;
1928 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1929 	req->gref = gref2;
1930 	req->id = id2;
1931 	xnb_unit_pvt.rxb.req_cons = start;
1932 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1933 	xnb_unit_pvt.rxs->req_prod = start + 2;
1934 	xnb_unit_pvt.rxs->rsp_prod = start;
1935 
1936 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1937 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1938 
1939 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1940 	    &xnb_unit_pvt.rxb);
1941 
1942 	XNB_ASSERT(nr_reqs == 2);
1943 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
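	/* The first response covers one full page; the second, the remainder */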
1944 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1945 	XNB_ASSERT(rsp->id == id1);
1946 	XNB_ASSERT(rsp->offset == 0);
1947 	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1948 	XNB_ASSERT(rsp->flags & NETRXF_more_data);
1949 	XNB_ASSERT(rsp->status == PAGE_SIZE);
1950 
1951 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1952 	XNB_ASSERT(rsp->id == id2);
1953 	XNB_ASSERT(rsp->offset == 0);
1954 	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1955 	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1956 	XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1957 
1958 	safe_m_freem(&mbuf);
1959 }
1960 
1961 /** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1962 static void
1963 xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
1964 	struct xnb_pkt pkt;
1965 	int nr_reqs, nr_entries;
1966 	size_t size1 = MHLEN - 5;
1967 	size_t size2 = MHLEN - 15;
1968 	int free_slots = 32;
1969 	RING_IDX start = 14;
1970 	uint16_t id = 47;
1971 	uint16_t gref = 54;
1972 	struct netif_rx_request *req;
1973 	struct netif_rx_response *rsp;
1974 	struct mbuf *mbufc;
1975 
1976 	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1977 	if (mbufc == NULL) {
1978 		XNB_ASSERT(mbufc != NULL);
1979 		return;
1980 	}
1981 	mbufc->m_flags |= M_PKTHDR;
1982 
1983 	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1984 	XNB_ASSERT(mbufc->m_next != NULL);
1985 	mbufc->m_pkthdr.len = size1 + size2;
1986 	mbufc->m_len = size1;
1987 	mbufc->m_next->m_len = size2;
1988 
1989 	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1990 
1991 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1992 	req->gref = gref;
1993 	req->id = id;
1994 	xnb_unit_pvt.rxb.req_cons = start;
1995 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1996 	xnb_unit_pvt.rxs->req_prod = start + 1;
1997 	xnb_unit_pvt.rxs->rsp_prod = start;
1998 
1999 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
2000 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2001 
2002 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2003 	    &xnb_unit_pvt.rxb);
2004 
2005 	XNB_ASSERT(nr_entries == 2);
2006 	XNB_ASSERT(nr_reqs == 1);
2007 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2008 	XNB_ASSERT(rsp->id == id);
2009 	XNB_ASSERT(rsp->status == size1 + size2);
2010 	XNB_ASSERT(rsp->offset == 0);
2011 	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
2012 
2013 	safe_m_freem(&mbufc);
2014 }
2015 
2016 /**
2017  * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2018  * Note: this test will result in an error message being printed to the console
2019  * such as:
2020  * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2021  */
2022 static void
2023 xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2024 {
2025 	struct xnb_pkt pkt;
2026 	int nr_entries, nr_reqs;
2027 	int id = 7;
2028 	int gref = 42;
2029 	uint16_t canary = 6859;
2030 	size_t size = 7 * MCLBYTES;
2031 	int free_slots = 9;
2032 	RING_IDX start = 2;
2033 	struct netif_rx_request *req;
2034 	struct netif_rx_response *rsp;
2035 	struct mbuf *mbuf;
2036 
2037 	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2038 	mbuf->m_flags |= M_PKTHDR;
2039 	mbuf->m_pkthdr.len = size;
2040 	mbuf->m_len = size;
2041 
2042 	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2043 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2044 	req->gref = gref;
2045 	req->id = id;
2046 	xnb_unit_pvt.rxb.req_cons = start;
2047 	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2048 	xnb_unit_pvt.rxs->req_prod = start + 1;
2049 	xnb_unit_pvt.rxs->rsp_prod = start;
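	/* Post a canary request past the packet; the error path must leave it intact */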
2050 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2051 	req->gref = canary;
2052 	req->id = canary;
2053 
2054 	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2055 			&xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2056 	/* Inject a copy error into the middle of the packet */
2057 	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2058 
2059 	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2060 	    &xnb_unit_pvt.rxb);
2061 
2062 	XNB_ASSERT(nr_reqs == 1);
2063 	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2064 	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2065 	XNB_ASSERT(rsp->id == id);
2066 	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2067 	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2068 	XNB_ASSERT(req->gref == canary);
2069 	XNB_ASSERT(req->id == canary);
2070 
2071 	safe_m_freem(&mbuf);
2072 }
2073 
2074 #if defined(INET) || defined(INET6)
2075 /**
2076  * xnb_add_mbuf_cksum on an ARP request packet
2077  */
2078 static void
2079 xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
2080 {
2081 	const size_t pkt_len = sizeof(struct ether_header) +
2082 		sizeof(struct ether_arp);
2083 	struct mbuf *mbufc;
2084 	struct ether_header *eh;
2085 	struct ether_arp *ep;
2086 	unsigned char pkt_orig[pkt_len];
2087 
2088 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2089 	/* Fill in an example ARP request */
2090 	eh = mtod(mbufc, struct ether_header*);
2091 	eh->ether_dhost[0] = 0xff;
2092 	eh->ether_dhost[1] = 0xff;
2093 	eh->ether_dhost[2] = 0xff;
2094 	eh->ether_dhost[3] = 0xff;
2095 	eh->ether_dhost[4] = 0xff;
2096 	eh->ether_dhost[5] = 0xff;
2097 	eh->ether_shost[0] = 0x00;
2098 	eh->ether_shost[1] = 0x15;
2099 	eh->ether_shost[2] = 0x17;
2100 	eh->ether_shost[3] = 0xe9;
2101 	eh->ether_shost[4] = 0x30;
2102 	eh->ether_shost[5] = 0x68;
2103 	eh->ether_type = htons(ETHERTYPE_ARP);
2104 	ep = (struct ether_arp*)(eh + 1);
2105 	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
2106 	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
2107 	ep->ea_hdr.ar_hln = 6;
2108 	ep->ea_hdr.ar_pln = 4;
2109 	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
2110 	ep->arp_sha[0] = 0x00;
2111 	ep->arp_sha[1] = 0x15;
2112 	ep->arp_sha[2] = 0x17;
2113 	ep->arp_sha[3] = 0xe9;
2114 	ep->arp_sha[4] = 0x30;
2115 	ep->arp_sha[5] = 0x68;
2116 	ep->arp_spa[0] = 0xc0;
2117 	ep->arp_spa[1] = 0xa8;
2118 	ep->arp_spa[2] = 0x0a;
2119 	ep->arp_spa[3] = 0x04;
2120 	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
2121 	ep->arp_tpa[0] = 0xc0;
2122 	ep->arp_tpa[1] = 0xa8;
2123 	ep->arp_tpa[2] = 0x0a;
2124 	ep->arp_tpa[3] = 0x06;
2125 
2126 	/* fill in the length field */
2127 	mbufc->m_len = pkt_len;
2128 	mbufc->m_pkthdr.len = pkt_len;
2129 	/* indicate that the netfront uses hw-assisted checksums */
2130 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2131 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2132 
2133 	/* Make a backup copy of the packet */
2134 	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
2135 
2136 	/* Function under test */
2137 	xnb_add_mbuf_cksum(mbufc);
2138 
2139 	/* Verify that the packet's data did not change */
2140 	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
2141 	m_freem(mbufc);
2142 }
2143 
2144 /**
2145  * Helper function that populates the ethernet header and IP header used by
2146  * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
2147  * and must be large enough
2148  */
2149 static void
2150 xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
2151 		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
2152 {
2153 	struct ether_header *eh;
2154 	struct ip *iph;
2155 
2156 	eh = mtod(m, struct ether_header*);
2157 	eh->ether_dhost[0] = 0x00;
2158 	eh->ether_dhost[1] = 0x16;
2159 	eh->ether_dhost[2] = 0x3e;
2160 	eh->ether_dhost[3] = 0x23;
2161 	eh->ether_dhost[4] = 0x50;
2162 	eh->ether_dhost[5] = 0x0b;
2163 	eh->ether_shost[0] = 0x00;
2164 	eh->ether_shost[1] = 0x16;
2165 	eh->ether_shost[2] = 0x30;
2166 	eh->ether_shost[3] = 0x00;
2167 	eh->ether_shost[4] = 0x00;
2168 	eh->ether_shost[5] = 0x00;
2169 	eh->ether_type = htons(ETHERTYPE_IP);
2170 	iph = (struct ip*)(eh + 1);
2171 	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
2172 	iph->ip_v = 4;		/* IP v4 */
2173 	iph->ip_tos = 0;
2174 	iph->ip_len = htons(ip_len);
2175 	iph->ip_id = htons(ip_id);
2176 	iph->ip_off = htons(ip_off);
2177 	iph->ip_ttl = 64;
2178 	iph->ip_p = ip_p;
2179 	iph->ip_sum = htons(ip_sum);
2180 	iph->ip_src.s_addr = htonl(0xc0a80a04);
2181 	iph->ip_dst.s_addr = htonl(0xc0a80a05);
2182 }
2183 
2184 /**
2185  * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
2186  * ICMP packet
2187  */
2188 static void
2189 xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
2190 {
2191 	const size_t icmp_len = 64;	/* set by ping(1) */
2192 	const size_t pkt_len = sizeof(struct ether_header) +
2193 		sizeof(struct ip) + icmp_len;
2194 	struct mbuf *mbufc;
2195 	struct ether_header *eh;
2196 	struct ip *iph;
2197 	struct icmp *icmph;
2198 	unsigned char pkt_orig[icmp_len];
2199 	uint32_t *tv_field;
2200 	uint8_t *data_payload;
2201 	int i;
2202 	const uint16_t ICMP_CSUM = 0xaed7;
2203 	const uint16_t IP_CSUM = 0xe533;
2204 
2205 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2206 	/* Fill in an example ICMP ping request */
2207 	eh = mtod(mbufc, struct ether_header*);
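	/* ip_len = 84: 20 byte IP header plus 64 byte ICMP message */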
2208 	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
2209 	iph = (struct ip*)(eh + 1);
2210 	icmph = (struct icmp*)(iph + 1);
2211 	icmph->icmp_type = ICMP_ECHO;
2212 	icmph->icmp_code = 0;
2213 	icmph->icmp_cksum = htons(ICMP_CSUM);
2214 	icmph->icmp_id = htons(31492);
2215 	icmph->icmp_seq = htons(0);
2216 	/*
2217 	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
2218 	 * This test sets the two 32-bit words to fixed values for reproducibility.
2219 	 */
2220 	tv_field = (uint32_t*)(&(icmph->icmp_hun));
2221 	tv_field[0] = 0x4f02cfac;
2222 	tv_field[1] = 0x0007c46a;
2223 	/*
2224 	 * The data payload is an incrementing 8-bit integer, starting at 8
2225 	 */
2226 	data_payload = (uint8_t*)(&tv_field[2]);
2227 	for (i = 8; i < 37; i++) {
2228 		*data_payload++ = i;
2229 	}
2230 
2231 	/* fill in the length field */
2232 	mbufc->m_len = pkt_len;
2233 	mbufc->m_pkthdr.len = pkt_len;
2234 	/* indicate that the netfront uses hw-assisted checksums */
2235 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2236 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2237 
2238 	bcopy(icmph, pkt_orig, icmp_len);
2239 	/* Function under test */
2240 	xnb_add_mbuf_cksum(mbufc);
2241 
2242 	/* Check the IP checksum */
2243 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2244 
2245 	/* Check that the ICMP packet did not change */
2246 	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
2247 	m_freem(mbufc);
2248 }
2249 
2250 /**
2251  * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
2252  * UDP packet
2253  */
2254 static void
2255 xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
2256 {
2257 	const size_t udp_len = 16;
2258 	const size_t pkt_len = sizeof(struct ether_header) +
2259 		sizeof(struct ip) + udp_len;
2260 	struct mbuf *mbufc;
2261 	struct ether_header *eh;
2262 	struct ip *iph;
2263 	struct udphdr *udp;
2264 	uint8_t *data_payload;
2265 	const uint16_t IP_CSUM = 0xe56b;
2266 	const uint16_t UDP_CSUM = 0xdde2;
2267 
2268 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2269 	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
2270 	eh = mtod(mbufc, struct ether_header*);
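	/* ip_len = 36: 20 byte IP header, 8 byte UDP header, 8 byte payload */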
2271 	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
2272 	iph = (struct ip*)(eh + 1);
2273 	udp = (struct udphdr*)(iph + 1);
2274 	udp->uh_sport = htons(0x51ae);
2275 	udp->uh_dport = htons(0x08ae);
2276 	udp->uh_ulen = htons(udp_len);
2277 	udp->uh_sum = htons(0xbaad);  /* xnb_add_mbuf_cksum will fill this in */
2278 	data_payload = (uint8_t*)(udp + 1);
2279 	data_payload[0] = 'F';
2280 	data_payload[1] = 'r';
2281 	data_payload[2] = 'e';
2282 	data_payload[3] = 'e';
2283 	data_payload[4] = 'B';
2284 	data_payload[5] = 'S';
2285 	data_payload[6] = 'D';
2286 	data_payload[7] = '\n';
2287 
2288 	/* fill in the length field */
2289 	mbufc->m_len = pkt_len;
2290 	mbufc->m_pkthdr.len = pkt_len;
2291 	/* indicate that the netfront uses hw-assisted checksums */
2292 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2293 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2294 
2295 	/* Function under test */
2296 	xnb_add_mbuf_cksum(mbufc);
2297 
2298 	/* Check the checksums */
2299 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2300 	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
2301 
2302 	m_freem(mbufc);
2303 }
2304 
2305 /**
2306  * Helper function that populates a TCP packet used by all of the
2307  * xnb_add_mbuf_cksum TCP unit tests.  m must already be allocated and must be
2308  * large enough
2309  */
2310 static void
2311 xnb_fill_tcp(struct mbuf *m)
2312 {
2313 	struct ether_header *eh;
2314 	struct ip *iph;
2315 	struct tcphdr *tcp;
2316 	uint32_t *options;
2317 	uint8_t *data_payload;
2318 
2319 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2320 	eh = mtod(m, struct ether_header*);
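	/* ip_len = 60: 20 byte IP header, 32 byte TCP header (th_off = 8),
	 * 8 byte payload */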
2321 	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
2322 	iph = (struct ip*)(eh + 1);
2323 	tcp = (struct tcphdr*)(iph + 1);
2324 	tcp->th_sport = htons(0x9cd9);
2325 	tcp->th_dport = htons(2222);
2326 	tcp->th_seq = htonl(0x00f72b10);
2327 	tcp->th_ack = htonl(0x7f37ba6c);
2328 	tcp->th_x2 = 0;
2329 	tcp->th_off = 8;
2330 	tcp->th_flags = 0x18;
2331 	tcp->th_win = htons(0x410);
2332 	/* th_sum is incorrect; will be inserted by function under test */
2333 	tcp->th_sum = htons(0xbaad);
2334 	tcp->th_urp = htons(0);
2335 	/*
2336 	 * The following 12 bytes of options encode:
2337 	 * [nop, nop, TS val 33247 ecr 3457687679]
2338 	 */
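	/* options[0] bytes: 0x01 NOP, 0x01 NOP, kind 0x08 (timestamp), len 0x0a */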
2339 	options = (uint32_t*)(tcp + 1);
2340 	options[0] = htonl(0x0101080a);
2341 	options[1] = htonl(0x000081df);
2342 	options[2] = htonl(0xce18207f);
2343 	data_payload = (uint8_t*)(&options[3]);
2344 	data_payload[0] = 'F';
2345 	data_payload[1] = 'r';
2346 	data_payload[2] = 'e';
2347 	data_payload[3] = 'e';
2348 	data_payload[4] = 'B';
2349 	data_payload[5] = 'S';
2350 	data_payload[6] = 'D';
2351 	data_payload[7] = '\n';
2352 }
2353 
2354 /**
2355  * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
2356  * packet
2357  */
2358 static void
2359 xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
2360 {
2361 	const size_t payload_len = 8;
2362 	const size_t tcp_options_len = 12;
2363 	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2364 	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2365 	struct mbuf *mbufc;
2366 	struct ether_header *eh;
2367 	struct ip *iph;
2368 	struct tcphdr *tcp;
2369 	const uint16_t IP_CSUM = 0xa55a;
2370 	const uint16_t TCP_CSUM = 0x2f64;
2371 
2372 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2373 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2374 	xnb_fill_tcp(mbufc);
2375 	eh = mtod(mbufc, struct ether_header*);
2376 	iph = (struct ip*)(eh + 1);
2377 	tcp = (struct tcphdr*)(iph + 1);
2378 
2379 	/* fill in the length field */
2380 	mbufc->m_len = pkt_len;
2381 	mbufc->m_pkthdr.len = pkt_len;
2382 	/* indicate that the netfront uses hw-assisted checksums */
2383 	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID   |
2384 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2385 
2386 	/* Function under test */
2387 	xnb_add_mbuf_cksum(mbufc);
2388 
2389 	/* Check the checksums */
2390 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2391 	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2392 
2393 	m_freem(mbufc);
2394 }
2395 
2396 /**
2397  * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
2398  */
2399 static void
2400 xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
2401 {
2402 	const size_t payload_len = 8;
2403 	const size_t tcp_options_len = 12;
2404 	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2405 	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
2406 	struct mbuf *mbufc;
2407 	struct ether_header *eh;
2408 	struct ip *iph;
2409 	struct tcphdr *tcp;
2410 	/* Use deliberately bad checksums, and verify that they don't get */
2411 	/* corrected by xnb_add_mbuf_cksum */
2412 	const uint16_t IP_CSUM = 0xdead;
2413 	const uint16_t TCP_CSUM = 0xbeef;
2414 
2415 	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2416 	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2417 	xnb_fill_tcp(mbufc);
2418 	eh = mtod(mbufc, struct ether_header*);
2419 	iph = (struct ip*)(eh + 1);
2420 	iph->ip_sum = htons(IP_CSUM);
2421 	tcp = (struct tcphdr*)(iph + 1);
2422 	tcp->th_sum = htons(TCP_CSUM);
2423 
2424 	/* fill in the length field */
2425 	mbufc->m_len = pkt_len;
2426 	mbufc->m_pkthdr.len = pkt_len;
2427 	/* indicate that the netfront does not use hw-assisted checksums */
2428 	mbufc->m_pkthdr.csum_flags = 0;
2429 
2430 	/* Function under test */
2431 	xnb_add_mbuf_cksum(mbufc);
2432 
2433 	/* Check that the checksums didn't change */
2434 	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2435 	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2436 
2437 	m_freem(mbufc);
2438 }
2439 #endif /* INET || INET6 */
2440 
2441 /**
2442  * sscanf on unsigned chars
2443  */
2444 static void
2445 xnb_sscanf_hhu(char *buffer, size_t buflen)
2446 {
2447 	const char mystr[] = "137";
2448 	uint8_t dest[12];
2449 	int i;
2450 
2451 	for (i = 0; i < 12; i++)
2452 		dest[i] = 'X';
2453 
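	/* %hhu must write exactly one byte; the 'X' sentinels catch overwrites */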
2454 	sscanf(mystr, "%hhu", &dest[4]);
2455 	for (i = 0; i < 12; i++)
2456 		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
2457 }
2458 
2459 /**
2460  * sscanf on signed chars
2461  */
2462 static void
2463 xnb_sscanf_hhd(char *buffer, size_t buflen)
2464 {
2465 	const char mystr[] = "-27";
2466 	int8_t dest[12];
2467 	int i;
2468 
2469 	for (i = 0; i < 12; i++)
2470 		dest[i] = 'X';
2471 
2472 	sscanf(mystr, "%hhd", &dest[4]);
2473 	for (i = 0; i < 12; i++)
2474 		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
2475 }
2476 
2477 /**
2478  * sscanf on signed long longs
2479  */
2480 static void
2481 xnb_sscanf_lld(char *buffer, size_t buflen)
2482 {
2483 	const char mystr[] = "-123456789012345";	/* about -2**47 */
2484 	long long dest[3];
2485 	int i;
2486 
2487 	for (i = 0; i < 3; i++)
2488 		dest[i] = (long long)0xdeadbeefdeadbeef;
2489 
2490 	sscanf(mystr, "%lld", &dest[1]);
2491 	for (i = 0; i < 3; i++)
2492 		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
2493 		    -123456789012345));
2494 }
2495 
2496 /**
2497  * sscanf on unsigned long longs
2498  */
2499 static void
2500 xnb_sscanf_llu(char *buffer, size_t buflen)
2501 {
2502 	const char mystr[] = "12802747070103273189";
2503 	unsigned long long dest[3];
2504 	int i;
2505 
2506 	for (i = 0; i < 3; i++)
2507 		dest[i] = 0xdeadbeefdeadbeefull;
2508 
2509 	sscanf(mystr, "%llu", &dest[1]);
2510 	for (i = 0; i < 3; i++)
2511 		XNB_ASSERT(dest[i] == (i != 1 ? 0xdeadbeefdeadbeefull :
2512 		    12802747070103273189ull));
2513 }
2514 
2515 /**
2516  * sscanf with %hhn: store the count of consumed characters as an unsigned char
2517  */
2518 static void
2519 xnb_sscanf_hhn(char *buffer, size_t buflen)
2520 {
2521 	const char mystr[] =
2522 	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2523 	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2524 	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
2525 	unsigned char dest[12];
2526 	int i;
2527 
2528 	for (i = 0; i < 12; i++)
2529 		dest[i] = (unsigned char)'X';
2530 
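	/* The format consumes 64 + 64 + 32 = 160 characters before %hhn */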
2531 	sscanf(mystr,
2532 	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2533 	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2534 	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]);
2535 	for (i = 0; i < 12; i++)
2536 		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
2537 }
2538