/*-
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 *          Alan Somers         (Spectra Logic Corporation)
 *          John Suykerbuyk     (Spectra Logic Corporation)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \file netback_unit_tests.c
 *
 * \brief Unit tests for the Xen netback driver.
 *
 * Due to the driver's use of static functions, these tests cannot be compiled
 * standalone; they must be #include'd from the driver's .c file.
 */

/** Helper macro used to snprintf to a buffer and update the buffer pointer */
#define SNCATF(buffer, buflen, ...) do { \
        size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__); \
        buffer += new_chars; \
        /* be careful; snprintf's return value can be > buflen */ \
        buflen -= MIN(buflen, new_chars); \
} while (0)

/* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)

/**
 * Writes an error message to buffer if cond is false.
 * Note the implied parameters buffer and buflen.
 */
#define XNB_ASSERT(cond) ({ \
        int passed = (cond); \
        char *_buffer = (buffer); \
        size_t _buflen = (buflen); \
        if (! passed) { \
                strlcat(_buffer, __func__, _buflen); \
                strlcat(_buffer, ":" TOSTRING(__LINE__) \
                    " Assertion Error: " #cond "\n", _buflen); \
        } \
})
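
/*
 * Illustrative sketch (editorial addition, not part of the test suite): how
 * SNCATF and XNB_ASSERT compose inside a testcase.  Both macros rely on
 * variables named "buffer" and "buflen" being in scope, and the runner treats
 * any byte written to buffer as a test failure, so SNCATF should only be
 * used on a failure path.  The testcase below is hypothetical:
 *
 *	static void
 *	example_testcase(char *buffer, size_t buflen)
 *	{
 *		XNB_ASSERT(2 + 2 == 4);	// passes; writes nothing
 *		XNB_ASSERT(2 + 2 == 5);	// fails; appends
 *					// "example_testcase:<line> Assertion
 *					// Error: 2 + 2 == 5\n" to buffer
 *		SNCATF(buffer, buflen, "saw %d\n", 5); // extra diagnostic
 *	}
 */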

/**
 * The signature used by all testcases.  If the test writes anything
 * to buffer, then it will be considered a failure.
 * \param buffer Return storage for error messages
 * \param buflen The space available in the buffer
 */
typedef void testcase_t(char *buffer, size_t buflen);

/**
 * Signature used by setup functions
 * \return nonzero on error
 */
typedef int setup_t(void);

typedef void teardown_t(void);

/** A simple test fixture comprising setup, teardown, and test */
struct test_fixture {
        /** Will be run before the test to allocate and initialize variables */
        setup_t *setup;

        /** Will be run if setup succeeds */
        testcase_t *test;

        /** Cleans up test data whether or not the setup succeeded */
        teardown_t *teardown;
};

typedef struct test_fixture test_fixture_t;

static int xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
static int xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
    char *buffer, size_t buflen);

static int __unused
null_setup(void) { return 0; }

static void __unused
null_teardown(void) { }

static setup_t setup_pvt_data;
static teardown_t teardown_pvt_data;
static testcase_t xnb_ring2pkt_emptyring;
static testcase_t xnb_ring2pkt_1req;
static testcase_t xnb_ring2pkt_2req;
static testcase_t xnb_ring2pkt_3req;
static testcase_t xnb_ring2pkt_extra;
static testcase_t xnb_ring2pkt_partial;
static testcase_t xnb_ring2pkt_wraps;
static testcase_t xnb_txpkt2rsp_emptypkt;
static testcase_t xnb_txpkt2rsp_1req;
static testcase_t xnb_txpkt2rsp_extra;
static testcase_t xnb_txpkt2rsp_long;
static testcase_t xnb_txpkt2rsp_invalid;
static testcase_t xnb_txpkt2rsp_error;
static testcase_t xnb_txpkt2rsp_wraps;
static testcase_t xnb_pkt2mbufc_empty;
static testcase_t xnb_pkt2mbufc_short;
static testcase_t xnb_pkt2mbufc_csum;
static testcase_t xnb_pkt2mbufc_1cluster;
static testcase_t xnb_pkt2mbufc_largecluster;
static testcase_t xnb_pkt2mbufc_2cluster;
static testcase_t xnb_txpkt2gnttab_empty;
static testcase_t xnb_txpkt2gnttab_short;
static testcase_t xnb_txpkt2gnttab_2req;
static testcase_t xnb_txpkt2gnttab_2cluster;
static testcase_t xnb_update_mbufc_short;
static testcase_t xnb_update_mbufc_2req;
static testcase_t xnb_update_mbufc_2cluster;
static testcase_t xnb_mbufc2pkt_empty;
static testcase_t xnb_mbufc2pkt_short;
static testcase_t xnb_mbufc2pkt_1cluster;
static testcase_t xnb_mbufc2pkt_2short;
static testcase_t xnb_mbufc2pkt_long;
static testcase_t xnb_mbufc2pkt_extra;
static testcase_t xnb_mbufc2pkt_nospace;
static testcase_t xnb_rxpkt2gnttab_empty;
static testcase_t xnb_rxpkt2gnttab_short;
static testcase_t xnb_rxpkt2gnttab_2req;
static testcase_t xnb_rxpkt2rsp_empty;
static testcase_t xnb_rxpkt2rsp_short;
static testcase_t xnb_rxpkt2rsp_extra;
static testcase_t xnb_rxpkt2rsp_2short;
static testcase_t xnb_rxpkt2rsp_2slots;
static testcase_t xnb_rxpkt2rsp_copyerror;
static testcase_t xnb_sscanf_llu;
static testcase_t xnb_sscanf_lld;
static testcase_t xnb_sscanf_hhu;
static testcase_t xnb_sscanf_hhd;
static testcase_t xnb_sscanf_hhn;

#if defined(INET) || defined(INET6)
/* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
static testcase_t xnb_add_mbuf_cksum_arp;
static testcase_t xnb_add_mbuf_cksum_tcp;
static testcase_t xnb_add_mbuf_cksum_udp;
static testcase_t xnb_add_mbuf_cksum_icmp;
static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
static void xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
    uint16_t ip_id, uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum);
static void xnb_fill_tcp(struct mbuf *m);
#endif /* INET || INET6 */

/** Private data used by unit tests */
static struct {
        gnttab_copy_table gnttab;
        netif_rx_back_ring_t rxb;
        netif_rx_front_ring_t rxf;
        netif_tx_back_ring_t txb;
        netif_tx_front_ring_t txf;
        struct ifnet* ifp;
        netif_rx_sring_t* rxs;
        netif_tx_sring_t* txs;
} xnb_unit_pvt;

static inline void safe_m_freem(struct mbuf **ppMbuf) {
        if (*ppMbuf != NULL) {
                m_freem(*ppMbuf);
                *ppMbuf = NULL;
        }
}

/**
 * The unit test runner.  It will run every supplied test and return an
 * output message as a string.
 * \param tests An array of tests.  Every test will be attempted.
 * \param ntests The length of tests
 * \param buffer Return storage for the result string
 * \param buflen The length of buffer
 * \return The number of tests that failed
 */
static int
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
    size_t buflen)
{
        int i;
        int n_passes;
        int n_failures = 0;

        for (i = 0; i < ntests; i++) {
                int error = tests[i].setup();
                if (error != 0) {
                        SNCATF(buffer, buflen,
                            "Setup failed for test idx %d\n", i);
                        n_failures++;
                } else {
                        size_t new_chars;

                        tests[i].test(buffer, buflen);
                        new_chars = strnlen(buffer, buflen);
                        buffer += new_chars;
                        buflen -= new_chars;

                        if (new_chars > 0) {
                                n_failures++;
                        }
                }
                tests[i].teardown();
        }

        n_passes = ntests - n_failures;
        if (n_passes > 0) {
                SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
        }
        if (n_failures > 0) {
                SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
        }

        return n_failures;
}

/** Number of unit tests.  Must match the length of the tests array below */
#define TOTAL_TESTS     (53)
/**
 * Max memory available for returning results.  400 chars/test should give
 * enough space for a five line error message for every test
 */
#define TOTAL_BUFLEN    (400 * TOTAL_TESTS + 2)
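
/*
 * Editorial sketch: a handler like xnb_unit_test_main is typically attached
 * to a per-device sysctl node elsewhere in the driver.  The context, parent,
 * and node name below are assumptions for illustration only:
 *
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "unit_test_results",
 *	    CTLTYPE_STRING | CTLFLAG_RD, xnb, 0, xnb_unit_test_main, "A",
 *	    "Results of builtin unit tests");
 *
 * after which the suite can be run from userland with something like
 * "sysctl dev.xnb.0.unit_test_results".
 */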

/**
 * Called from userspace by a sysctl.  Runs all internal unit tests, and
 * returns the results to userspace as a string.
 * \param oidp unused
 * \param arg1 pointer to an xnb_softc for a specific xnb device
 * \param arg2 unused
 * \param req sysctl access structure
 * \return a string via the special SYSCTL_OUT macro.
 */
static int
xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
        test_fixture_t const tests[TOTAL_TESTS] = {
                {setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
                {setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
                {setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
                {setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
                {setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
#if defined(INET) || defined(INET6)
                {null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
                {null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
#endif
                {null_setup, xnb_sscanf_hhd, null_teardown},
                {null_setup, xnb_sscanf_hhu, null_teardown},
                {null_setup, xnb_sscanf_lld, null_teardown},
                {null_setup, xnb_sscanf_llu, null_teardown},
                {null_setup, xnb_sscanf_hhn, null_teardown},
        };
        /**
         * results is static so that the data will persist after this
         * function returns.  The sysctl code expects us to return a
         * constant string.
         * \todo: the static variable is not thread safe.  Put a mutex
         * around it.
         */
        static char results[TOTAL_BUFLEN];

        /* empty the result string */
        results[0] = 0;
        xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);

        return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
}

static int
setup_pvt_data(void)
{
        int error = 0;

        bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

        xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
        if (xnb_unit_pvt.txs != NULL) {
                SHARED_RING_INIT(xnb_unit_pvt.txs);
                BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
                FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
        } else {
                error = 1;
        }

        xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
        if (xnb_unit_pvt.ifp == NULL) {
                error = 1;
        }

        xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
        if (xnb_unit_pvt.rxs != NULL) {
                SHARED_RING_INIT(xnb_unit_pvt.rxs);
                BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
                FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
        } else {
                error = 1;
        }

        return error;
}

static void
teardown_pvt_data(void)
{
        if (xnb_unit_pvt.txs != NULL) {
                free(xnb_unit_pvt.txs, M_XENNETBACK);
        }
        if (xnb_unit_pvt.rxs != NULL) {
                free(xnb_unit_pvt.rxs, M_XENNETBACK);
        }
        if (xnb_unit_pvt.ifp != NULL) {
                if_free(xnb_unit_pvt.ifp);
        }
}

/**
 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
 */
static void
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 0);
}

/**
 * Verify that xnb_ring2pkt can convert a single request packet correctly
 */
static void
xnb_ring2pkt_1req(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);

        req->flags = 0;
        req->size = 69; /* arbitrary number for test */
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 1);
        XNB_ASSERT(pkt.size == 69);
        XNB_ASSERT(pkt.car_size == 69);
        XNB_ASSERT(pkt.flags == 0);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 1);
        XNB_ASSERT(pkt.car == 0);
}
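
/*
 * Editorial note: struct xnb_pkt borrows Lisp naming for its request list.
 * pkt.car is the ring index of the first (head) request, pkt.cdr is the index
 * where the rest of the list begins, and pkt.car_size is the number of bytes
 * described by the head request alone.  The two- and three-request tests
 * below exercise exactly that split (e.g. a 100-byte packet whose second
 * request covers 40 bytes leaves car_size == 60).
 */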

/**
 * Verify that xnb_ring2pkt can convert a two request packet correctly.
 * This tests handling of the MORE_DATA flag and cdr
 */
static void
xnb_ring2pkt_2req(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 100;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 40;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 2);
        XNB_ASSERT(pkt.size == 100);
        XNB_ASSERT(pkt.car_size == 60);
        XNB_ASSERT(pkt.flags == 0);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 2);
        XNB_ASSERT(pkt.car == start_idx);
        XNB_ASSERT(pkt.cdr == start_idx + 1);
}

/**
 * Verify that xnb_ring2pkt can convert a three request packet correctly
 */
static void
xnb_ring2pkt_3req(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 200;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 40;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 3);
        XNB_ASSERT(pkt.size == 200);
        XNB_ASSERT(pkt.car_size == 110);
        XNB_ASSERT(pkt.flags == 0);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 3);
        XNB_ASSERT(pkt.car == start_idx);
        XNB_ASSERT(pkt.cdr == start_idx + 1);
        XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}

/**
 * Verify that xnb_ring2pkt can read extra info
 */
static void
xnb_ring2pkt_extra(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        struct netif_extra_info *ext;
        RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_extra_info | NETTXF_more_data;
        req->size = 150;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->flags = 0;
        ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
        ext->u.gso.size = 250;
        ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
        ext->u.gso.features = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 3);
        XNB_ASSERT(pkt.extra.flags == 0);
        XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
        XNB_ASSERT(pkt.extra.u.gso.size == 250);
        XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
        XNB_ASSERT(pkt.size == 150);
        XNB_ASSERT(pkt.car_size == 100);
        XNB_ASSERT(pkt.flags == NETTXF_extra_info);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 2);
        XNB_ASSERT(pkt.car == start_idx);
        XNB_ASSERT(pkt.cdr == start_idx + 2);
        XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
}

/**
 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
 * not yet in the ring
 */
static void
xnb_ring2pkt_partial(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 150;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 0);
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
}

/**
 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
 * the end of the ring
 */
static void
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        unsigned int rsize;

        /*
         * Manually tweak the ring indices to create a ring with no responses
         * and the next request slot at position 2 from the end
         */
        rsize = RING_SIZE(&xnb_unit_pvt.txf);
        xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
        xnb_unit_pvt.txf.rsp_cons = rsize - 2;
        xnb_unit_pvt.txs->req_prod = rsize - 2;
        xnb_unit_pvt.txs->req_event = rsize - 1;
        xnb_unit_pvt.txs->rsp_prod = rsize - 2;
        xnb_unit_pvt.txs->rsp_event = rsize - 1;
        xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
        xnb_unit_pvt.txb.req_cons = rsize - 2;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 550;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 100;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        XNB_ASSERT(num_consumed == 3);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.list_len == 3);
        XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
}
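
/*
 * Editorial note on the wrap tests: the standard Xen ring accessors mask
 * their index by the ring size (a power of two), roughly
 *
 *	RING_GET_REQUEST(r, idx) ==> &(r)->sring->ring[(idx) &
 *	    (RING_SIZE(r) - 1)].req
 *
 * so producing three requests starting at slot rsize - 2 places the third
 * request back at slot 0.  That masking is what the test above relies on.
 */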

/**
 * xnb_txpkt2rsp should do nothing for an empty packet
 */
static void
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
{
        int num_consumed;
        struct xnb_pkt pkt;
        netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
        netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
        pkt.list_len = 0;

        /* must call xnb_ring2pkt just to initialize pkt */
        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
        XNB_ASSERT(
            memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
        XNB_ASSERT(
            memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
}

/**
 * xnb_txpkt2rsp responding to one request
 */
static void
xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
 */
static void
xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        netif_extra_info_t *ext;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = NETTXF_extra_info;
        req->id = 69;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
        ext->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 1);
        XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
 */
static void
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        netif_extra_info_t *ext;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = NETTXF_extra_info | NETTXF_more_data;
        req->id = 254;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
        ext->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 300;
        req->flags = NETTXF_more_data;
        req->id = 1034;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 400;
        req->flags = 0;
        req->id = 34;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
        XNB_ASSERT(rsp->id ==
            RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 1);
        XNB_ASSERT(rsp->status == NETIF_RSP_NULL);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 2);
        XNB_ASSERT(rsp->id ==
            RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 3);
        XNB_ASSERT(rsp->id ==
            RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * xnb_txpkt2rsp responding to an invalid packet.
 * Note: this test will result in an error message being printed to the
 * console, such as:
 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255.  Discarding packet
 */
static void
xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        netif_extra_info_t *ext;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = NETTXF_extra_info;
        req->id = 69;
        xnb_unit_pvt.txf.req_prod_pvt++;

        ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        ext->type = 0xFF;       /* Invalid extra type */
        ext->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);

        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 1);
        XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
}

/**
 * xnb_txpkt2rsp responding to one request which caused an error
 */
static void
xnb_txpkt2rsp_error(char *buffer, size_t buflen)
{
        uint16_t num_consumed;
        struct xnb_pkt pkt;
        struct netif_tx_request *req;
        struct netif_tx_response *rsp;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->size = 1000;
        req->flags = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        xnb_unit_pvt.txb.req_cons += num_consumed;

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
}

/**
 * xnb_txpkt2rsp's responses wrap around the end of the ring
 */
static void
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int num_consumed;
        struct netif_tx_request *req;
        struct netif_tx_response *rsp;
        unsigned int rsize;

        /*
         * Manually tweak the ring indices to create a ring with no responses
         * and the next request slot at position 2 from the end
         */
        rsize = RING_SIZE(&xnb_unit_pvt.txf);
        xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
        xnb_unit_pvt.txf.rsp_cons = rsize - 2;
        xnb_unit_pvt.txs->req_prod = rsize - 2;
        xnb_unit_pvt.txs->req_event = rsize - 1;
        xnb_unit_pvt.txs->rsp_prod = rsize - 2;
        xnb_unit_pvt.txs->rsp_event = rsize - 1;
        xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
        xnb_unit_pvt.txb.req_cons = rsize - 2;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 550;
        req->id = 1;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 100;
        req->id = 2;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 50;
        req->id = 3;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);

        xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);

        XNB_ASSERT(
            xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
            xnb_unit_pvt.txf.rsp_cons + 2);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
}

/**
 * Helper function used to setup pkt2mbufc tests
 * \param size  size in bytes of the single request to push to the ring
 * \param flags optional flags to put in the netif request
 * \param[out] pkt the returned packet object
 * \return number of requests consumed from the ring
 */
static int
xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
{
        struct netif_tx_request *req;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = flags;
        req->size = size;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
}

/**
 * xnb_pkt2mbufc on an empty packet
 */
static void
xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
{
        int num_consumed;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        pkt.list_len = 0;

        /* must call xnb_ring2pkt just to initialize pkt */
        num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
            xnb_unit_pvt.txb.req_cons);
        pkt.size = 0;
        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
 */
static void
xnb_pkt2mbufc_short(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on short packet whose checksum was validated by the netfront
 */
static void
xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, NETTXF_data_validated);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
        XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that can fit in one cluster
 */
static void
xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
 */
static void
xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
{
        const size_t size = MCLBYTES + 1;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_pkt2mbufc on packet that cannot fit in two regular clusters
 */
static void
xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
{
        const size_t size = 2 * MCLBYTES + 1;
        size_t space = 0;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        struct mbuf *m;

        xnb_get1pkt(&pkt, size, 0);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);

        for (m = pMbuf; m != NULL; m = m->m_next) {
                space += M_TRAILINGSPACE(m);
        }
        XNB_ASSERT(space >= size);
        safe_m_freem(&pMbuf);
}
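
/*
 * Editorial note: the pkt2mbufc tests above walk the mbuf allocator's size
 * breakpoints: anything under MINCLSIZE fits in an mbuf's internal buffer,
 * sizes up to a cluster take MCLBYTES of external storage, and larger
 * packets need a jumbo cluster or a chain.  Since the exact chain shape is
 * an allocator decision, the assertions only require that the total
 * M_TRAILINGSPACE of the chain covers the requested size.
 */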

/**
 * xnb_txpkt2gnttab on an empty packet.  Should return empty gnttab
 */
static void
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        pkt.list_len = 0;

        /* must call xnb_ring2pkt just to initialize pkt */
        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
        pkt.size = 0;
        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
        XNB_ASSERT(n_entries == 0);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal
 * buffer and has one request
 */
static void
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = size;
        req->gref = 7;
        req->offset = 17;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
        XNB_ASSERT(n_entries == 1);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
        /* flags should indicate gref's for source */
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
            mtod(pMbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
            virt_to_mfn(mtod(pMbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 1900;
        req->gref = 7;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 500;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        XNB_ASSERT(n_entries == 2);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
            mtod(pMbuf, vm_offset_t)));

        XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
        XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
            mtod(pMbuf, vm_offset_t) + 1400));
        safe_m_freem(&pMbuf);
}
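
/*
 * Editorial note: on the transmit path the grant copy runs frontend ->
 * backend, which the assertions above encode as GNTCOPY_source_gref (the
 * source page is named by the guest's grant reference) with the destination
 * given by the local mbuf's machine frame (dest.u.gmfn) and page offset.
 * The rxpkt2gnttab tests later in this file check the mirrored layout.
 */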

/**
 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
 */
static void
xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = data_this_transaction;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        XNB_ASSERT(pMbuf != NULL);
        if (pMbuf == NULL)
                return;

        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
                /* there should be three mbufs and three gnttab entries */
                XNB_ASSERT(n_entries == 3);
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
                    mtod(pMbuf, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

                XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
                    mtod(pMbuf->m_next, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);

                XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
                    mtod(pMbuf->m_next, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
                    MCLBYTES);
        } else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
                /* there should be two mbufs and two gnttab entries */
                XNB_ASSERT(n_entries == 2);
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
                    mtod(pMbuf, vm_offset_t)));
                XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);

                XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
                    mtod(pMbuf->m_next, vm_offset_t)));
                XNB_ASSERT(
                    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);

        } else {
                /* should never get here */
                XNB_ASSERT(0);
        }
        m_freem(pMbuf);
}

/**
 * xnb_update_mbufc on a short packet that only has one gnttab entry
 */
static void
xnb_update_mbufc_short(char *buffer, size_t buflen)
{
        const size_t size = MINCLSIZE - 1;
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = size;
        req->gref = 7;
        req->offset = 17;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        /* Update grant table's status fields as the hypervisor call would */
        xnb_unit_pvt.gnttab[0].status = GNTST_okay;

        xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
        XNB_ASSERT(pMbuf->m_len == size);
        XNB_ASSERT(pMbuf->m_pkthdr.len == size);
        safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a packet with two requests, that can fit into a single
 * mbuf cluster
 */
static void
xnb_update_mbufc_2req(char *buffer, size_t buflen)
{
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = NETTXF_more_data;
        req->size = 1900;
        req->gref = 7;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = 500;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);

        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        /* Update grant table's status fields as the hypervisor call would */
        xnb_unit_pvt.gnttab[0].status = GNTST_okay;
        xnb_unit_pvt.gnttab[1].status = GNTST_okay;

        xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
        XNB_ASSERT(n_entries == 2);
        XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
        XNB_ASSERT(pMbuf->m_len == 1900);

        safe_m_freem(&pMbuf);
}

/**
 * xnb_update_mbufc on a single request that spans two mbuf clusters
 */
static void
xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
{
        int i;
        int n_entries;
        struct xnb_pkt pkt;
        struct mbuf *pMbuf;
        const uint16_t data_this_transaction = (MCLBYTES*2) + 1;

        struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
            xnb_unit_pvt.txf.req_prod_pvt);
        req->flags = 0;
        req->size = data_this_transaction;
        req->gref = 8;
        req->offset = 0;
        xnb_unit_pvt.txf.req_prod_pvt++;

        RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
        xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);

        pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
        n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);

        /* Update the grant table's status field for every entry */
        for (i = 0; i < n_entries; i++) {
                xnb_unit_pvt.gnttab[i].status = GNTST_okay;
        }
        xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);

        if (n_entries == 3) {
                /* there should be three mbufs and three gnttab entries */
                XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
                XNB_ASSERT(pMbuf->m_len == MCLBYTES);
                XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
                XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
        } else if (n_entries == 2) {
                /* there should be two mbufs and two gnttab entries */
                XNB_ASSERT(n_entries == 2);
                XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
                XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
                XNB_ASSERT(pMbuf->m_next->m_len == 1);
        } else {
                /* should never get here */
                XNB_ASSERT(0);
        }
        safe_m_freem(&pMbuf);
}
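
/*
 * Editorial note: the update_mbufc tests never issue a real grant-copy
 * hypercall.  They fake its side effects by writing GNTST_okay into each
 * gnttab entry's status field, mirroring what a successful HYPERVISOR
 * grant-table operation would leave behind, before calling xnb_update_mbufc.
 */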

/** xnb_mbufc2pkt on an empty mbufc */
static void
xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        int free_slots = 64;
        struct mbuf *mbuf;

        mbuf = m_get(M_WAITOK, MT_DATA);
        /*
         * note: it is illegal to set M_PKTHDR on a mbuf with no data.  Doing
         * so will cause m_freem to segfault
         */
        XNB_ASSERT(mbuf->m_len == 0);

        xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

        safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a short mbufc */
static void
xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        size_t size = 128;
        int free_slots = 64;
        RING_IDX start = 9;
        struct mbuf *mbuf;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car_size == size);
        XNB_ASSERT(! (pkt.flags &
            (NETRXF_more_data | NETRXF_extra_info)));
        XNB_ASSERT(pkt.list_len == 1);
        XNB_ASSERT(pkt.car == start);

        safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
static void
xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        size_t size = MCLBYTES;
        int free_slots = 32;
        RING_IDX start = 12;
        struct mbuf *mbuf;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car_size == size);
        XNB_ASSERT(! (pkt.flags &
            (NETRXF_more_data | NETRXF_extra_info)));
        XNB_ASSERT(pkt.list_len == 1);
        XNB_ASSERT(pkt.car == start);

        safe_m_freem(&mbuf);
}

/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
static void
xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        size_t size1 = MHLEN - 5;
        size_t size2 = MHLEN - 15;
        int free_slots = 32;
        RING_IDX start = 14;
        struct mbuf *mbufc, *mbufc2;

        mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;
        mbufc->m_flags |= M_PKTHDR;

        mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc2 != NULL);
        if (mbufc2 == NULL) {
                safe_m_freem(&mbufc);
                return;
        }
        mbufc2->m_pkthdr.len = size1 + size2;
        mbufc2->m_len = size1;

        xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size1 + size2);
        XNB_ASSERT(pkt.car == start);
        /*
         * The second m_getm may allocate a new mbuf and append
         * it to the chain, or it may simply extend the first mbuf.
         */
        if (mbufc2->m_next != NULL) {
                XNB_ASSERT(pkt.car_size == size1);
                XNB_ASSERT(pkt.list_len == 1);
                XNB_ASSERT(pkt.cdr == start + 1);
        }

        safe_m_freem(&mbufc2);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
static void
xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        size_t size = 14 * MCLBYTES / 3;
        size_t size_remaining;
        int free_slots = 15;
        RING_IDX start = 3;
        struct mbuf *mbufc, *m;

        mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;
        mbufc->m_flags |= M_PKTHDR;

        mbufc->m_pkthdr.len = size;
        size_remaining = size;
        for (m = mbufc; m != NULL; m = m->m_next) {
                m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
                size_remaining -= m->m_len;
        }

        xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car == start);
        XNB_ASSERT(pkt.car_size == mbufc->m_len);
        /*
         * There should be >1 response in the packet, and there is no
         * extra info.
         */
        XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
        XNB_ASSERT(pkt.cdr == pkt.car + 1);

        safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
static void
xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        size_t size = 14 * MCLBYTES / 3;
        size_t size_remaining;
        int free_slots = 15;
        RING_IDX start = 3;
        struct mbuf *mbufc, *m;

        mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;

        mbufc->m_flags |= M_PKTHDR;
        mbufc->m_pkthdr.len = size;
        mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
        mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
        size_remaining = size;
        for (m = mbufc; m != NULL; m = m->m_next) {
                m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
                size_remaining -= m->m_len;
        }

        xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
        XNB_ASSERT(xnb_pkt_is_valid(&pkt));
        XNB_ASSERT(pkt.size == size);
        XNB_ASSERT(pkt.car == start);
        XNB_ASSERT(pkt.car_size == mbufc->m_len);
        /* There should be >1 response in the packet, there is extra info */
        XNB_ASSERT(pkt.flags & NETRXF_extra_info);
        XNB_ASSERT(pkt.flags & NETRXF_data_validated);
        XNB_ASSERT(pkt.cdr == pkt.car + 2);
        XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
        XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
        XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));

        safe_m_freem(&mbufc);
}

/** xnb_mbufc2pkt with insufficient space in the ring */
static void
xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        size_t size = 14 * MCLBYTES / 3;
        size_t size_remaining;
        int free_slots = 2;
        RING_IDX start = 3;
        struct mbuf *mbufc, *m;
        int error;

        mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
        XNB_ASSERT(mbufc != NULL);
        if (mbufc == NULL)
                return;
        mbufc->m_flags |= M_PKTHDR;

        mbufc->m_pkthdr.len = size;
        size_remaining = size;
        for (m = mbufc; m != NULL; m = m->m_next) {
                m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
                size_remaining -= m->m_len;
        }

        error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
        XNB_ASSERT(error == EAGAIN);
        XNB_ASSERT(! xnb_pkt_is_valid(&pkt));

        safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2gnttab on an empty packet.  Should return empty gnttab
 */
static void
xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int nr_entries;
        int free_slots = 60;
        struct mbuf *mbuf;

        mbuf = m_get(M_WAITOK, MT_DATA);

        xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
        nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

        XNB_ASSERT(nr_entries == 0);

        safe_m_freem(&mbuf);
}

/** xnb_rxpkt2gnttab on a short packet without extra data */
static void
xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
        struct xnb_pkt pkt;
        int nr_entries;
        size_t size = 128;
        int free_slots = 60;
        RING_IDX start = 9;
        struct netif_rx_request *req;
        struct mbuf *mbuf;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
        req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
            xnb_unit_pvt.rxf.req_prod_pvt);
        req->gref = 7;

        nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

        XNB_ASSERT(nr_entries == 1);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
        /* flags should indicate gref's for dest */
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
            mtod(mbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
            virt_to_mfn(mtod(mbuf, vm_offset_t)));
        XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);

        safe_m_freem(&mbuf);
}
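
/*
 * Editorial note: these rxpkt2gnttab tests are the receive-path mirror of
 * the txpkt2gnttab tests above: the copy runs backend -> frontend, so
 * GNTCOPY_dest_gref is set, the source is the local mbuf (source.u.gmfn
 * plus page offset), and the destination is the guest's granted page.
 */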

/**
 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
 */
static void
xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int nr_entries;
        int i, num_mbufs;
        size_t total_granted_size = 0;
        size_t size = MJUMPAGESIZE + 1;
        int free_slots = 60;
        RING_IDX start = 11;
        struct netif_rx_request *req;
        struct mbuf *mbuf, *m;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);

        for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
                req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
                    xnb_unit_pvt.rxf.req_prod_pvt);
                req->gref = i;
                req->id = 5;
        }
        num_mbufs = i;

        nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

        XNB_ASSERT(nr_entries >= num_mbufs);
        for (i = 0; i < nr_entries; i++) {
                int end_offset = xnb_unit_pvt.gnttab[i].len +
                    xnb_unit_pvt.gnttab[i].dest.offset;
                XNB_ASSERT(end_offset <= PAGE_SIZE);
                total_granted_size += xnb_unit_pvt.gnttab[i].len;
        }
        XNB_ASSERT(total_granted_size == size);
}

/**
 * xnb_rxpkt2rsp on an empty packet.  Shouldn't make any response
 */
static void
xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int nr_entries;
        int nr_reqs;
        int free_slots = 60;
        netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
        netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
        struct mbuf *mbuf;

        mbuf = m_get(M_WAITOK, MT_DATA);

        xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
        nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

        nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
            &xnb_unit_pvt.rxb);
        XNB_ASSERT(nr_reqs == 0);
        XNB_ASSERT(
            memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
        XNB_ASSERT(
            memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);

        safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2rsp on a short packet with no extras
 */
static void
xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int nr_entries, nr_reqs;
        size_t size = 128;
        int free_slots = 60;
        RING_IDX start = 5;
        struct netif_rx_request *req;
        struct netif_rx_response *rsp;
        struct mbuf *mbuf;

        mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
        mbuf->m_flags |= M_PKTHDR;
        mbuf->m_pkthdr.len = size;
        mbuf->m_len = size;

        xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
        req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
        req->gref = 7;
        xnb_unit_pvt.rxb.req_cons = start;
        xnb_unit_pvt.rxb.rsp_prod_pvt = start;
        xnb_unit_pvt.rxs->req_prod = start + 1;
        xnb_unit_pvt.rxs->rsp_prod = start;

        nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
            &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

        nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
            &xnb_unit_pvt.rxb);

        XNB_ASSERT(nr_reqs == 1);
        XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
        rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
        XNB_ASSERT(rsp->id == req->id);
        XNB_ASSERT(rsp->offset == 0);
        XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
        XNB_ASSERT(rsp->status == size);

        safe_m_freem(&mbuf);
}

/**
 * xnb_rxpkt2rsp with extra data
 */
static void
xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
{
        struct xnb_pkt pkt;
        int nr_entries, nr_reqs;
        size_t size = 14;
        int free_slots = 15;
        RING_IDX start = 3;
        uint16_t id = 49;
        uint16_t gref = 65;
        uint16_t mss = TCP_MSS - 40;
        struct mbuf *mbufc;
        struct netif_rx_request *req;
        struct netif_rx_response *rsp;
        struct netif_extra_info *ext;

/**
 * xnb_rxpkt2rsp with extra data
 */
static void
xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = 14;
	int free_slots = 15;
	RING_IDX start = 3;
	uint16_t id = 49;
	uint16_t gref = 65;
	uint16_t mss = TCP_MSS - 40;
	struct mbuf *mbufc;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct netif_extra_info *ext;

	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;

	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = mss;
	mbufc->m_len = size;

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->id = id;
	req->gref = gref;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->id = id + 1;
	req->gref = gref + 1;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
	XNB_ASSERT(rsp->status == size);

	ext = (struct netif_extra_info*)
	    RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
	XNB_ASSERT(ext->u.gso.size == mss);
	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);

	safe_m_freem(&mbufc);
}
/**
 * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
 * generate two response slots.
 */
static void
xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	size_t size = PAGE_SIZE + 100;
	int free_slots = 3;
	uint16_t id1 = 17;
	uint16_t id2 = 37;
	uint16_t gref1 = 24;
	uint16_t gref2 = 34;
	RING_IDX start = 15;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	if (mbuf->m_next != NULL) {
		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
		mbuf->m_len = first_len;
		mbuf->m_next->m_len = size - first_len;
	} else {
		mbuf->m_len = size;
	}

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref1;
	req->id = id1;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->gref = gref2;
	req->id = id2;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id1);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(rsp->flags & NETRXF_more_data);
	XNB_ASSERT(rsp->status == PAGE_SIZE);

	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(rsp->id == id2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
	XNB_ASSERT(rsp->status == size - PAGE_SIZE);

	safe_m_freem(&mbuf);
}
/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
static void
xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
	struct xnb_pkt pkt;
	int nr_reqs, nr_entries;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	uint16_t id = 47;
	uint16_t gref = 54;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbufc;

	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	if (mbufc == NULL)
		return;
	mbufc->m_flags |= M_PKTHDR;

	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc->m_next != NULL);
	mbufc->m_pkthdr.len = size1 + size2;
	mbufc->m_len = size1;
	mbufc->m_next->m_len = size2;

	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);

	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref;
	req->id = id;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_entries == 2);
	XNB_ASSERT(nr_reqs == 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == size1 + size2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));

	safe_m_freem(&mbufc);
}

/**
 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
 * Note: this test will result in an error message being printed to the console
 * such as:
 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
 */
static void
xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
{
	struct xnb_pkt pkt;
	int nr_entries, nr_reqs;
	int id = 7;
	int gref = 42;
	uint16_t canary = 6859;
	size_t size = 7 * MCLBYTES;
	int free_slots = 9;
	RING_IDX start = 2;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct mbuf *mbuf;

	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	mbuf->m_len = size;

	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req->gref = gref;
	req->id = id;
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->gref = canary;
	req->id = canary;

	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	/* Inject the error */
	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;

	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	    &xnb_unit_pvt.rxb);

	XNB_ASSERT(nr_reqs == 1);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	XNB_ASSERT(req->gref == canary);
	XNB_ASSERT(req->id == canary);

	safe_m_freem(&mbuf);
}
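/**
 * For illustration only: the copyerror test above relies on xnb_rxpkt2rsp
 * collapsing a packet whose grant copy failed into a single NETIF_RSP_ERROR
 * slot.  A hedged sketch of the status scan that behavior implies (the
 * driver's real logic lives in xnb_rxpkt2rsp itself):
 */
static int __unused
xnb_unit_gnttab_has_error(const struct gnttab_copy *gnttab, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (gnttab[i].status != GNTST_okay)
			return (1);
	return (0);
}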
#if defined(INET) || defined(INET6)
/**
 * xnb_add_mbuf_cksum on an ARP request packet
 */
static void
xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
{
	const size_t pkt_len = sizeof(struct ether_header) +
	    sizeof(struct ether_arp);
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ether_arp *ep;
	unsigned char pkt_orig[pkt_len];

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example ARP request */
	eh = mtod(mbufc, struct ether_header*);
	eh->ether_dhost[0] = 0xff;
	eh->ether_dhost[1] = 0xff;
	eh->ether_dhost[2] = 0xff;
	eh->ether_dhost[3] = 0xff;
	eh->ether_dhost[4] = 0xff;
	eh->ether_dhost[5] = 0xff;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x15;
	eh->ether_shost[2] = 0x17;
	eh->ether_shost[3] = 0xe9;
	eh->ether_shost[4] = 0x30;
	eh->ether_shost[5] = 0x68;
	eh->ether_type = htons(ETHERTYPE_ARP);
	ep = (struct ether_arp*)(eh + 1);
	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
	ep->ea_hdr.ar_hln = 6;
	ep->ea_hdr.ar_pln = 4;
	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
	ep->arp_sha[0] = 0x00;
	ep->arp_sha[1] = 0x15;
	ep->arp_sha[2] = 0x17;
	ep->arp_sha[3] = 0xe9;
	ep->arp_sha[4] = 0x30;
	ep->arp_sha[5] = 0x68;
	ep->arp_spa[0] = 0xc0;
	ep->arp_spa[1] = 0xa8;
	ep->arp_spa[2] = 0x0a;
	ep->arp_spa[3] = 0x04;
	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
	ep->arp_tpa[0] = 0xc0;
	ep->arp_tpa[1] = 0xa8;
	ep->arp_tpa[2] = 0x0a;
	ep->arp_tpa[3] = 0x06;

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the packet */
	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Verify that the packet's data did not change */
	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
	m_freem(mbufc);
}
/**
 * Helper function that populates the ethernet header and IP header used by
 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
 * and must be large enough.
 */
static void
xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
{
	struct ether_header *eh;
	struct ip *iph;

	eh = mtod(m, struct ether_header*);
	eh->ether_dhost[0] = 0x00;
	eh->ether_dhost[1] = 0x16;
	eh->ether_dhost[2] = 0x3e;
	eh->ether_dhost[3] = 0x23;
	eh->ether_dhost[4] = 0x50;
	eh->ether_dhost[5] = 0x0b;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x16;
	eh->ether_shost[2] = 0x30;
	eh->ether_shost[3] = 0x00;
	eh->ether_shost[4] = 0x00;
	eh->ether_shost[5] = 0x00;
	eh->ether_type = htons(ETHERTYPE_IP);
	iph = (struct ip*)(eh + 1);
	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
	iph->ip_v = 4;		/* IP v4 */
	iph->ip_tos = 0;
	iph->ip_len = htons(ip_len);
	iph->ip_id = htons(ip_id);
	iph->ip_off = htons(ip_off);
	iph->ip_ttl = 64;
	iph->ip_p = ip_p;
	iph->ip_sum = htons(ip_sum);
	iph->ip_src.s_addr = htonl(0xc0a80a04);
	iph->ip_dst.s_addr = htonl(0xc0a80a05);
}
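/**
 * The checksum tests below compare against constants (IP_CSUM, ICMP_CSUM,
 * and so on) lifted from real packet captures.  As a reminder of where such
 * constants come from, here is a hedged reference sketch of the RFC 1071
 * one's-complement sum; it is illustrative only and is not the routine that
 * xnb_add_mbuf_cksum uses internally.
 */
static uint16_t __unused
xnb_unit_in_cksum(const void *buf, size_t len)
{
	const uint16_t *w = buf;
	uint32_t sum = 0;

	/* Sum the buffer as 16-bit words */
	while (len > 1) {
		sum += *w++;
		len -= 2;
	}
	/* Pad a trailing odd byte with zero */
	if (len == 1)
		sum += *(const uint8_t *)w;
	/* Fold the carries back into 16 bits and complement */
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);
	return ((uint16_t)~sum);
}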
/**
 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
 * ICMP packet
 */
static void
xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
{
	const size_t icmp_len = 64;	/* set by ping(1) */
	const size_t pkt_len = sizeof(struct ether_header) +
	    sizeof(struct ip) + icmp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct icmp *icmph;
	unsigned char pkt_orig[icmp_len];
	uint32_t *tv_field;
	uint8_t *data_payload;
	int i;
	const uint16_t ICMP_CSUM = 0xaed7;
	const uint16_t IP_CSUM = 0xe533;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example ICMP ping request */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
	iph = (struct ip*)(eh + 1);
	icmph = (struct icmp*)(iph + 1);
	icmph->icmp_type = ICMP_ECHO;
	icmph->icmp_code = 0;
	icmph->icmp_cksum = htons(ICMP_CSUM);
	icmph->icmp_id = htons(31492);
	icmph->icmp_seq = htons(0);
	/*
	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
	 * For this test, we will set the bytes individually for portability.
	 */
	tv_field = (uint32_t*)(&(icmph->icmp_hun));
	tv_field[0] = 0x4f02cfac;
	tv_field[1] = 0x0007c46a;
	/*
	 * The remainder of the packet is an incrementing 8-bit integer,
	 * starting with 8.
	 */
	data_payload = (uint8_t*)(&tv_field[2]);
	for (i = 8; i < 37; i++) {
		*data_payload++ = i;
	}

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the ICMP portion of the packet */
	bcopy(icmph, pkt_orig, icmp_len);
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the IP checksum */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));

	/* Check that the ICMP packet did not change */
	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);
	m_freem(mbufc);
}

/**
 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
 * UDP packet
 */
static void
xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
{
	const size_t udp_len = 16;
	const size_t pkt_len = sizeof(struct ether_header) +
	    sizeof(struct ip) + udp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct udphdr *udp;
	uint8_t *data_payload;
	const uint16_t IP_CSUM = 0xe56b;
	const uint16_t UDP_CSUM = 0xdde2;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
	iph = (struct ip*)(eh + 1);
	udp = (struct udphdr*)(iph + 1);
	udp->uh_sport = htons(0x51ae);
	udp->uh_dport = htons(0x08ae);
	udp->uh_ulen = htons(udp_len);
	udp->uh_sum = htons(0xbaad);	/* xnb_add_mbuf_cksum will fill this in */
	data_payload = (uint8_t*)(udp + 1);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));

	m_freem(mbufc);
}
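/**
 * The UDP and TCP checksums verified above cover a pseudo-header in addition
 * to the transport header and payload.  A hedged sketch of its layout, for
 * reference only (this type is hypothetical; the driver assembles the
 * pseudo-header internally):
 */
struct xnb_unit_pseudo_hdr {
	struct in_addr	src;	/* source address from the IP header */
	struct in_addr	dst;	/* destination address from the IP header */
	uint8_t		zero;	/* always zero */
	uint8_t		proto;	/* IPPROTO_UDP or IPPROTO_TCP */
	uint16_t	len;	/* transport length, network byte order */
} __packed;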
/**
 * Helper function that populates a TCP packet used by all of the
 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
 * large enough.
 */
static void
xnb_fill_tcp(struct mbuf *m)
{
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	uint32_t *options;
	uint8_t *data_payload;

	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	eh = mtod(m, struct ether_header*);
	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sport = htons(0x9cd9);
	tcp->th_dport = htons(2222);
	tcp->th_seq = htonl(0x00f72b10);
	tcp->th_ack = htonl(0x7f37ba6c);
	tcp->th_x2 = 0;
	tcp->th_off = 8;
	tcp->th_flags = 0x18;
	tcp->th_win = htons(0x410);
	/* th_sum is incorrect; will be inserted by function under test */
	tcp->th_sum = htons(0xbaad);
	tcp->th_urp = htons(0);
	/*
	 * The following 12 bytes of options encode:
	 * [nop, nop, TS val 33247 ecr 3457687679]
	 */
	options = (uint32_t*)(tcp + 1);
	options[0] = htonl(0x0101080a);
	options[1] = htonl(0x000081df);
	options[2] = htonl(0xce18207f);
	data_payload = (uint8_t*)(&options[3]);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';
}

/**
 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
 * packet
 */
static void
xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	const uint16_t IP_CSUM = 0xa55a;
	const uint16_t TCP_CSUM = 0x2f64;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	m_freem(mbufc);
}

/**
 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
 */
static void
xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	/*
	 * Use deliberately bad checksums, and verify that they don't get
	 * corrected by xnb_add_mbuf_cksum.
	 */
	const uint16_t IP_CSUM = 0xdead;
	const uint16_t TCP_CSUM = 0xbeef;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	iph->ip_sum = htons(IP_CSUM);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sum = htons(TCP_CSUM);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront does not use hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = 0;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check that the checksums didn't change */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	m_freem(mbufc);
}
#endif /* INET || INET6 */

/**
 * sscanf on unsigned chars
 */
static void
xnb_sscanf_hhu(char *buffer, size_t buflen)
{
	const char mystr[] = "137";
	uint8_t dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	XNB_ASSERT(sscanf(mystr, "%hhu", &dest[4]) == 1);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
}

/**
 * sscanf on signed chars
 */
static void
xnb_sscanf_hhd(char *buffer, size_t buflen)
{
	const char mystr[] = "-27";
	int8_t dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	XNB_ASSERT(sscanf(mystr, "%hhd", &dest[4]) == 1);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
}

/**
 * sscanf on signed long longs
 */
static void
xnb_sscanf_lld(char *buffer, size_t buflen)
{
	const char mystr[] = "-123456789012345";	/* about -2**47 */
	long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = (long long)0xdeadbeefdeadbeef;

	XNB_ASSERT(sscanf(mystr, "%lld", &dest[1]) == 1);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
		    -123456789012345));
}

/**
 * sscanf on unsigned long longs
 */
static void
xnb_sscanf_llu(char *buffer, size_t buflen)
{
	const char mystr[] = "12802747070103273189";
	unsigned long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = (unsigned long long)0xdeadbeefdeadbeefull;

	XNB_ASSERT(sscanf(mystr, "%llu", &dest[1]) == 1);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ?
		    (unsigned long long)0xdeadbeefdeadbeefull :
		    12802747070103273189ull));
}

/**
 * sscanf with %hhn: store the count of consumed characters into an
 * unsigned char
 */
static void
xnb_sscanf_hhn(char *buffer, size_t buflen)
{
	const char mystr[] =
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
	unsigned char dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = (unsigned char)'X';

	/*
	 * The format string below contains 64 + 64 + 32 = 160 literal
	 * characters, so %hhn should store 160; %hhn performs no conversion,
	 * so sscanf should return 0.
	 */
	XNB_ASSERT(sscanf(mystr,
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]) == 0);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));
}
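/**
 * A hedged worked example of the %hhn arithmetic above, shown on a shorter
 * string.  This sketch is hypothetical and not part of the suite; it only
 * restates the expectation that an %n-style directive stores the count of
 * consumed characters without contributing to sscanf's return value.
 */
static void __unused
xnb_sscanf_hhn_example(void)
{
	unsigned char consumed = 0;

	/* Three literal characters are matched before %hhn stores the count */
	if (sscanf("abc", "abc%hhn", &consumed) == 0)
		KASSERT(consumed == 3, ("%%hhn miscounted"));
}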