/* Simple test of virtio code, entirely in userspace. */
#define _GNU_SOURCE
#include <sched.h>
#include <err.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/uaccess.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>

#define USER_MEM (1024*1024)
void *__user_addr_min, *__user_addr_max;

/*
 * Hooks for the userspace kmalloc()/kfree() stubs: the next allocation is
 * served from __kmalloc_fake when set, and frees of pointers inside the
 * ignore range are skipped.
 */
void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
static u64 user_addr_offset;

#define RINGSIZE 256
#define ALIGN 4096

static bool never_notify_host(struct virtqueue *vq)
{
	abort();
}

static void never_callback_guest(struct virtqueue *vq)
{
	abort();
}

static bool getrange_iov(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = (u64)(unsigned long)__user_addr_min - user_addr_offset;
	r->end_incl = (u64)(unsigned long)__user_addr_max - 1 - user_addr_offset;
	r->offset = user_addr_offset;
	return true;
}

/* We return single-byte ranges. */
static bool getrange_slow(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
	if (addr < (u64)(unsigned long)__user_addr_min - user_addr_offset)
		return false;
	if (addr >= (u64)(unsigned long)__user_addr_max - user_addr_offset)
		return false;

	r->start = addr;
	r->end_incl = r->start;
	r->offset = user_addr_offset;
	return true;
}

struct guest_virtio_device {
	struct virtio_device vdev;
	int to_host_fd;
	unsigned long notifies;
};

static bool parallel_notify_host(struct virtqueue *vq)
{
	int rc;
	struct guest_virtio_device *gvdev;

	gvdev = container_of(vq->vdev, struct guest_virtio_device, vdev);
	rc = write(gvdev->to_host_fd, "", 1);
	if (rc < 0)
		return false;
	gvdev->notifies++;
	return true;
}

static bool no_notify_host(struct virtqueue *vq)
{
	return true;
}

#define NUM_XFERS (10000000)

/* We aim for two "distant" cpus. */
static void find_cpus(unsigned int *first, unsigned int *last)
{
	unsigned int i;

	*first = -1U;
	*last = 0;
	for (i = 0; i < 4096; i++) {
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(i, &set);
		if (sched_setaffinity(getpid(), sizeof(set), &set) == 0) {
			if (i < *first)
				*first = i;
			if (i > *last)
				*last = i;
		}
	}
}

/*
 * Open-coded avail-ring fetch for fast mode: returns 1 and sets *head,
 * 0 if the ring is empty, or a negative error from get_user().
 */
static inline int vringh_get_head(struct vringh *vrh, u16 *head)
{
	u16 avail_idx, i;
	int err;

	err = get_user(avail_idx, &vrh->vring.avail->idx);
	if (err)
		return err;

	if (vrh->last_avail_idx == avail_idx)
		return 0;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = vrh->last_avail_idx & (vrh->vring.num - 1);

	err = get_user(*head, &vrh->vring.avail->ring[i]);
	if (err)
		return err;

	vrh->last_avail_idx++;
	return 1;
}

static int parallel_test(u64 features,
			 bool (*getrange)(struct vringh *vrh,
					  u64 addr, struct vringh_range *r),
			 bool fast_vringh)
{
	void *host_map, *guest_map;
	int fd, mapsize, to_guest[2], to_host[2];
	unsigned long xfers = 0, notifies = 0, receives = 0;
	unsigned int first_cpu, last_cpu;
	cpu_set_t cpu_set;
	char buf[128];

	/* Create real file to mmap. */
	fd = open("/tmp/vringh_test-file", O_RDWR|O_CREAT|O_TRUNC, 0600);
	if (fd < 0)
		err(1, "Opening /tmp/vringh_test-file");

	/* Extra room at the end for some data, and indirects */
	mapsize = vring_size(RINGSIZE, ALIGN)
		+ RINGSIZE * 2 * sizeof(int)
		+ RINGSIZE * 6 * sizeof(struct vring_desc);
	mapsize = (mapsize + getpagesize() - 1) & ~(getpagesize() - 1);
	ftruncate(fd, mapsize);

	/* Parent and child use separate addresses, to check our mapping logic! */
	host_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	guest_map = mmap(NULL, mapsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	pipe(to_guest);
	pipe(to_host);

	CPU_ZERO(&cpu_set);
	find_cpus(&first_cpu, &last_cpu);
	printf("Using CPUS %u and %u\n", first_cpu, last_cpu);
	fflush(stdout);

	if (fork() != 0) {
		struct vringh vrh;
		int status, err, rlen = 0;
		char rbuf[5];

		/* We are the host: never access guest addresses! */
		munmap(guest_map, mapsize);

		__user_addr_min = host_map;
		__user_addr_max = __user_addr_min + mapsize;
		user_addr_offset = host_map - guest_map;
		assert(user_addr_offset);

		close(to_guest[0]);
		close(to_host[1]);

		vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
		vringh_init_user(&vrh, features, RINGSIZE, true,
				 vrh.vring.desc, vrh.vring.avail,
				 vrh.vring.used);
		CPU_SET(first_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			errx(1, "Could not set affinity to cpu %u", first_cpu);

		while (xfers < NUM_XFERS) {
			struct iovec host_riov[2], host_wiov[2];
			struct vringh_iov riov, wiov;
			u16 head, written;

			if (fast_vringh) {
				for (;;) {
					err = vringh_get_head(&vrh, &head);
					if (err != 0)
						break;
					err = vringh_need_notify_user(&vrh);
					if (err < 0)
						errx(1, "vringh_need_notify_user: %i",
						     err);
					if (err) {
						write(to_guest[1], "", 1);
						notifies++;
					}
				}
				if (err != 1)
					errx(1, "vringh_get_head");
				written = 0;
				goto complete;
			} else {
				vringh_iov_init(&riov,
						host_riov,
						ARRAY_SIZE(host_riov));
				vringh_iov_init(&wiov,
						host_wiov,
						ARRAY_SIZE(host_wiov));

				err = vringh_getdesc_user(&vrh, &riov, &wiov,
							  getrange, &head);
			}
			if (err == 0) {
				err = vringh_need_notify_user(&vrh);
				if (err < 0)
					errx(1, "vringh_need_notify_user: %i",
					     err);
				if (err) {
					write(to_guest[1], "", 1);
					notifies++;
				}

				if (!vringh_notify_enable_user(&vrh))
					continue;

				/* Swallow all notifies at once. */
				if (read(to_host[0], buf, sizeof(buf)) < 1)
					break;

				vringh_notify_disable_user(&vrh);
				receives++;
				continue;
			}
			if (err != 1)
				errx(1, "vringh_getdesc_user: %i", err);

			/* We simply copy bytes. */
			if (riov.used) {
				rlen = vringh_iov_pull_user(&riov, rbuf,
							    sizeof(rbuf));
				if (rlen != 4)
					errx(1, "vringh_iov_pull_user: %i",
					     rlen);
				assert(riov.i == riov.used);
				written = 0;
			} else {
				err = vringh_iov_push_user(&wiov, rbuf, rlen);
				if (err != rlen)
					errx(1, "vringh_iov_push_user: %i",
					     err);
				assert(wiov.i == wiov.used);
				written = err;
			}
		complete:
			xfers++;

			err = vringh_complete_user(&vrh, head, written);
			if (err != 0)
				errx(1, "vringh_complete_user: %i", err);
		}

		err = vringh_need_notify_user(&vrh);
		if (err < 0)
			errx(1, "vringh_need_notify_user: %i", err);
		if (err) {
			write(to_guest[1], "", 1);
			notifies++;
		}
		wait(&status);
		if (!WIFEXITED(status))
			errx(1, "Child died with signal %i?", WTERMSIG(status));
		if (WEXITSTATUS(status) != 0)
			errx(1, "Child exited %i?", WEXITSTATUS(status));
		printf("Host: notified %lu, pinged %lu\n", notifies, receives);
		return 0;
	} else {
		struct guest_virtio_device gvdev;
		struct virtqueue *vq;
		unsigned int *data;
		struct vring_desc *indirects;
		unsigned int finished = 0;

		/* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
		data = guest_map + vring_size(RINGSIZE, ALIGN);
		indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);

		/* We are the guest. */
		munmap(host_map, mapsize);

		close(to_guest[1]);
		close(to_host[0]);

		gvdev.vdev.features = features;
		gvdev.to_host_fd = to_host[1];
		gvdev.notifies = 0;

		/* Pin the guest to the other "distant" cpu. */
		CPU_SET(last_cpu, &cpu_set);
		if (sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set))
			err(1, "Could not set affinity to cpu %u", last_cpu);

		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
					 false, guest_map,
					 fast_vringh ? no_notify_host
						     : parallel_notify_host,
					 never_callback_guest, "guest vq");

		/* Don't kfree indirects. */
		__kfree_ignore_start = indirects;
		__kfree_ignore_end = indirects + RINGSIZE * 6;

		while (xfers < NUM_XFERS) {
			struct scatterlist sg[4];
			unsigned int num_sg, len;
			int *dbuf, err;
			bool output = !(xfers % 2);

			/* Consume bufs. */
			while ((dbuf = virtqueue_get_buf(vq, &len)) != NULL) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else if (!fast_vringh)
					assert(*dbuf == finished);
				finished++;
			}

			/* Produce a buffer. */
			dbuf = data + (xfers % (RINGSIZE + 1));

			if (output)
				*dbuf = xfers;
			else
				*dbuf = -1;

			switch ((xfers / sizeof(*dbuf)) % 4) {
			case 0:
				/* Nasty three-element sg list. */
				sg_init_table(sg, num_sg = 3);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 2);
				sg_set_buf(&sg[2], (void *)dbuf + 3, 1);
				break;
			case 1:
				sg_init_table(sg, num_sg = 2);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 3);
				break;
			case 2:
				sg_init_table(sg, num_sg = 1);
				sg_set_buf(&sg[0], (void *)dbuf, 4);
				break;
			case 3:
				sg_init_table(sg, num_sg = 4);
				sg_set_buf(&sg[0], (void *)dbuf, 1);
				sg_set_buf(&sg[1], (void *)dbuf + 1, 1);
				sg_set_buf(&sg[2], (void *)dbuf + 2, 1);
				sg_set_buf(&sg[3], (void *)dbuf + 3, 1);
				break;
			}

			/* May allocate an indirect, so force it to allocate
			 * user addr */
			__kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
			if (output)
				err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf,
							   GFP_KERNEL);
			else
				err = virtqueue_add_inbuf(vq, sg, num_sg,
							  dbuf, GFP_KERNEL);

			if (err == -ENOSPC) {
				if (!virtqueue_enable_cb_delayed(vq))
					continue;
				/* Swallow all notifies at once. */
				if (read(to_guest[0], buf, sizeof(buf)) < 1)
					break;

				receives++;
				virtqueue_disable_cb(vq);
				continue;
			}

			if (err)
				errx(1, "virtqueue_add_in/outbuf: %i", err);

			xfers++;
			virtqueue_kick(vq);
		}

		/* Any extra? */
		while (finished != xfers) {
			int *dbuf;
			unsigned int len;

			/* Consume bufs. */
			dbuf = virtqueue_get_buf(vq, &len);
			if (dbuf) {
				if (len == 4)
					assert(*dbuf == finished - 1);
				else
					assert(len == 0);
				finished++;
				continue;
			}

			if (!virtqueue_enable_cb_delayed(vq))
				continue;
			if (read(to_guest[0], buf, sizeof(buf)) < 1)
				break;

			receives++;
			virtqueue_disable_cb(vq);
		}

		printf("Guest: notified %lu, pinged %lu\n",
		       gvdev.notifies, receives);
		vring_del_virtqueue(vq);
		return 0;
	}
}
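
/*
 * Note on parallel_test(): parent (host) and child (guest) map the same
 * file at different addresses, so every address the host pulls out of the
 * ring must be translated through user_addr_offset -- this is what the
 * getrange callbacks exercise.  A single byte written down a pipe stands
 * in for a guest->host kick or a host->guest interrupt.
 */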

int main(int argc, char *argv[])
{
	struct virtio_device vdev;
	struct virtqueue *vq;
	struct vringh vrh;
	struct scatterlist guest_sg[RINGSIZE], *sgs[2];
	struct iovec host_riov[2], host_wiov[2];
	struct vringh_iov riov, wiov;
	struct vring_used_elem used[RINGSIZE];
	char buf[28];
	u16 head;
	int err;
	unsigned i;
	void *ret;
	bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r);
	bool fast_vringh = false, parallel = false;

	getrange = getrange_iov;
	vdev.features = 0;

	while (argv[1]) {
		if (strcmp(argv[1], "--indirect") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		else if (strcmp(argv[1], "--eventidx") == 0)
			__virtio_set_bit(&vdev, VIRTIO_RING_F_EVENT_IDX);
		else if (strcmp(argv[1], "--virtio-1") == 0)
			__virtio_set_bit(&vdev, VIRTIO_F_VERSION_1);
		else if (strcmp(argv[1], "--slow-range") == 0)
			getrange = getrange_slow;
		else if (strcmp(argv[1], "--fast-vringh") == 0)
			fast_vringh = true;
		else if (strcmp(argv[1], "--parallel") == 0)
			parallel = true;
		else
			errx(1, "Unknown arg %s", argv[1]);
		argv++;
	}

	if (parallel)
		return parallel_test(vdev.features, getrange, fast_vringh);

	if (posix_memalign(&__user_addr_min, PAGE_SIZE, USER_MEM) != 0)
		abort();
	__user_addr_max = __user_addr_min + USER_MEM;
	memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));

	/* Set up guest side. */
	vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false,
				 __user_addr_min,
				 never_notify_host, never_callback_guest,
				 "guest vq");

	/* Set up host side. */
	vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
	vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
			 vrh.vring.desc, vrh.vring.avail, vrh.vring.used);

	/* No descriptor to get yet... */
	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 0)
		errx(1, "vringh_getdesc_user: %i", err);

	/* Guest puts in a descriptor. */
	memcpy(__user_addr_max - 1, "a", 1);
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	sg_init_table(guest_sg + 1, 1);
	sg_set_buf(&guest_sg[1], __user_addr_max - 3, 2);
	sgs[0] = &guest_sg[0];
	sgs[1] = &guest_sg[1];

	/* May allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_sgs(vq, sgs, 1, 1, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_sgs: %i", err);
	__kmalloc_fake = NULL;

	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.used == 1);
	assert(riov.iov[0].iov_base == __user_addr_max - 1);
	assert(riov.iov[0].iov_len == 1);
	if (getrange != getrange_slow) {
		assert(wiov.used == 1);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 2);
	} else {
		assert(wiov.used == 2);
		assert(wiov.iov[0].iov_base == __user_addr_max - 3);
		assert(wiov.iov[0].iov_len == 1);
		assert(wiov.iov[1].iov_base == __user_addr_max - 2);
		assert(wiov.iov[1].iov_len == 1);
	}

	err = vringh_iov_pull_user(&riov, buf, 5);
	if (err != 1)
		errx(1, "vringh_iov_pull_user: %i", err);
	assert(buf[0] == 'a');
	assert(riov.i == 1);
	assert(vringh_iov_pull_user(&riov, buf, 5) == 0);

	memcpy(buf, "bcdef", 5);
	err = vringh_iov_push_user(&wiov, buf, 5);
	if (err != 2)
		errx(1, "vringh_iov_push_user: %i", err);
	assert(memcmp(__user_addr_max - 3, "bc", 2) == 0);
	assert(wiov.i == wiov.used);
	assert(vringh_iov_push_user(&wiov, buf, 5) == 0);

	/* Host is done. */
	err = vringh_complete_user(&vrh, head, err);
	if (err != 0)
		errx(1, "vringh_complete_user: %i", err);

	/* Guest should see used token now. */
	__kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	__kfree_ignore_end = __kfree_ignore_start + 1;
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);
	assert(i == 2);

	/* Guest puts in a huge descriptor. */
	sg_init_table(guest_sg, RINGSIZE);
	for (i = 0; i < RINGSIZE; i++) {
		sg_set_buf(&guest_sg[i],
			   __user_addr_max - USER_MEM/4, USER_MEM/4);
	}

	/* Fill contents with recognisable garbage. */
	for (i = 0; i < USER_MEM/4; i++)
		((char *)__user_addr_max - USER_MEM/4)[i] = i;

	/* This will allocate an indirect, so force it to allocate user addr */
	__kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
	err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
	if (err)
		errx(1, "virtqueue_add_outbuf (large): %i", err);
	__kmalloc_fake = NULL;

	/* Host picks it up (allocates new iov). */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
	if (err != 1)
		errx(1, "vringh_getdesc_user: %i", err);

	assert(riov.max_num & VRINGH_IOV_ALLOCATED);
	assert(riov.iov != host_riov);
	if (getrange != getrange_slow)
		assert(riov.used == RINGSIZE);
	else
		assert(riov.used == RINGSIZE * USER_MEM/4);

	assert(!(wiov.max_num & VRINGH_IOV_ALLOCATED));
	assert(wiov.used == 0);

	/* Pull data back out (in odd chunks), should be as expected. */
	for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
		err = vringh_iov_pull_user(&riov, buf, 3);
		if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
			errx(1, "vringh_iov_pull_user large: %i", err);
		assert(buf[0] == (char)i);
		assert(err < 2 || buf[1] == (char)(i + 1));
		assert(err < 3 || buf[2] == (char)(i + 2));
	}
	assert(riov.i == riov.used);
	vringh_iov_cleanup(&riov);
	vringh_iov_cleanup(&wiov);

	/* Complete using multi interface, just because we can. */
	used[0].id = head;
	used[0].len = 0;
	err = vringh_complete_multi_user(&vrh, used, 1);
	if (err)
		errx(1, "vringh_complete_multi_user(1): %i", err);

	/* Free up those descriptors. */
	ret = virtqueue_get_buf(vq, &i);
	if (ret != &err)
		errx(1, "virtqueue_get_buf: %p", ret);

	/* Add lots of descriptors. */
	sg_init_table(guest_sg, 1);
	sg_set_buf(&guest_sg[0], __user_addr_max - 1, 1);
	for (i = 0; i < RINGSIZE; i++) {
		err = virtqueue_add_outbuf(vq, guest_sg, 1, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (multiple): %i", err);
	}

	/* Now get many, and consume them all at once. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

	for (i = 0; i < RINGSIZE; i++) {
		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);
		used[i].id = head;
		used[i].len = 0;
	}
	/* Make sure it wraps around ring, to test! */
	assert(vrh.vring.used->idx % RINGSIZE != 0);
	err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
	if (err)
		errx(1, "vringh_complete_multi_user: %i", err);

	/* Free those buffers. */
	for (i = 0; i < RINGSIZE; i++) {
		unsigned len;

		assert(virtqueue_get_buf(vq, &len) != NULL);
	}

	/* Test weird (but legal!) indirect. */
	if (__virtio_test_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
		char *data = __user_addr_max - USER_MEM/4;
		struct vring_desc *d = __user_addr_max - USER_MEM/2;
		struct vring vring;

		/* Force creation of direct, which we modify. */
		__virtio_clear_bit(&vdev, VIRTIO_RING_F_INDIRECT_DESC);
		vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
					 false, __user_addr_min,
					 never_notify_host,
					 never_callback_guest,
					 "guest vq");

		sg_init_table(guest_sg, 4);
		sg_set_buf(&guest_sg[0], d, sizeof(*d)*2);
		sg_set_buf(&guest_sg[1], d + 2, sizeof(*d)*1);
		sg_set_buf(&guest_sg[2], data + 6, 4);
		sg_set_buf(&guest_sg[3], d + 3, sizeof(*d)*3);

		err = virtqueue_add_outbuf(vq, guest_sg, 4, &err, GFP_KERNEL);
		if (err)
			errx(1, "virtqueue_add_outbuf (indirect): %i", err);

		vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);

		/* They're used in order, but double-check... */
		assert(vring.desc[0].addr == (unsigned long)d);
		assert(vring.desc[1].addr == (unsigned long)(d+2));
		assert(vring.desc[2].addr == (unsigned long)data + 6);
		assert(vring.desc[3].addr == (unsigned long)(d+3));
		vring.desc[0].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[1].flags |= VRING_DESC_F_INDIRECT;
		vring.desc[3].flags |= VRING_DESC_F_INDIRECT;

		/* First indirect */
		d[0].addr = (unsigned long)data;
		d[0].len = 1;
		d[0].flags = VRING_DESC_F_NEXT;
		d[0].next = 1;
		d[1].addr = (unsigned long)data + 1;
		d[1].len = 2;
		d[1].flags = 0;

		/* Second indirect */
		d[2].addr = (unsigned long)data + 3;
		d[2].len = 3;
		d[2].flags = 0;

		/* Third indirect */
		d[3].addr = (unsigned long)data + 10;
		d[3].len = 5;
		d[3].flags = VRING_DESC_F_NEXT;
		d[3].next = 1;
		d[4].addr = (unsigned long)data + 15;
		d[4].len = 6;
		d[4].flags = VRING_DESC_F_NEXT;
		d[4].next = 2;
		d[5].addr = (unsigned long)data + 21;
		d[5].len = 7;
		d[5].flags = 0;

		/* Host picks it up (allocates new iov). */
		vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
		vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));

		err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
		if (err != 1)
			errx(1, "vringh_getdesc_user: %i", err);

		if (head != 0)
			errx(1, "vringh_getdesc_user: head %i not 0", head);

		assert(riov.max_num & VRINGH_IOV_ALLOCATED);
		if (getrange != getrange_slow)
			assert(riov.used == 7);
		else
			assert(riov.used == 28);
		err = vringh_iov_pull_user(&riov, buf, 29);
		assert(err == 28);

		/* Data should be linear. */
		for (i = 0; i < err; i++)
			assert(buf[i] == i);
		vringh_iov_cleanup(&riov);
	}

	/* Don't leak memory... */
	vring_del_virtqueue(vq);
	free(__user_addr_min);

	return 0;
}
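
/*
 * A minimal sketch of how this test is typically built and run, assuming
 * this file lives in tools/virtio/ of a Linux source tree, where the stub
 * headers there supply the kernel APIs used above:
 *
 *	$ make -C tools/virtio vringh_test
 *	$ ./vringh_test
 *	$ ./vringh_test --parallel --eventidx --fast-vringh
 */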