// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cambridge Greys Ltd
 * Copyright (C) 2015-2016 Anton Ivanov (aivanov@brocade.com)
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 */

/* 2001-09-28...2002-04-17
 * Partition stuff by James_McMechan@hotmail.com
 * old style ubd by setting UBD_SHIFT to 0
 * 2002-09-27...2002-10-18 massive tinkering for 2.5
 * partitions have changed in 2.5
 * 2003-01-29 more tinkering for 2.5.59-1
 * This should now address the sysfs problems and has
 * the symlink for devfs to allow for booting with
 * the common /dev/ubd/discX/... names rather than
 * only /dev/ubdN/discN this version also has lots of
 * clean ups preparing for ubd-many.
 * James McMechan
 */

#define UBD_SHIFT 4

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/cdrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <asm/tlbflush.h>
#include <kern_util.h>
#include "mconsole_kern.h"
#include <init.h>
#include <irq_kern.h>
#include "ubd.h"
#include <os.h>
#include "cow.h"

/* Max request size is determined by sector mask - 32K */
#define UBD_MAX_REQUEST (8 * sizeof(long))

struct io_desc {
	char *buffer;
	unsigned long length;
	unsigned long sector_mask;
	unsigned long long cow_offset;
	unsigned long bitmap_words[2];
};

struct io_thread_req {
	struct request *req;
	int fds[2];
	unsigned long offsets[2];
	unsigned long long offset;
	int sectorsize;
	int error;

	int desc_cnt;
	/* io_desc has to be the last element of the struct */
	struct io_desc io_desc[];
};


static struct io_thread_req * (*irq_req_buffer)[];
static struct io_thread_req *irq_remainder;
static int irq_remainder_size;

static struct io_thread_req * (*io_req_buffer)[];
static struct io_thread_req *io_remainder;
static int io_remainder_size;


static inline int ubd_test_bit(__u64 bit, unsigned char *data)
{
	__u64 n;
	int bits, off;

	bits = sizeof(data[0]) * 8;
	n = bit / bits;
	off = bit % bits;
	return (data[n] & (1 << off)) != 0;
}

static inline void ubd_set_bit(__u64 bit, unsigned char *data)
{
	__u64 n;
	int bits, off;

	bits = sizeof(data[0]) * 8;
	n = bit / bits;
	off = bit % bits;
	data[n] |= (1 << off);
}
/*End stuff from ubd_user.h*/

#define DRIVER_NAME "uml-blkdev"

static DEFINE_MUTEX(ubd_lock);
static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */

static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg);
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);

#define MAX_DEV (16)

static const struct block_device_operations ubd_blops = {
	.owner		= THIS_MODULE,
	.ioctl		= ubd_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.getgeo		= ubd_getgeo,
};

#ifdef CONFIG_BLK_DEV_UBD_SYNC
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \
					 .cl = 1 })
#else
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \
					 .cl = 1 })
#endif
static struct openflags global_openflags = OPEN_FLAGS;
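
/*
 * Copy-on-write state for one ubd device: the bitmap (loaded from the COW
 * file header) records which sectors have already been written to the COW
 * file rather than residing only in the backing file.
 */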
struct cow {
	/* backing file name */
	char *file;
	/* backing file fd */
	int fd;
	unsigned long *bitmap;
	unsigned long bitmap_len;
	int bitmap_offset;
	int data_offset;
};

#define MAX_SG 64

struct ubd {
	/* name (and fd, below) of the file opened for writing, either the
	 * backing or the cow file. */
	char *file;
	char *serial;
	int fd;
	__u64 size;
	struct openflags boot_openflags;
	struct openflags openflags;
	unsigned shared:1;
	unsigned no_cow:1;
	unsigned no_trim:1;
	struct cow cow;
	struct platform_device pdev;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	spinlock_t lock;
};

#define DEFAULT_COW { \
	.file = NULL, \
	.fd = -1, \
	.bitmap = NULL, \
	.bitmap_offset = 0, \
	.data_offset = 0, \
}

#define DEFAULT_UBD { \
	.file = NULL, \
	.serial = NULL, \
	.fd = -1, \
	.size = -1, \
	.boot_openflags = OPEN_FLAGS, \
	.openflags = OPEN_FLAGS, \
	.no_cow = 0, \
	.no_trim = 0, \
	.shared = 0, \
	.cow = DEFAULT_COW, \
	.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
}

/* Protected by ubd_lock */
static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD };

static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd);

static int fake_ide_setup(char *str)
{
	pr_warn("The fake_ide option has been removed\n");
	return 1;
}
__setup("fake_ide", fake_ide_setup);

__uml_help(fake_ide_setup,
"fake_ide\n"
" Obsolete stub.\n\n"
);

static int parse_unit(char **ptr)
{
	char *str = *ptr, *end;
	int n = -1;

	if(isdigit(*str)) {
		n = simple_strtoul(str, &end, 0);
		if(end == str)
			return -1;
		*ptr = end;
	}
	else if (('a' <= *str) && (*str <= 'z')) {
		n = *str - 'a';
		str++;
		*ptr = str;
	}
	return n;
}

/* If *index_out == -1 at exit, the passed option was a general one;
 * otherwise, the str pointer is used (and owned) inside ubd_devs array, so it
 * should not be freed on exit.
 */
static int ubd_setup_common(char *str, int *index_out, char **error_out)
{
	struct ubd *ubd_dev;
	struct openflags flags = global_openflags;
	char *file, *backing_file, *serial;
	int n, err = 0, i;

	if(index_out) *index_out = -1;
	n = *str;
	if(n == '='){
		str++;
		if(!strcmp(str, "sync")){
			global_openflags = of_sync(global_openflags);
			return err;
		}

		pr_warn("fake major not supported any more\n");
		return 0;
	}

	n = parse_unit(&str);
	if(n < 0){
		*error_out = "Couldn't parse device number";
		return -EINVAL;
	}
	if(n >= MAX_DEV){
		*error_out = "Device number out of range";
		return 1;
	}

	err = -EBUSY;
	mutex_lock(&ubd_lock);

	ubd_dev = &ubd_devs[n];
	if(ubd_dev->file != NULL){
		*error_out = "Device is already configured";
		goto out;
	}

	if (index_out)
		*index_out = n;

	err = -EINVAL;
	for (i = 0; i < sizeof("rscdt="); i++) {
		switch (*str) {
		case 'r':
			flags.w = 0;
			break;
		case 's':
			flags.s = 1;
			break;
		case 'd':
			ubd_dev->no_cow = 1;
			break;
		case 'c':
			ubd_dev->shared = 1;
			break;
		case 't':
			ubd_dev->no_trim = 1;
			break;
		case '=':
			str++;
			goto break_loop;
		default:
			*error_out = "Expected '=' or flag letter "
				"(r, s, c, t or d)";
			goto out;
		}
		str++;
	}

	if (*str == '=')
		*error_out = "Too many flags specified";
	else
		*error_out = "Missing '='";
	goto out;

break_loop:
	file = strsep(&str, ",:");
	if (*file == '\0')
		file = NULL;

	backing_file = strsep(&str, ",:");
	if (backing_file && *backing_file == '\0')
		backing_file = NULL;

	serial = strsep(&str, ",:");
	if (serial && *serial == '\0')
		serial = NULL;

	if (backing_file && ubd_dev->no_cow) {
		*error_out = "Can't specify both 'd' and a cow file";
		goto out;
	}

	err = 0;
	ubd_dev->file = file;
	ubd_dev->cow.file = backing_file;
	ubd_dev->serial = serial;
	ubd_dev->boot_openflags = flags;
out:
	mutex_unlock(&ubd_lock);
	return err;
}

static int ubd_setup(char *str)
{
	char *error;
	int err;

	err = ubd_setup_common(str, NULL, &error);
	if(err)
		printk(KERN_ERR "Failed to initialize device with \"%s\" : "
		       "%s\n", str, error);
	return 1;
}

__setup("ubd", ubd_setup);
__uml_help(ubd_setup,
"ubd<n><flags>=<filename>[(:|,)<filename2>][(:|,)<serial>]\n"
" This is used to associate a device with a file in the underlying\n"
" filesystem. When specifying two filenames, the first one is the\n"
" COW name and the second is the backing file name. As separator you can\n"
" use either a ':' or a ',': the first one allows writing things like:\n"
" ubd0=~/Uml/root_cow:~/Uml/root_backing_file\n"
" while with a ',' the shell would not expand the 2nd '~'.\n"
" When using only one filename, UML will detect whether to treat it like\n"
" a COW file or a backing file. To override this detection, add the 'd'\n"
" flag:\n"
" ubd0d=BackingFile\n"
" Usually, there is a filesystem in the file, but \n"
" that's not required. Swap devices containing swap files can be\n"
" specified like this. Also, a file which doesn't contain a\n"
" filesystem can have its contents read in the virtual \n"
" machine by running 'dd' on the device. <n> must be in the range\n"
" 0 to 7. Appending an 'r' to the number will cause that device\n"
" to be mounted read-only. For example ubd1r=./ext_fs. Appending\n"
" an 's' will cause data to be written to disk on the host immediately.\n"
" 'c' will cause the device to be treated as being shared between multiple\n"
" UMLs and file locking will be turned off - this is appropriate for a\n"
" cluster filesystem and inappropriate at almost all other times.\n\n"
" 't' will disable trim/discard support on the device (enabled by default).\n\n"
" An optional device serial number can be passed via the serial parameter\n"
" on the cmdline; it is exposed as a sysfs entry. This is particularly\n"
" useful when a unique number should be given to the device. Note that when\n"
" specifying a serial number, filename2 must also be present. It can be\n"
" an empty string, in which case the backing file is not used:\n"
" ubd0=File,,Serial\n"
);

static int udb_setup(char *str)
{
	printk("udb%s specified on command line is almost certainly a ubd -> "
	       "udb TYPO\n", str);
	return 1;
}

__setup("udb", udb_setup);
__uml_help(udb_setup,
"udb\n"
" This option is here solely to catch ubd -> udb typos, which can be\n"
" next to impossible to catch visually unless you specifically look for\n"
" them. The only result of any option starting with 'udb' is an error\n"
" in the boot output.\n\n"
);

/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;

/* Function to read several request pointers at a time
 * handling fractional reads if (and as) needed
 */

static int bulk_req_safe_read(
	int fd,
	struct io_thread_req * (*request_buffer)[],
	struct io_thread_req **remainder,
	int *remainder_size,
	int max_recs
	)
{
	int n = 0;
	int res = 0;

	if (*remainder_size > 0) {
		memmove(
			(char *) request_buffer,
			(char *) remainder, *remainder_size
		);
		n = *remainder_size;
	}

	res = os_read_file(
		fd,
		((char *) request_buffer) + *remainder_size,
		sizeof(struct io_thread_req *)*max_recs
			- *remainder_size
	);
	if (res > 0) {
		n += res;
		if ((n % sizeof(struct io_thread_req *)) > 0) {
			/*
			 * The read returned a length that is not a multiple
			 * of a request pointer; theoretically possible, but
			 * never observed in the wild, so the read routine
			 * must be able to handle it.
			 */
			*remainder_size = n % sizeof(struct io_thread_req *);
			WARN(*remainder_size > 0, "UBD IPC read returned a partial result");
			memmove(
				remainder,
				((char *) request_buffer) +
					(n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *),
				*remainder_size
			);
			n = n - *remainder_size;
		}
	} else {
		n = res;
	}
	return n;
}

static void ubd_end_request(struct io_thread_req *io_req)
{
	if (io_req->error == BLK_STS_NOTSUPP) {
		if (req_op(io_req->req) == REQ_OP_DISCARD)
			blk_queue_disable_discard(io_req->req->q);
		else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES)
			blk_queue_disable_write_zeroes(io_req->req->q);
	}
	blk_mq_end_request(io_req->req, io_req->error);
	kfree(io_req);
}

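/*
 * Interrupt handler: the I/O thread writes completed request pointers back
 * over thread_fd; drain them here and finish the corresponding block
 * requests.
 */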
static irqreturn_t ubd_intr(int irq, void *dev)
{
	int len, i;

	while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
					 &irq_remainder, &irq_remainder_size,
					 UBD_REQ_BUFFER_SIZE)) >= 0) {
		for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
			ubd_end_request((*irq_req_buffer)[i]);
	}

	if (len < 0 && len != -EAGAIN)
		pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
	return IRQ_HANDLED;
}

/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;

static void kill_io_thread(void)
{
	if(io_pid != -1)
		os_kill_process(io_pid, 1);
}

__uml_exitcall(kill_io_thread);

static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
{
	char *file;
	int fd;
	int err;

	__u32 version;
	__u32 align;
	char *backing_file;
	time64_t mtime;
	unsigned long long size;
	int sector_size;
	int bitmap_offset;

	if (ubd_dev->file && ubd_dev->cow.file) {
		file = ubd_dev->cow.file;

		goto out;
	}

	fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0);
	if (fd < 0)
		return fd;

	err = read_cow_header(file_reader, &fd, &version, &backing_file,
			      &mtime, &size, &sector_size, &align, &bitmap_offset);
	os_close_file(fd);

	if(err == -EINVAL)
		file = ubd_dev->file;
	else
		file = backing_file;

out:
	return os_file_size(file, size_out);
}

static int read_cow_bitmap(int fd, void *buf, int offset, int len)
{
	int err;

	err = os_pread_file(fd, buf, len, offset);
	if (err < 0)
		return err;

	return 0;
}

static int backing_file_mismatch(char *file, __u64 size, time64_t mtime)
{
	time64_t modtime;
	unsigned long long actual;
	int err;

	err = os_file_modtime(file, &modtime);
	if (err < 0) {
		printk(KERN_ERR "Failed to get modification time of backing "
		       "file \"%s\", err = %d\n", file, -err);
		return err;
	}

	err = os_file_size(file, &actual);
	if (err < 0) {
		printk(KERN_ERR "Failed to get size of backing file \"%s\", "
		       "err = %d\n", file, -err);
		return err;
	}

	if (actual != size) {
		/*__u64 can be a long on AMD64 and with %lu GCC complains; so
		 * the typecast.*/
		printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header "
		       "vs backing file\n", (unsigned long long) size, actual);
		return -EINVAL;
	}
	if (modtime != mtime) {
		printk(KERN_ERR "mtime mismatch (%lld vs %lld) of COW header vs "
		       "backing file\n", mtime, modtime);
		return -EINVAL;
	}
	return 0;
}

static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
{
	struct uml_stat buf1, buf2;
	int err;

	if (from_cmdline == NULL)
		return 0;
	if (!strcmp(from_cmdline, from_cow))
		return 0;

	err = os_stat_file(from_cmdline, &buf1);
	if (err < 0) {
		printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline,
		       -err);
		return 0;
	}
	err = os_stat_file(from_cow, &buf2);
	if (err < 0) {
		printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow,
		       -err);
		return 1;
	}
	if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
		return 0;

	printk(KERN_ERR "Backing file mismatch - \"%s\" requested, "
	       "\"%s\" specified in COW header of \"%s\"\n",
	       from_cmdline, from_cow, cow);
	return 1;
}

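/*
 * Open a ubd image. If the file carries a COW header, validate the backing
 * file (optionally switching it to the one given on the command line) and
 * return the bitmap/data layout through the *_out parameters.
 */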
static int open_ubd_file(char *file, struct openflags *openflags, int shared,
			 char **backing_file_out, int *bitmap_offset_out,
			 unsigned long *bitmap_len_out, int *data_offset_out,
			 int *create_cow_out)
{
	time64_t mtime;
	unsigned long long size;
	__u32 version, align;
	char *backing_file;
	int fd, err, sectorsize, asked_switch, mode = 0644;

	fd = os_open_file(file, *openflags, mode);
	if (fd < 0) {
		if ((fd == -ENOENT) && (create_cow_out != NULL))
			*create_cow_out = 1;
		if (!openflags->w ||
		    ((fd != -EROFS) && (fd != -EACCES)))
			return fd;
		openflags->w = 0;
		fd = os_open_file(file, *openflags, mode);
		if (fd < 0)
			return fd;
	}

	if (shared)
		printk(KERN_INFO "Not locking \"%s\" on the host\n", file);
	else {
		err = os_lock_file(fd, openflags->w);
		if (err < 0) {
			printk(KERN_ERR "Failed to lock '%s', err = %d\n",
			       file, -err);
			goto out_close;
		}
	}

	/* Successful return case! */
	if (backing_file_out == NULL)
		return fd;

	err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
			      &size, &sectorsize, &align, bitmap_offset_out);
	if (err && (*backing_file_out != NULL)) {
		printk(KERN_ERR "Failed to read COW header from COW file "
		       "\"%s\", errno = %d\n", file, -err);
		goto out_close;
	}
	if (err)
		return fd;

	asked_switch = path_requires_switch(*backing_file_out, backing_file,
					    file);

	/* Allow switching only if no mismatch. */
	if (asked_switch && !backing_file_mismatch(*backing_file_out, size,
						   mtime)) {
		printk(KERN_ERR "Switching backing file to '%s'\n",
		       *backing_file_out);
		err = write_cow_header(file, fd, *backing_file_out,
				       sectorsize, align, &size);
		if (err) {
			printk(KERN_ERR "Switch failed, errno = %d\n", -err);
			goto out_close;
		}
	} else {
		*backing_file_out = backing_file;
		err = backing_file_mismatch(*backing_file_out, size, mtime);
		if (err)
			goto out_close;
	}

	cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
		  bitmap_len_out, data_offset_out);

	return fd;
out_close:
	os_close_file(fd);
	return err;
}

static int create_cow_file(char *cow_file, char *backing_file,
			   struct openflags flags,
			   int sectorsize, int alignment, int *bitmap_offset_out,
			   unsigned long *bitmap_len_out, int *data_offset_out)
{
	int err, fd;

	flags.c = 1;
	fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
	if (fd < 0) {
		err = fd;
		printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n",
		       cow_file, -err);
		goto out;
	}

	err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
			    bitmap_offset_out, bitmap_len_out,
			    data_offset_out);
	if (!err)
		return fd;
	os_close_file(fd);
out:
	return err;
}

static void ubd_close_dev(struct ubd *ubd_dev)
{
	os_close_file(ubd_dev->fd);
	if(ubd_dev->cow.file == NULL)
		return;

	os_close_file(ubd_dev->cow.fd);
	vfree(ubd_dev->cow.bitmap);
	ubd_dev->cow.bitmap = NULL;
}

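/*
 * Open the device's file, creating the COW file first if a backing file was
 * configured but the COW file does not exist yet, and load the COW bitmap
 * into memory.
 */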
static int ubd_open_dev(struct ubd *ubd_dev)
{
	struct openflags flags;
	char **back_ptr;
	int err, create_cow, *create_ptr;
	int fd;

	ubd_dev->openflags = ubd_dev->boot_openflags;
	create_cow = 0;
	create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL;
	back_ptr = ubd_dev->no_cow ? NULL : &ubd_dev->cow.file;

	fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared,
			   back_ptr, &ubd_dev->cow.bitmap_offset,
			   &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset,
			   create_ptr);

	if((fd == -ENOENT) && create_cow){
		fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file,
				     ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE,
				     &ubd_dev->cow.bitmap_offset,
				     &ubd_dev->cow.bitmap_len,
				     &ubd_dev->cow.data_offset);
		if(fd >= 0){
			printk(KERN_INFO "Creating \"%s\" as COW file for "
			       "\"%s\"\n", ubd_dev->file, ubd_dev->cow.file);
		}
	}

	if(fd < 0){
		printk("Failed to open '%s', errno = %d\n", ubd_dev->file,
		       -fd);
		return fd;
	}
	ubd_dev->fd = fd;

	if(ubd_dev->cow.file != NULL){
		err = -ENOMEM;
		ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
		if(ubd_dev->cow.bitmap == NULL){
			printk(KERN_ERR "Failed to vmalloc COW bitmap\n");
			goto error;
		}
		flush_tlb_kernel_vm();

		err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap,
				      ubd_dev->cow.bitmap_offset,
				      ubd_dev->cow.bitmap_len);
		if(err < 0)
			goto error;

		flags = ubd_dev->openflags;
		flags.w = 0;
		err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL,
				    NULL, NULL, NULL, NULL);
		if(err < 0) goto error;
		ubd_dev->cow.fd = err;
	}
	return 0;
error:
	os_close_file(ubd_dev->fd);
	return err;
}

static void ubd_device_release(struct device *dev)
{
	struct ubd *ubd_dev = dev_get_drvdata(dev);

	blk_mq_free_tag_set(&ubd_dev->tag_set);
	*ubd_dev = ((struct ubd) DEFAULT_UBD);
}

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct ubd *ubd_dev = disk->private_data;

	if (!ubd_dev)
		return 0;

	return sprintf(buf, "%s", ubd_dev->serial);
}

static DEVICE_ATTR_RO(serial);

static struct attribute *ubd_attrs[] = {
	&dev_attr_serial.attr,
	NULL,
};

static umode_t ubd_attrs_are_visible(struct kobject *kobj,
				     struct attribute *a, int n)
{
	return a->mode;
}

static const struct attribute_group ubd_attr_group = {
	.attrs = ubd_attrs,
	.is_visible = ubd_attrs_are_visible,
};

static const struct attribute_group *ubd_attr_groups[] = {
	&ubd_attr_group,
	NULL,
};

#define ROUND_BLOCK(n) ((n + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE))

static const struct blk_mq_ops ubd_mq_ops = {
	.queue_rq = ubd_queue_rq,
};

static int ubd_add(int n, char **error_out)
{
	struct ubd *ubd_dev = &ubd_devs[n];
	struct queue_limits lim = {
		.max_segments		= MAX_SG,
		.seg_boundary_mask	= PAGE_SIZE - 1,
	};
	struct gendisk *disk;
	int err = 0;

	if(ubd_dev->file == NULL)
		goto out;

	if (ubd_dev->cow.file)
		lim.max_hw_sectors = 8 * sizeof(long);
	if (!ubd_dev->no_trim) {
		lim.max_hw_discard_sectors = UBD_MAX_REQUEST;
		lim.max_write_zeroes_sectors = UBD_MAX_REQUEST;
	}

	err = ubd_file_size(ubd_dev, &ubd_dev->size);
	if(err < 0){
		*error_out = "Couldn't determine size of device's file";
		goto out;
	}

	err = ubd_open_dev(ubd_dev);
	if (err) {
		pr_err("ubd%c: Can't open \"%s\": errno = %d\n",
		       'a' + n, ubd_dev->file, -err);
		goto out;
	}

	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);

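	/* Single-queue blk-mq setup; ubd_queue_rq() forwards requests to the
	 * I/O thread. */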
	ubd_dev->tag_set.ops = &ubd_mq_ops;
	ubd_dev->tag_set.queue_depth = 64;
	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ubd_dev->tag_set.driver_data = ubd_dev;
	ubd_dev->tag_set.nr_hw_queues = 1;

	err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
	if (err)
		goto out_close;

	disk = blk_mq_alloc_disk(&ubd_dev->tag_set, &lim, ubd_dev);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_cleanup_tags;
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_write_cache(disk->queue, true, false);
	disk->major = UBD_MAJOR;
	disk->first_minor = n << UBD_SHIFT;
	disk->minors = 1 << UBD_SHIFT;
	disk->fops = &ubd_blops;
	set_capacity(disk, ubd_dev->size / 512);
	sprintf(disk->disk_name, "ubd%c", 'a' + n);
	disk->private_data = ubd_dev;
	set_disk_ro(disk, !ubd_dev->openflags.w);

	ubd_dev->pdev.id = n;
	ubd_dev->pdev.name = DRIVER_NAME;
	ubd_dev->pdev.dev.release = ubd_device_release;
	dev_set_drvdata(&ubd_dev->pdev.dev, ubd_dev);
	platform_device_register(&ubd_dev->pdev);

	err = device_add_disk(&ubd_dev->pdev.dev, disk, ubd_attr_groups);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(disk);
out_cleanup_tags:
	blk_mq_free_tag_set(&ubd_dev->tag_set);
out_close:
	ubd_close_dev(ubd_dev);
out:
	return err;
}

static int ubd_config(char *str, char **error_out)
{
	int n, ret;

	/* This string is possibly broken up and stored, so it's only
	 * freed if ubd_setup_common fails, or if only general options
	 * were set.
	 */
	str = kstrdup(str, GFP_KERNEL);
	if (str == NULL) {
		*error_out = "Failed to allocate memory";
		return -ENOMEM;
	}

	ret = ubd_setup_common(str, &n, error_out);
	if (ret)
		goto err_free;

	if (n == -1) {
		ret = 0;
		goto err_free;
	}

	mutex_lock(&ubd_lock);
	ret = ubd_add(n, error_out);
	if (ret)
		ubd_devs[n].file = NULL;
	mutex_unlock(&ubd_lock);

out:
	return ret;

err_free:
	kfree(str);
	goto out;
}

static int ubd_get_config(char *name, char *str, int size, char **error_out)
{
	struct ubd *ubd_dev;
	int n, len = 0;

	n = parse_unit(&name);
	if((n >= MAX_DEV) || (n < 0)){
		*error_out = "ubd_get_config : device number out of range";
		return -1;
	}

	ubd_dev = &ubd_devs[n];
	mutex_lock(&ubd_lock);

	if(ubd_dev->file == NULL){
		CONFIG_CHUNK(str, size, len, "", 1);
		goto out;
	}

	CONFIG_CHUNK(str, size, len, ubd_dev->file, 0);

	if(ubd_dev->cow.file != NULL){
		CONFIG_CHUNK(str, size, len, ",", 0);
		CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1);
	}
	else CONFIG_CHUNK(str, size, len, "", 1);

out:
	mutex_unlock(&ubd_lock);
	return len;
}

static int ubd_id(char **str, int *start_out, int *end_out)
{
	int n;

	n = parse_unit(str);
	*start_out = 0;
	*end_out = MAX_DEV - 1;
	return n;
}

static int ubd_remove(int n, char **error_out)
{
	struct ubd *ubd_dev;
	int err = -ENODEV;

	mutex_lock(&ubd_lock);

	ubd_dev = &ubd_devs[n];

	if(ubd_dev->file == NULL)
		goto out;

	if (ubd_dev->disk) {
		/* you cannot remove an open disk */
		err = -EBUSY;
		if (disk_openers(ubd_dev->disk))
			goto out;

		del_gendisk(ubd_dev->disk);
		ubd_close_dev(ubd_dev);
		put_disk(ubd_dev->disk);
	}

	err = 0;
	platform_device_unregister(&ubd_dev->pdev);
out:
	mutex_unlock(&ubd_lock);
	return err;
}

/* All these are called by mconsole in process context and without
 * ubd-specific locks. The structure itself is const except for .list.
 */
static struct mc_device ubd_mc = {
	.list = LIST_HEAD_INIT(ubd_mc.list),
	.name = "ubd",
	.config = ubd_config,
	.get_config = ubd_get_config,
	.id = ubd_id,
	.remove = ubd_remove,
};

static int __init ubd_mc_init(void)
{
	mconsole_register_dev(&ubd_mc);
	return 0;
}

__initcall(ubd_mc_init);

static int __init ubd0_init(void)
{
	struct ubd *ubd_dev = &ubd_devs[0];

	mutex_lock(&ubd_lock);
	if(ubd_dev->file == NULL)
		ubd_dev->file = "root_fs";
	mutex_unlock(&ubd_lock);

	return 0;
}

__initcall(ubd0_init);

/* Used in ubd_init, which is an initcall */
static struct platform_driver ubd_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};

static int __init ubd_init(void)
{
	char *error;
	int i, err;

	if (register_blkdev(UBD_MAJOR, "ubd"))
		return -1;

	irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
				       sizeof(struct io_thread_req *),
				       GFP_KERNEL
		);
	irq_remainder = 0;

	if (irq_req_buffer == NULL) {
		printk(KERN_ERR "Failed to initialize ubd buffering\n");
		return -ENOMEM;
	}
	io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
				      sizeof(struct io_thread_req *),
				      GFP_KERNEL
		);

	io_remainder = 0;

	if (io_req_buffer == NULL) {
		printk(KERN_ERR "Failed to initialize ubd buffering\n");
		return -ENOMEM;
	}
	platform_driver_register(&ubd_driver);
	mutex_lock(&ubd_lock);
	for (i = 0; i < MAX_DEV; i++){
		err = ubd_add(i, &error);
		if(err)
			printk(KERN_ERR "Failed to initialize ubd device %d :"
			       "%s\n", i, error);
	}
	mutex_unlock(&ubd_lock);
	return 0;
}

late_initcall(ubd_init);

static int __init ubd_driver_init(void){
	unsigned long stack;
	int err;

	/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
	if(global_openflags.s){
		printk(KERN_INFO "ubd: Synchronous mode\n");
		/* Letting ubd=sync be equivalent to using ubd#s= for every
		 * device is enough, so the I/O thread is still used. */
	}
	stack = alloc_stack(0, 0);
	io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
	if(io_pid < 0){
		printk(KERN_ERR
		       "ubd : Failed to start I/O thread (errno = %d) - "
		       "falling back to synchronous I/O\n", -io_pid);
		io_pid = -1;
		return 0;
	}
	err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
			     0, "ubd", ubd_devs);
	if(err < 0)
		printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
	return 0;
}

device_initcall(ubd_driver_init);

static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
			  __u64 *cow_offset, unsigned long *bitmap,
			  __u64 bitmap_offset, unsigned long *bitmap_words,
			  __u64 bitmap_len)
{
	__u64 sector = io_offset >> SECTOR_SHIFT;
	int i, update_bitmap = 0;

	for (i = 0; i < length >> SECTOR_SHIFT; i++) {
		if(cow_mask != NULL)
			ubd_set_bit(i, (unsigned char *) cow_mask);
		if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
			continue;

		update_bitmap = 1;
		ubd_set_bit(sector + i, (unsigned char *) bitmap);
	}

	if(!update_bitmap)
		return;

	*cow_offset = sector / (sizeof(unsigned long) * 8);

	/* This takes care of the case where we're exactly at the end of the
	 * device, and *cow_offset + 1 is off the end. So, just back it up
	 * by one word. Thanks to Lynn Kerby for the fix and James McMechan
	 * for the original diagnosis.
	 */
	if (*cow_offset == (DIV_ROUND_UP(bitmap_len,
					 sizeof(unsigned long)) - 1))
		(*cow_offset)--;

	bitmap_words[0] = bitmap[*cow_offset];
	bitmap_words[1] = bitmap[*cow_offset + 1];

	*cow_offset *= sizeof(unsigned long);
	*cow_offset += bitmap_offset;
}

static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
		       unsigned long offset, unsigned long *bitmap,
		       __u64 bitmap_offset, __u64 bitmap_len)
{
	__u64 sector = offset >> SECTOR_SHIFT;
	int i;

	if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
		panic("Operation too long");

	if (req_op(req->req) == REQ_OP_READ) {
		for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
			if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
				ubd_set_bit(i, (unsigned char *)
					    &segment->sector_mask);
		}
	} else {
		cowify_bitmap(offset, segment->length, &segment->sector_mask,
			      &segment->cow_offset, bitmap, bitmap_offset,
			      segment->bitmap_words, bitmap_len);
	}
}

static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
			struct request *req)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int i = 0;
	unsigned long byte_offset = io_req->offset;
	enum req_op op = req_op(req);

	if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
		io_req->io_desc[0].buffer = NULL;
		io_req->io_desc[0].length = blk_rq_bytes(req);
	} else {
		rq_for_each_segment(bvec, req, iter) {
			BUG_ON(i >= io_req->desc_cnt);

			io_req->io_desc[i].buffer = bvec_virt(&bvec);
			io_req->io_desc[i].length = bvec.bv_len;
			i++;
		}
	}

	if (dev->cow.file) {
		for (i = 0; i < io_req->desc_cnt; i++) {
			cowify_req(io_req, &io_req->io_desc[i], byte_offset,
				   dev->cow.bitmap, dev->cow.bitmap_offset,
				   dev->cow.bitmap_len);
			byte_offset += io_req->io_desc[i].length;
		}

	}
}

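/*
 * Allocate an io_thread_req with one io_desc per segment. fds[0]/offsets[0]
 * address the backing file (or the image itself without COW) and
 * fds[1]/offsets[1] the COW data area; do_io() selects between them per
 * sector via sector_mask.
 */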
static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
					   int desc_cnt)
{
	struct io_thread_req *io_req;
	int i;

	io_req = kmalloc(sizeof(*io_req) +
			 (desc_cnt * sizeof(struct io_desc)),
			 GFP_ATOMIC);
	if (!io_req)
		return NULL;

	io_req->req = req;
	if (dev->cow.file)
		io_req->fds[0] = dev->cow.fd;
	else
		io_req->fds[0] = dev->fd;
	io_req->error = 0;
	io_req->sectorsize = SECTOR_SIZE;
	io_req->fds[1] = dev->fd;
	io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
	io_req->offsets[0] = 0;
	io_req->offsets[1] = dev->cow.data_offset;

	for (i = 0 ; i < desc_cnt; i++) {
		io_req->io_desc[i].sector_mask = 0;
		io_req->io_desc[i].cow_offset = -1;
	}

	return io_req;
}

static int ubd_submit_request(struct ubd *dev, struct request *req)
{
	int segs = 0;
	struct io_thread_req *io_req;
	int ret;
	enum req_op op = req_op(req);

	if (op == REQ_OP_FLUSH)
		segs = 0;
	else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
		segs = 1;
	else
		segs = blk_rq_nr_phys_segments(req);

	io_req = ubd_alloc_req(dev, req, segs);
	if (!io_req)
		return -ENOMEM;

	io_req->desc_cnt = segs;
	if (segs)
		ubd_map_req(dev, io_req, req);

	ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
	if (ret != sizeof(io_req)) {
		if (ret != -EAGAIN)
			pr_err("write to io thread failed: %d\n", -ret);
		kfree(io_req);
	}
	return ret;
}

static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct ubd *ubd_dev = hctx->queue->queuedata;
	struct request *req = bd->rq;
	int ret = 0, res = BLK_STS_OK;

	blk_mq_start_request(req);

	spin_lock_irq(&ubd_dev->lock);

	switch (req_op(req)) {
	case REQ_OP_FLUSH:
	case REQ_OP_READ:
	case REQ_OP_WRITE:
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = ubd_submit_request(ubd_dev, req);
		break;
	default:
		WARN_ON_ONCE(1);
		res = BLK_STS_NOTSUPP;
	}

	spin_unlock_irq(&ubd_dev->lock);

	if (ret < 0) {
		if (ret == -ENOMEM)
			res = BLK_STS_RESOURCE;
		else
			res = BLK_STS_DEV_RESOURCE;
	}

	return res;
}

static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ubd *ubd_dev = bdev->bd_disk->private_data;

	geo->heads = 128;
	geo->sectors = 32;
	geo->cylinders = ubd_dev->size / (128 * 32 * 512);
	return 0;
}

static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct ubd *ubd_dev = bdev->bd_disk->private_data;
	u16 ubd_id[ATA_ID_WORDS];

	switch (cmd) {
		struct cdrom_volctrl volume;
	case HDIO_GET_IDENTITY:
		memset(&ubd_id, 0, ATA_ID_WORDS * 2);
		ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512);
		ubd_id[ATA_ID_HEADS] = 128;
		ubd_id[ATA_ID_SECTORS] = 32;
		if(copy_to_user((char __user *) arg, (char *) &ubd_id,
				sizeof(ubd_id)))
			return -EFAULT;
		return 0;

	case CDROMVOLREAD:
		if(copy_from_user(&volume, (char __user *) arg, sizeof(volume)))
			return -EFAULT;
		volume.channel0 = 255;
		volume.channel1 = 255;
		volume.channel2 = 255;
		volume.channel3 = 255;
		if(copy_to_user((char __user *) arg, &volume, sizeof(volume)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int map_error(int error_code)
{
	switch (error_code) {
	case 0:
		return BLK_STS_OK;
	case ENOSYS:
	case EOPNOTSUPP:
		return BLK_STS_NOTSUPP;
	case ENOSPC:
		return BLK_STS_NOSPC;
	}
	return BLK_STS_IOERR;
}

/*
 * Everything from here onwards *IS NOT PART OF THE KERNEL*
 *
 * The following functions are part of UML hypervisor code.
 * All functions from here onwards are executed as a helper
 * thread and are not allowed to execute any kernel functions.
 *
 * Any communication must occur strictly via shared memory and IPC.
 *
 * Do not add printks, locks, kernel memory operations, etc - it
 * will result in unpredictable behaviour and/or crashes.
 */

static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
{
	int n;

	if (segment->cow_offset == -1)
		return map_error(0);

	n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
			   sizeof(segment->bitmap_words), segment->cow_offset);
	if (n != sizeof(segment->bitmap_words))
		return map_error(-n);

	return map_error(0);
}

static void do_io(struct io_thread_req *req, struct io_desc *desc)
{
	char *buf = NULL;
	unsigned long len;
	int n, nsectors, start, end, bit;
	__u64 off;

	/* FLUSH is really a special case, we cannot "case" it with others */

	if (req_op(req->req) == REQ_OP_FLUSH) {
		/* fds[0] is always either the rw image or our cow file */
		req->error = map_error(-os_sync_file(req->fds[0]));
		return;
	}

	nsectors = desc->length / req->sectorsize;
	start = 0;
	do {
		bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
		end = start;
		while((end < nsectors) &&
		      (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
			end++;

		off = req->offset + req->offsets[bit] +
			start * req->sectorsize;
		len = (end - start) * req->sectorsize;
		if (desc->buffer != NULL)
			buf = &desc->buffer[start * req->sectorsize];

		switch (req_op(req->req)) {
		case REQ_OP_READ:
			n = 0;
			do {
				buf = &buf[n];
				len -= n;
				n = os_pread_file(req->fds[bit], buf, len, off);
				if (n < 0) {
					req->error = map_error(-n);
					return;
				}
			} while((n < len) && (n != 0));
			if (n < len) memset(&buf[n], 0, len - n);
			break;
		case REQ_OP_WRITE:
			n = os_pwrite_file(req->fds[bit], buf, len, off);
			if(n != len){
				req->error = map_error(-n);
				return;
			}
			break;
		case REQ_OP_DISCARD:
			n = os_falloc_punch(req->fds[bit], off, len);
			if (n) {
				req->error = map_error(-n);
				return;
			}
			break;
		case REQ_OP_WRITE_ZEROES:
			n = os_falloc_zeroes(req->fds[bit], off, len);
			if (n) {
				req->error = map_error(-n);
				return;
			}
			break;
		default:
			WARN_ON_ONCE(1);
			req->error = BLK_STS_NOTSUPP;
			return;
		}

		start = end;
	} while(start < nsectors);

	req->offset += len;
	req->error = update_bitmap(req, desc);
}

/* Changed in start_io_thread, which is serialized by being called only
 * from ubd_init, which is an initcall.
 */
int kernel_fd = -1;

/* Only changed by the io thread. XXX: currently unused. */
static int io_count;

int io_thread(void *arg)
{
	int n, count, written, res;

	os_fix_helper_signals();

	while(1){
		n = bulk_req_safe_read(
			kernel_fd,
			io_req_buffer,
			&io_remainder,
			&io_remainder_size,
			UBD_REQ_BUFFER_SIZE
		);
		if (n <= 0) {
			if (n == -EAGAIN)
				ubd_read_poll(-1);

			continue;
		}

		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
			struct io_thread_req *req = (*io_req_buffer)[count];
			int i;

			io_count++;
			for (i = 0; !req->error && i < req->desc_cnt; i++)
				do_io(req, &(req->io_desc[i]));

		}

		written = 0;

		do {
			res = os_write_file(kernel_fd,
					    ((char *) io_req_buffer) + written,
					    n - written);
			if (res >= 0) {
				written += res;
			}
			if (written < n) {
				ubd_write_poll(-1);
			}
		} while (written < n);
	}

	return 0;
}