// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Cambridge Greys Ltd
 * Copyright (C) 2015-2016 Anton Ivanov (aivanov@brocade.com)
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 */

/* 2001-09-28...2002-04-17
 * Partition stuff by James_McMechan@hotmail.com
 * old style ubd by setting UBD_SHIFT to 0
 * 2002-09-27...2002-10-18 massive tinkering for 2.5
 * partitions have changed in 2.5
 * 2003-01-29 more tinkering for 2.5.59-1
 * This should now address the sysfs problems and has
 * the symlink for devfs to allow for booting with
 * the common /dev/ubd/discX/... names rather than
 * only /dev/ubdN/discN this version also has lots of
 * clean ups preparing for ubd-many.
 * James McMechan
 */

#define UBD_SHIFT 4

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/cdrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <asm/tlbflush.h>
#include <kern_util.h>
#include "mconsole_kern.h"
#include <init.h>
#include <irq_kern.h>
#include "ubd.h"
#include <os.h>
#include "cow.h"

/* Max request size is determined by sector mask - 32K */
#define UBD_MAX_REQUEST (8 * sizeof(long))

struct io_desc {
	char *buffer;
	unsigned long length;
	unsigned long sector_mask;
	unsigned long long cow_offset;
	unsigned long bitmap_words[2];
};

struct io_thread_req {
	struct request *req;
	int fds[2];
	unsigned long offsets[2];
	unsigned long long offset;
	int sectorsize;
	int error;

	int desc_cnt;
	/* io_desc has to be the last element of the struct */
	struct io_desc io_desc[];
};


static struct io_thread_req * (*irq_req_buffer)[];
static struct io_thread_req *irq_remainder;
static int irq_remainder_size;

static struct io_thread_req * (*io_req_buffer)[];
static struct io_thread_req *io_remainder;
static int io_remainder_size;


static inline int ubd_test_bit(__u64 bit, unsigned char *data)
{
	__u64 n;
	int bits, off;

	bits = sizeof(data[0]) * 8;
	n = bit / bits;
	off = bit % bits;
	return (data[n] & (1 << off)) != 0;
}

static inline void ubd_set_bit(__u64 bit, unsigned char *data)
{
	__u64 n;
	int bits, off;

	bits = sizeof(data[0]) * 8;
	n = bit / bits;
	off = bit % bits;
	data[n] |= (1 << off);
}
/*End stuff from ubd_user.h*/

#define DRIVER_NAME "uml-blkdev"

static DEFINE_MUTEX(ubd_lock);
static DEFINE_MUTEX(ubd_mutex); /* replaces BKL, might not be needed */

static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg);
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);

#define MAX_DEV (16)

static const struct block_device_operations ubd_blops = {
	.owner		= THIS_MODULE,
	.ioctl		= ubd_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.getgeo		= ubd_getgeo,
};

#ifdef CONFIG_BLK_DEV_UBD_SYNC
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 1, .c = 0, \
					 .cl = 1 })
#else
#define OPEN_FLAGS ((struct openflags) { .r = 1, .w = 1, .s = 0, .c = 0, \
					 .cl = 1 })
#endif

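/*
 * Default flags used to open the image files: read-write, with O_SYNC
 * added when CONFIG_BLK_DEV_UBD_SYNC is set or "ubd=sync" is given on
 * the command line (see ubd_setup_common() below).
 */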
static struct openflags global_openflags = OPEN_FLAGS;

struct cow {
	/* backing file name */
	char *file;
	/* backing file fd */
	int fd;
	unsigned long *bitmap;
	unsigned long bitmap_len;
	int bitmap_offset;
	int data_offset;
};

#define MAX_SG 64

struct ubd {
	/* name (and fd, below) of the file opened for writing, either the
	 * backing or the cow file. */
	char *file;
	char *serial;
	int fd;
	__u64 size;
	struct openflags boot_openflags;
	struct openflags openflags;
	unsigned shared:1;
	unsigned no_cow:1;
	unsigned no_trim:1;
	struct cow cow;
	struct platform_device pdev;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	spinlock_t lock;
};

#define DEFAULT_COW { \
	.file = NULL, \
	.fd = -1, \
	.bitmap = NULL, \
	.bitmap_offset = 0, \
	.data_offset = 0, \
}

#define DEFAULT_UBD { \
	.file = NULL, \
	.serial = NULL, \
	.fd = -1, \
	.size = -1, \
	.boot_openflags = OPEN_FLAGS, \
	.openflags = OPEN_FLAGS, \
	.no_cow = 0, \
	.no_trim = 0, \
	.shared = 0, \
	.cow = DEFAULT_COW, \
	.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
}

/* Protected by ubd_lock */
static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD };

static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd);

static int fake_ide_setup(char *str)
{
	pr_warn("The fake_ide option has been removed\n");
	return 1;
}
__setup("fake_ide", fake_ide_setup);

__uml_help(fake_ide_setup,
"fake_ide\n"
"    Obsolete stub.\n\n"
);

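/*
 * Parse the unit number of an "ubd..." option: either a numeric index
 * ("ubd3=...") or a single drive letter ("ubdc=..." selects unit 2).
 * *ptr is advanced past the unit, leaving any flag characters and the
 * '=' for the caller.
 */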
static int parse_unit(char **ptr)
{
	char *str = *ptr, *end;
	int n = -1;

	if(isdigit(*str)) {
		n = simple_strtoul(str, &end, 0);
		if(end == str)
			return -1;
		*ptr = end;
	}
	else if (('a' <= *str) && (*str <= 'z')) {
		n = *str - 'a';
		str++;
		*ptr = str;
	}
	return n;
}

/* If *index_out == -1 at exit, the passed option was a general one;
 * otherwise, the str pointer is used (and owned) inside ubd_devs array, so it
 * should not be freed on exit.
 */
static int ubd_setup_common(char *str, int *index_out, char **error_out)
{
	struct ubd *ubd_dev;
	struct openflags flags = global_openflags;
	char *file, *backing_file, *serial;
	int n, err = 0, i;

	if(index_out) *index_out = -1;
	n = *str;
	if(n == '='){
		str++;
		if(!strcmp(str, "sync")){
			global_openflags = of_sync(global_openflags);
			return err;
		}

		pr_warn("fake major not supported any more\n");
		return 0;
	}

	n = parse_unit(&str);
	if(n < 0){
		*error_out = "Couldn't parse device number";
		return -EINVAL;
	}
	if(n >= MAX_DEV){
		*error_out = "Device number out of range";
		return 1;
	}

	err = -EBUSY;
	mutex_lock(&ubd_lock);

	ubd_dev = &ubd_devs[n];
	if(ubd_dev->file != NULL){
		*error_out = "Device is already configured";
		goto out;
	}

	if (index_out)
		*index_out = n;

	err = -EINVAL;
	for (i = 0; i < sizeof("rscdt="); i++) {
		switch (*str) {
		case 'r':
			flags.w = 0;
			break;
		case 's':
			flags.s = 1;
			break;
		case 'd':
			ubd_dev->no_cow = 1;
			break;
		case 'c':
			ubd_dev->shared = 1;
			break;
		case 't':
			ubd_dev->no_trim = 1;
			break;
		case '=':
			str++;
			goto break_loop;
		default:
			*error_out = "Expected '=' or flag letter "
				"(r, s, c, t or d)";
			goto out;
		}
		str++;
	}

	if (*str == '=')
		*error_out = "Too many flags specified";
	else
		*error_out = "Missing '='";
	goto out;

break_loop:
	file = strsep(&str, ",:");
	if (*file == '\0')
		file = NULL;

	backing_file = strsep(&str, ",:");
	if (backing_file && *backing_file == '\0')
		backing_file = NULL;

	serial = strsep(&str, ",:");
	if (serial && *serial == '\0')
		serial = NULL;

	if (backing_file && ubd_dev->no_cow) {
		*error_out = "Can't specify both 'd' and a cow file";
		goto out;
	}

	err = 0;
	ubd_dev->file = file;
	ubd_dev->cow.file = backing_file;
	ubd_dev->serial = serial;
	ubd_dev->boot_openflags = flags;
out:
	mutex_unlock(&ubd_lock);
	return err;
}

static int ubd_setup(char *str)
{
	char *error;
	int err;

	err = ubd_setup_common(str, NULL, &error);
	if(err)
		printk(KERN_ERR "Failed to initialize device with \"%s\" : "
		       "%s\n", str, error);
	return 1;
}

__setup("ubd", ubd_setup);

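/*
 * Boot-time configuration examples (the full syntax is in the help text
 * below): "ubd0=root_fs" attaches a plain image, "ubd0=root_cow,root_fs"
 * layers a COW file over a backing file, and "ubd1r=./ext_fs" attaches a
 * second device read-only.
 */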
__uml_help(ubd_setup,
"ubd<n><flags>=<filename>[(:|,)<filename2>][(:|,)<serial>]\n"
"    This is used to associate a device with a file in the underlying\n"
"    filesystem. When specifying two filenames, the first one is the\n"
"    COW name and the second is the backing file name. As separator you can\n"
"    use either a ':' or a ',': the first one allows writing things like:\n"
"	ubd0=~/Uml/root_cow:~/Uml/root_backing_file\n"
"    while with a ',' the shell would not expand the 2nd '~'.\n"
"    When using only one filename, UML will detect whether to treat it like\n"
"    a COW file or a backing file. To override this detection, add the 'd'\n"
"    flag:\n"
"	ubd0d=BackingFile\n"
"    Usually, there is a filesystem in the file, but \n"
"    that's not required. Swap devices containing swap files can be\n"
"    specified like this. Also, a file which doesn't contain a\n"
"    filesystem can have its contents read in the virtual \n"
"    machine by running 'dd' on the device. <n> must be in the range\n"
"    0 to 7. Appending an 'r' to the number will cause that device\n"
"    to be mounted read-only. For example ubd1r=./ext_fs. Appending\n"
"    an 's' will cause data to be written to disk on the host immediately.\n"
"    'c' will cause the device to be treated as being shared between multiple\n"
"    UMLs and file locking will be turned off - this is appropriate for a\n"
"    cluster filesystem and inappropriate at almost all other times.\n\n"
"    't' will disable trim/discard support on the device (enabled by default).\n\n"
"    An optional device serial number can be set with the serial parameter\n"
"    on the cmdline and is exposed as a sysfs entry. This is particularly\n"
"    useful when a unique number should be given to the device. Note that when\n"
"    specifying a serial, filename2 must also be present. It can be\n"
"    an empty string, in which case the backing file is not used:\n"
"	ubd0=File,,Serial\n"
);

static int udb_setup(char *str)
{
	printk("udb%s specified on command line is almost certainly a ubd -> "
	       "udb TYPO\n", str);
	return 1;
}

__setup("udb", udb_setup);
__uml_help(udb_setup,
"udb\n"
"    This option is here solely to catch ubd -> udb typos, which can be\n"
"    next to impossible to catch visually unless you specifically look for\n"
"    them. The only result of any option starting with 'udb' is an error\n"
"    in the boot output.\n\n"
);

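/*
 * thread_fd is the kernel end of the pipe to the I/O helper thread:
 * ubd_submit_request() writes io_thread_req pointers to it and
 * ubd_intr() reads back the pointers of completed requests when
 * UBD_IRQ fires.
 */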
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;

/* Function to read several request pointers at a time
 * handling fractional reads if (and as) needed
 */

static int bulk_req_safe_read(
	int fd,
	struct io_thread_req * (*request_buffer)[],
	struct io_thread_req **remainder,
	int *remainder_size,
	int max_recs
	)
{
	int n = 0;
	int res = 0;

	if (*remainder_size > 0) {
		memmove(
			(char *) request_buffer,
			(char *) remainder, *remainder_size
		);
		n = *remainder_size;
	}

	res = os_read_file(
			fd,
			((char *) request_buffer) + *remainder_size,
			sizeof(struct io_thread_req *)*max_recs
				- *remainder_size
		);
	if (res > 0) {
		n += res;
		if ((n % sizeof(struct io_thread_req *)) > 0) {
			/*
			 * Read somehow returned not a multiple of dword
			 * theoretically possible, but never observed in the
			 * wild, so read routine must be able to handle it
			 */
			*remainder_size = n % sizeof(struct io_thread_req *);
			WARN(*remainder_size > 0, "UBD IPC read returned a partial result");
			memmove(
				remainder,
				((char *) request_buffer) +
					(n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *),
				*remainder_size
			);
			n = n - *remainder_size;
		}
	} else {
		n = res;
	}
	return n;
}

static void ubd_end_request(struct io_thread_req *io_req)
{
	if (io_req->error == BLK_STS_NOTSUPP) {
		if (req_op(io_req->req) == REQ_OP_DISCARD)
			blk_queue_disable_discard(io_req->req->q);
		else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES)
			blk_queue_disable_write_zeroes(io_req->req->q);
	}
	blk_mq_end_request(io_req->req, io_req->error);
	kfree(io_req);
}

static irqreturn_t ubd_intr(int irq, void *dev)
{
	int len, i;

	while ((len = bulk_req_safe_read(thread_fd, irq_req_buffer,
					 &irq_remainder, &irq_remainder_size,
					 UBD_REQ_BUFFER_SIZE)) >= 0) {
		for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
			ubd_end_request((*irq_req_buffer)[i]);
	}

	if (len < 0 && len != -EAGAIN)
		pr_err("spurious interrupt in %s, err = %d\n", __func__, len);
	return IRQ_HANDLED;
}

/* Only changed by ubd_init, which is an initcall. */
static int io_pid = -1;

static void kill_io_thread(void)
{
	if(io_pid != -1)
		os_kill_process(io_pid, 1);
}

__uml_exitcall(kill_io_thread);

static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
{
	char *file;
	int fd;
	int err;

	__u32 version;
	__u32 align;
	char *backing_file;
	time64_t mtime;
	unsigned long long size;
	int sector_size;
	int bitmap_offset;

	if (ubd_dev->file && ubd_dev->cow.file) {
		file = ubd_dev->cow.file;

		goto out;
	}

	fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0);
	if (fd < 0)
		return fd;

	err = read_cow_header(file_reader, &fd, &version, &backing_file,
			      &mtime, &size, &sector_size, &align, &bitmap_offset);
	os_close_file(fd);

	if(err == -EINVAL)
		file = ubd_dev->file;
	else
		file = backing_file;

out:
	return os_file_size(file, size_out);
}

static int read_cow_bitmap(int fd, void *buf, int offset, int len)
{
	int err;

	err = os_pread_file(fd, buf, len, offset);
	if (err < 0)
		return err;

	return 0;
}

static int backing_file_mismatch(char *file, __u64 size, time64_t mtime)
{
	time64_t modtime;
	unsigned long long actual;
	int err;

	err = os_file_modtime(file, &modtime);
	if (err < 0) {
		printk(KERN_ERR "Failed to get modification time of backing "
		       "file \"%s\", err = %d\n", file, -err);
		return err;
	}

	err = os_file_size(file, &actual);
	if (err < 0) {
		printk(KERN_ERR "Failed to get size of backing file \"%s\", "
		       "err = %d\n", file, -err);
		return err;
	}

	if (actual != size) {
		/*__u64 can be a long on AMD64 and with %lu GCC complains; so
		 * the typecast.*/
		printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header "
		       "vs backing file\n", (unsigned long long) size, actual);
		return -EINVAL;
	}
	if (modtime != mtime) {
		printk(KERN_ERR "mtime mismatch (%lld vs %lld) of COW header vs "
		       "backing file\n", mtime, modtime);
		return -EINVAL;
	}
	return 0;
}

static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
{
	struct uml_stat buf1, buf2;
	int err;

	if (from_cmdline == NULL)
		return 0;
	if (!strcmp(from_cmdline, from_cow))
		return 0;

	err = os_stat_file(from_cmdline, &buf1);
	if (err < 0) {
		printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline,
		       -err);
		return 0;
	}
	err = os_stat_file(from_cow, &buf2);
	if (err < 0) {
		printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow,
		       -err);
		return 1;
	}
	if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
		return 0;

	printk(KERN_ERR "Backing file mismatch - \"%s\" requested, "
	       "\"%s\" specified in COW header of \"%s\"\n",
	       from_cmdline, from_cow, cow);
	return 1;
}

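/*
 * Open an image file. If backing_file_out is non-NULL the file is
 * probed for a COW header; on success the backing file name, bitmap
 * offset/length and data offset are returned, and the backing file
 * recorded in the header may be switched to the one named on the
 * command line when the two differ but are otherwise consistent.
 * Returns an open fd, or a negative error code.
 */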
static int open_ubd_file(char *file, struct openflags *openflags, int shared,
			 char **backing_file_out, int *bitmap_offset_out,
			 unsigned long *bitmap_len_out, int *data_offset_out,
			 int *create_cow_out)
{
	time64_t mtime;
	unsigned long long size;
	__u32 version, align;
	char *backing_file;
	int fd, err, sectorsize, asked_switch, mode = 0644;

	fd = os_open_file(file, *openflags, mode);
	if (fd < 0) {
		if ((fd == -ENOENT) && (create_cow_out != NULL))
			*create_cow_out = 1;
		if (!openflags->w ||
		    ((fd != -EROFS) && (fd != -EACCES)))
			return fd;
		openflags->w = 0;
		fd = os_open_file(file, *openflags, mode);
		if (fd < 0)
			return fd;
	}

	if (shared)
		printk(KERN_INFO "Not locking \"%s\" on the host\n", file);
	else {
		err = os_lock_file(fd, openflags->w);
		if (err < 0) {
			printk(KERN_ERR "Failed to lock '%s', err = %d\n",
			       file, -err);
			goto out_close;
		}
	}

	/* Successful return case! */
	if (backing_file_out == NULL)
		return fd;

	err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
			      &size, &sectorsize, &align, bitmap_offset_out);
	if (err && (*backing_file_out != NULL)) {
		printk(KERN_ERR "Failed to read COW header from COW file "
		       "\"%s\", errno = %d\n", file, -err);
		goto out_close;
	}
	if (err)
		return fd;

	asked_switch = path_requires_switch(*backing_file_out, backing_file,
					    file);

	/* Allow switching only if no mismatch. */
	if (asked_switch && !backing_file_mismatch(*backing_file_out, size,
						   mtime)) {
		printk(KERN_ERR "Switching backing file to '%s'\n",
		       *backing_file_out);
		err = write_cow_header(file, fd, *backing_file_out,
				       sectorsize, align, &size);
		if (err) {
			printk(KERN_ERR "Switch failed, errno = %d\n", -err);
			goto out_close;
		}
	} else {
		*backing_file_out = backing_file;
		err = backing_file_mismatch(*backing_file_out, size, mtime);
		if (err)
			goto out_close;
	}

	cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
		  bitmap_len_out, data_offset_out);

	return fd;
 out_close:
	os_close_file(fd);
	return err;
}

static int create_cow_file(char *cow_file, char *backing_file,
			   struct openflags flags,
			   int sectorsize, int alignment, int *bitmap_offset_out,
			   unsigned long *bitmap_len_out, int *data_offset_out)
{
	int err, fd;

	flags.c = 1;
	fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
	if (fd < 0) {
		err = fd;
		printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n",
		       cow_file, -err);
		goto out;
	}

	err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
			    bitmap_offset_out, bitmap_len_out,
			    data_offset_out);
	if (!err)
		return fd;
	os_close_file(fd);
 out:
	return err;
}

static void ubd_close_dev(struct ubd *ubd_dev)
{
	os_close_file(ubd_dev->fd);
	if(ubd_dev->cow.file == NULL)
		return;

	os_close_file(ubd_dev->cow.fd);
	vfree(ubd_dev->cow.bitmap);
	ubd_dev->cow.bitmap = NULL;
}

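/*
 * Open the device's files: ubd_dev->fd is the image opened for writing
 * (the COW file when one is configured, created on first use), and
 * cow.fd is the backing file opened read-only. The COW bitmap is read
 * into memory here as well.
 */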
static int ubd_open_dev(struct ubd *ubd_dev)
{
	struct openflags flags;
	char **back_ptr;
	int err, create_cow, *create_ptr;
	int fd;

	ubd_dev->openflags = ubd_dev->boot_openflags;
	create_cow = 0;
	create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL;
	back_ptr = ubd_dev->no_cow ? NULL : &ubd_dev->cow.file;

	fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared,
			   back_ptr, &ubd_dev->cow.bitmap_offset,
			   &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset,
			   create_ptr);

	if((fd == -ENOENT) && create_cow){
		fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file,
				     ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE,
				     &ubd_dev->cow.bitmap_offset,
				     &ubd_dev->cow.bitmap_len,
				     &ubd_dev->cow.data_offset);
		if(fd >= 0){
			printk(KERN_INFO "Creating \"%s\" as COW file for "
			       "\"%s\"\n", ubd_dev->file, ubd_dev->cow.file);
		}
	}

	if(fd < 0){
		printk("Failed to open '%s', errno = %d\n", ubd_dev->file,
		       -fd);
		return fd;
	}
	ubd_dev->fd = fd;

	if(ubd_dev->cow.file != NULL){
		err = -ENOMEM;
		ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
		if(ubd_dev->cow.bitmap == NULL){
			printk(KERN_ERR "Failed to vmalloc COW bitmap\n");
			goto error;
		}
		flush_tlb_kernel_vm();

		err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap,
				      ubd_dev->cow.bitmap_offset,
				      ubd_dev->cow.bitmap_len);
		if(err < 0)
			goto error;

		flags = ubd_dev->openflags;
		flags.w = 0;
		err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL,
				    NULL, NULL, NULL, NULL);
		if(err < 0) goto error;
		ubd_dev->cow.fd = err;
	}
	return 0;
 error:
	os_close_file(ubd_dev->fd);
	return err;
}

static void ubd_device_release(struct device *dev)
{
	struct ubd *ubd_dev = dev_get_drvdata(dev);

	blk_mq_free_tag_set(&ubd_dev->tag_set);
	*ubd_dev = ((struct ubd) DEFAULT_UBD);
}

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct ubd *ubd_dev = disk->private_data;

	if (!ubd_dev)
		return 0;

	return sprintf(buf, "%s", ubd_dev->serial);
}

static DEVICE_ATTR_RO(serial);

static struct attribute *ubd_attrs[] = {
	&dev_attr_serial.attr,
	NULL,
};

static umode_t ubd_attrs_are_visible(struct kobject *kobj,
				     struct attribute *a, int n)
{
	return a->mode;
}

static const struct attribute_group ubd_attr_group = {
	.attrs = ubd_attrs,
	.is_visible = ubd_attrs_are_visible,
};

static const struct attribute_group *ubd_attr_groups[] = {
	&ubd_attr_group,
	NULL,
};

#define ROUND_BLOCK(n) ((n + (SECTOR_SIZE - 1)) & (-SECTOR_SIZE))

static const struct blk_mq_ops ubd_mq_ops = {
	.queue_rq = ubd_queue_rq,
};

static int ubd_add(int n, char **error_out)
{
	struct ubd *ubd_dev = &ubd_devs[n];
	struct queue_limits lim = {
		.max_segments		= MAX_SG,
		.seg_boundary_mask	= PAGE_SIZE - 1,
		.features		= BLK_FEAT_WRITE_CACHE,
	};
	struct gendisk *disk;
	int err = 0;

	if(ubd_dev->file == NULL)
		goto out;

	if (ubd_dev->cow.file)
		lim.max_hw_sectors = 8 * sizeof(long);
	if (!ubd_dev->no_trim) {
		lim.max_hw_discard_sectors = UBD_MAX_REQUEST;
		lim.max_write_zeroes_sectors = UBD_MAX_REQUEST;
	}

	err = ubd_file_size(ubd_dev, &ubd_dev->size);
	if(err < 0){
		*error_out = "Couldn't determine size of device's file";
		goto out;
	}

	err = ubd_open_dev(ubd_dev);
	if (err) {
		pr_err("ubd%c: Can't open \"%s\": errno = %d\n",
		       'a' + n, ubd_dev->file, -err);
		goto out;
	}

	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);

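	/*
	 * Single-queue blk-mq setup: one hardware queue, 64 requests deep;
	 * ubd_queue_rq() hands each request to the I/O helper thread
	 * instead of performing the I/O itself.
	 */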
	ubd_dev->tag_set.ops = &ubd_mq_ops;
	ubd_dev->tag_set.queue_depth = 64;
	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ubd_dev->tag_set.driver_data = ubd_dev;
	ubd_dev->tag_set.nr_hw_queues = 1;

	err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
	if (err)
		goto out_close;

	disk = blk_mq_alloc_disk(&ubd_dev->tag_set, &lim, ubd_dev);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_cleanup_tags;
	}

	disk->major = UBD_MAJOR;
	disk->first_minor = n << UBD_SHIFT;
	disk->minors = 1 << UBD_SHIFT;
	disk->fops = &ubd_blops;
	set_capacity(disk, ubd_dev->size / 512);
	sprintf(disk->disk_name, "ubd%c", 'a' + n);
	disk->private_data = ubd_dev;
	set_disk_ro(disk, !ubd_dev->openflags.w);

	ubd_dev->pdev.id = n;
	ubd_dev->pdev.name = DRIVER_NAME;
	ubd_dev->pdev.dev.release = ubd_device_release;
	dev_set_drvdata(&ubd_dev->pdev.dev, ubd_dev);
	platform_device_register(&ubd_dev->pdev);

	err = device_add_disk(&ubd_dev->pdev.dev, disk, ubd_attr_groups);
	if (err)
		goto out_cleanup_disk;

	/* Remember the disk so ubd_remove() can tear it down again. */
	ubd_dev->disk = disk;
	return 0;

out_cleanup_disk:
	put_disk(disk);
out_cleanup_tags:
	blk_mq_free_tag_set(&ubd_dev->tag_set);
out_close:
	ubd_close_dev(ubd_dev);
out:
	return err;
}

static int ubd_config(char *str, char **error_out)
{
	int n, ret;

	/* This string is possibly broken up and stored, so it's only
	 * freed if ubd_setup_common fails, or if only general options
	 * were set.
	 */
	str = kstrdup(str, GFP_KERNEL);
	if (str == NULL) {
		*error_out = "Failed to allocate memory";
		return -ENOMEM;
	}

	ret = ubd_setup_common(str, &n, error_out);
	if (ret)
		goto err_free;

	if (n == -1) {
		ret = 0;
		goto err_free;
	}

	mutex_lock(&ubd_lock);
	ret = ubd_add(n, error_out);
	if (ret)
		ubd_devs[n].file = NULL;
	mutex_unlock(&ubd_lock);

out:
	return ret;

err_free:
	kfree(str);
	goto out;
}

static int ubd_get_config(char *name, char *str, int size, char **error_out)
{
	struct ubd *ubd_dev;
	int n, len = 0;

	n = parse_unit(&name);
	if((n >= MAX_DEV) || (n < 0)){
		*error_out = "ubd_get_config : device number out of range";
		return -1;
	}

	ubd_dev = &ubd_devs[n];
	mutex_lock(&ubd_lock);

	if(ubd_dev->file == NULL){
		CONFIG_CHUNK(str, size, len, "", 1);
		goto out;
	}

	CONFIG_CHUNK(str, size, len, ubd_dev->file, 0);

	if(ubd_dev->cow.file != NULL){
		CONFIG_CHUNK(str, size, len, ",", 0);
		CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1);
	}
	else CONFIG_CHUNK(str, size, len, "", 1);

out:
	mutex_unlock(&ubd_lock);
	return len;
}

static int ubd_id(char **str, int *start_out, int *end_out)
{
	int n;

	n = parse_unit(str);
	*start_out = 0;
	*end_out = MAX_DEV - 1;
	return n;
}

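/*
 * Hot-removal, driven by the mconsole "remove" command: the disk can
 * only be torn down while nobody holds it open, otherwise -EBUSY is
 * returned to the caller.
 */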
static int ubd_remove(int n, char **error_out)
{
	struct ubd *ubd_dev;
	int err = -ENODEV;

	mutex_lock(&ubd_lock);

	ubd_dev = &ubd_devs[n];

	if(ubd_dev->file == NULL)
		goto out;

	if (ubd_dev->disk) {
		/* you cannot remove an open disk */
		err = -EBUSY;
		if (disk_openers(ubd_dev->disk))
			goto out;

		del_gendisk(ubd_dev->disk);
		ubd_close_dev(ubd_dev);
		put_disk(ubd_dev->disk);
	}

	err = 0;
	platform_device_unregister(&ubd_dev->pdev);
out:
	mutex_unlock(&ubd_lock);
	return err;
}

/* All these are called by mconsole in process context and without
 * ubd-specific locks. The structure itself is const except for .list.
 */
static struct mc_device ubd_mc = {
	.list		= LIST_HEAD_INIT(ubd_mc.list),
	.name		= "ubd",
	.config		= ubd_config,
	.get_config	= ubd_get_config,
	.id		= ubd_id,
	.remove		= ubd_remove,
};

static int __init ubd_mc_init(void)
{
	mconsole_register_dev(&ubd_mc);
	return 0;
}

__initcall(ubd_mc_init);

static int __init ubd0_init(void)
{
	struct ubd *ubd_dev = &ubd_devs[0];

	mutex_lock(&ubd_lock);
	if(ubd_dev->file == NULL)
		ubd_dev->file = "root_fs";
	mutex_unlock(&ubd_lock);

	return 0;
}

__initcall(ubd0_init);

/* Used in ubd_init, which is an initcall */
static struct platform_driver ubd_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};

static int __init ubd_init(void)
{
	char *error;
	int i, err;

	if (register_blkdev(UBD_MAJOR, "ubd"))
		return -1;

	irq_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
				       sizeof(struct io_thread_req *),
				       GFP_KERNEL
		);
	irq_remainder = 0;

	if (irq_req_buffer == NULL) {
		printk(KERN_ERR "Failed to initialize ubd buffering\n");
		return -ENOMEM;
	}
	io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
				      sizeof(struct io_thread_req *),
				      GFP_KERNEL
		);

	io_remainder = 0;

	if (io_req_buffer == NULL) {
		printk(KERN_ERR "Failed to initialize ubd buffering\n");
		return -ENOMEM;
	}
	platform_driver_register(&ubd_driver);
	mutex_lock(&ubd_lock);
	for (i = 0; i < MAX_DEV; i++){
		err = ubd_add(i, &error);
		if(err)
			printk(KERN_ERR "Failed to initialize ubd device %d :"
			       "%s\n", i, error);
	}
	mutex_unlock(&ubd_lock);
	return 0;
}

late_initcall(ubd_init);

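/*
 * Start the host-side I/O helper thread and hook its pipe up to
 * UBD_IRQ, so request completions arrive in the kernel as interrupts.
 */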
static int __init ubd_driver_init(void){
	unsigned long stack;
	int err;

	/* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/
	if(global_openflags.s){
		printk(KERN_INFO "ubd: Synchronous mode\n");
		/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
		 * enough, so use the io thread anyway.
		 */
	}
	stack = alloc_stack(0, 0);
	io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
	if(io_pid < 0){
		printk(KERN_ERR
		       "ubd : Failed to start I/O thread (errno = %d) - "
		       "falling back to synchronous I/O\n", -io_pid);
		io_pid = -1;
		return 0;
	}
	err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr,
			     0, "ubd", ubd_devs);
	if(err < 0)
		printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err);
	return 0;
}

device_initcall(ubd_driver_init);

static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
			  __u64 *cow_offset, unsigned long *bitmap,
			  __u64 bitmap_offset, unsigned long *bitmap_words,
			  __u64 bitmap_len)
{
	__u64 sector = io_offset >> SECTOR_SHIFT;
	int i, update_bitmap = 0;

	for (i = 0; i < length >> SECTOR_SHIFT; i++) {
		if(cow_mask != NULL)
			ubd_set_bit(i, (unsigned char *) cow_mask);
		if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
			continue;

		update_bitmap = 1;
		ubd_set_bit(sector + i, (unsigned char *) bitmap);
	}

	if(!update_bitmap)
		return;

	*cow_offset = sector / (sizeof(unsigned long) * 8);

	/* This takes care of the case where we're exactly at the end of the
	 * device, and *cow_offset + 1 is off the end. So, just back it up
	 * by one word. Thanks to Lynn Kerby for the fix and James McMechan
	 * for the original diagnosis.
	 */
	if (*cow_offset == (DIV_ROUND_UP(bitmap_len,
					 sizeof(unsigned long)) - 1))
		(*cow_offset)--;

	bitmap_words[0] = bitmap[*cow_offset];
	bitmap_words[1] = bitmap[*cow_offset + 1];

	*cow_offset *= sizeof(unsigned long);
	*cow_offset += bitmap_offset;
}

static void cowify_req(struct io_thread_req *req, struct io_desc *segment,
		       unsigned long offset, unsigned long *bitmap,
		       __u64 bitmap_offset, __u64 bitmap_len)
{
	__u64 sector = offset >> SECTOR_SHIFT;
	int i;

	if (segment->length > (sizeof(segment->sector_mask) * 8) << SECTOR_SHIFT)
		panic("Operation too long");

	if (req_op(req->req) == REQ_OP_READ) {
		for (i = 0; i < segment->length >> SECTOR_SHIFT; i++) {
			if(ubd_test_bit(sector + i, (unsigned char *) bitmap))
				ubd_set_bit(i, (unsigned char *)
					    &segment->sector_mask);
		}
	} else {
		cowify_bitmap(offset, segment->length, &segment->sector_mask,
			      &segment->cow_offset, bitmap, bitmap_offset,
			      segment->bitmap_words, bitmap_len);
	}
}

static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
			struct request *req)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int i = 0;
	unsigned long byte_offset = io_req->offset;
	enum req_op op = req_op(req);

	if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD) {
		io_req->io_desc[0].buffer = NULL;
		io_req->io_desc[0].length = blk_rq_bytes(req);
	} else {
		rq_for_each_segment(bvec, req, iter) {
			BUG_ON(i >= io_req->desc_cnt);

			io_req->io_desc[i].buffer = bvec_virt(&bvec);
			io_req->io_desc[i].length = bvec.bv_len;
			i++;
		}
	}

	if (dev->cow.file) {
		for (i = 0; i < io_req->desc_cnt; i++) {
			cowify_req(io_req, &io_req->io_desc[i], byte_offset,
				   dev->cow.bitmap, dev->cow.bitmap_offset,
				   dev->cow.bitmap_len);
			byte_offset += io_req->io_desc[i].length;
		}

	}
}

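/*
 * Allocate a request descriptor for the helper thread. fds[0] is the
 * file used when a sector_mask bit is clear (the backing file on COW
 * devices), fds[1] the file used when it is set (the COW file);
 * offsets[1] skips the COW header and bitmap so the data area starts
 * at cow.data_offset.
 */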
static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
					   int desc_cnt)
{
	struct io_thread_req *io_req;
	int i;

	io_req = kmalloc(sizeof(*io_req) +
			 (desc_cnt * sizeof(struct io_desc)),
			 GFP_ATOMIC);
	if (!io_req)
		return NULL;

	io_req->req = req;
	if (dev->cow.file)
		io_req->fds[0] = dev->cow.fd;
	else
		io_req->fds[0] = dev->fd;
	io_req->error = 0;
	io_req->sectorsize = SECTOR_SIZE;
	io_req->fds[1] = dev->fd;
	io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
	io_req->offsets[0] = 0;
	io_req->offsets[1] = dev->cow.data_offset;

	for (i = 0 ; i < desc_cnt; i++) {
		io_req->io_desc[i].sector_mask = 0;
		io_req->io_desc[i].cow_offset = -1;
	}

	return io_req;
}

static int ubd_submit_request(struct ubd *dev, struct request *req)
{
	int segs = 0;
	struct io_thread_req *io_req;
	int ret;
	enum req_op op = req_op(req);

	if (op == REQ_OP_FLUSH)
		segs = 0;
	else if (op == REQ_OP_WRITE_ZEROES || op == REQ_OP_DISCARD)
		segs = 1;
	else
		segs = blk_rq_nr_phys_segments(req);

	io_req = ubd_alloc_req(dev, req, segs);
	if (!io_req)
		return -ENOMEM;

	io_req->desc_cnt = segs;
	if (segs)
		ubd_map_req(dev, io_req, req);

	ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
	if (ret != sizeof(io_req)) {
		if (ret != -EAGAIN)
			pr_err("write to io thread failed: %d\n", -ret);
		kfree(io_req);
	}
	return ret;
}

static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct ubd *ubd_dev = hctx->queue->queuedata;
	struct request *req = bd->rq;
	int ret = 0, res = BLK_STS_OK;

	blk_mq_start_request(req);

	spin_lock_irq(&ubd_dev->lock);

	switch (req_op(req)) {
	case REQ_OP_FLUSH:
	case REQ_OP_READ:
	case REQ_OP_WRITE:
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = ubd_submit_request(ubd_dev, req);
		break;
	default:
		WARN_ON_ONCE(1);
		res = BLK_STS_NOTSUPP;
	}

	spin_unlock_irq(&ubd_dev->lock);

	if (ret < 0) {
		if (ret == -ENOMEM)
			res = BLK_STS_RESOURCE;
		else
			res = BLK_STS_DEV_RESOURCE;
	}

	return res;
}

static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct ubd *ubd_dev = bdev->bd_disk->private_data;

	geo->heads = 128;
	geo->sectors = 32;
	geo->cylinders = ubd_dev->size / (128 * 32 * 512);
	return 0;
}

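/*
 * The geometry reported above is synthetic (128 heads, 32 sectors per
 * track); HDIO_GET_IDENTITY below reports the same numbers.
 */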
static int ubd_ioctl(struct block_device *bdev, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct ubd *ubd_dev = bdev->bd_disk->private_data;
	u16 ubd_id[ATA_ID_WORDS];

	switch (cmd) {
		struct cdrom_volctrl volume;
	case HDIO_GET_IDENTITY:
		memset(&ubd_id, 0, ATA_ID_WORDS * 2);
		ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512);
		ubd_id[ATA_ID_HEADS] = 128;
		ubd_id[ATA_ID_SECTORS] = 32;
		if(copy_to_user((char __user *) arg, (char *) &ubd_id,
				sizeof(ubd_id)))
			return -EFAULT;
		return 0;

	case CDROMVOLREAD:
		if(copy_from_user(&volume, (char __user *) arg, sizeof(volume)))
			return -EFAULT;
		volume.channel0 = 255;
		volume.channel1 = 255;
		volume.channel2 = 255;
		volume.channel3 = 255;
		if(copy_to_user((char __user *) arg, &volume, sizeof(volume)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int map_error(int error_code)
{
	switch (error_code) {
	case 0:
		return BLK_STS_OK;
	case ENOSYS:
	case EOPNOTSUPP:
		return BLK_STS_NOTSUPP;
	case ENOSPC:
		return BLK_STS_NOSPC;
	}
	return BLK_STS_IOERR;
}

/*
 * Everything from here onwards *IS NOT PART OF THE KERNEL*
 *
 * The following functions are part of UML hypervisor code.
 * All functions from here onwards are executed as a helper
 * thread and are not allowed to execute any kernel functions.
 *
 * Any communication must occur strictly via shared memory and IPC.
 *
 * Do not add printks, locks, kernel memory operations, etc - it
 * will result in unpredictable behaviour and/or crashes.
 */

static int update_bitmap(struct io_thread_req *req, struct io_desc *segment)
{
	int n;

	if (segment->cow_offset == -1)
		return map_error(0);

	n = os_pwrite_file(req->fds[1], &segment->bitmap_words,
			   sizeof(segment->bitmap_words), segment->cow_offset);
	if (n != sizeof(segment->bitmap_words))
		return map_error(-n);

	return map_error(0);
}

static void do_io(struct io_thread_req *req, struct io_desc *desc)
{
	char *buf = NULL;
	unsigned long len;
	int n, nsectors, start, end, bit;
	__u64 off;

	/* FLUSH is really a special case, we cannot "case" it with others */

	if (req_op(req->req) == REQ_OP_FLUSH) {
		/* fds[0] is always either the rw image or our cow file */
		req->error = map_error(-os_sync_file(req->fds[0]));
		return;
	}

	nsectors = desc->length / req->sectorsize;
	start = 0;
	do {
		bit = ubd_test_bit(start, (unsigned char *) &desc->sector_mask);
		end = start;
		while((end < nsectors) &&
		      (ubd_test_bit(end, (unsigned char *) &desc->sector_mask) == bit))
			end++;

		off = req->offset + req->offsets[bit] +
			start * req->sectorsize;
		len = (end - start) * req->sectorsize;
		if (desc->buffer != NULL)
			buf = &desc->buffer[start * req->sectorsize];

		switch (req_op(req->req)) {
		case REQ_OP_READ:
			n = 0;
			do {
				buf = &buf[n];
				len -= n;
				n = os_pread_file(req->fds[bit], buf, len, off);
				if (n < 0) {
					req->error = map_error(-n);
					return;
				}
			} while((n < len) && (n != 0));
			if (n < len) memset(&buf[n], 0, len - n);
			break;
		case REQ_OP_WRITE:
			n = os_pwrite_file(req->fds[bit], buf, len, off);
			if(n != len){
				req->error = map_error(-n);
				return;
			}
			break;
		case REQ_OP_DISCARD:
			n = os_falloc_punch(req->fds[bit], off, len);
			if (n) {
				req->error = map_error(-n);
				return;
			}
			break;
		case REQ_OP_WRITE_ZEROES:
			n = os_falloc_zeroes(req->fds[bit], off, len);
			if (n) {
				req->error = map_error(-n);
				return;
			}
			break;
		default:
			WARN_ON_ONCE(1);
			req->error = BLK_STS_NOTSUPP;
			return;
		}

		start = end;
	} while(start < nsectors);

	req->offset += len;
	req->error = update_bitmap(req, desc);
}

/* Changed in start_io_thread, which is serialized by being called only
 * from ubd_init, which is an initcall.
 */
int kernel_fd = -1;

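/*
 * The helper thread below reads batches of request pointers from
 * kernel_fd, performs each descriptor's I/O with host syscalls via
 * do_io(), and writes the pointers back so ubd_intr() can complete
 * the requests.
 */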
/* Only changed by the io thread. XXX: currently unused. */
static int io_count;

int io_thread(void *arg)
{
	int n, count, written, res;

	os_fix_helper_signals();

	while(1){
		n = bulk_req_safe_read(
			kernel_fd,
			io_req_buffer,
			&io_remainder,
			&io_remainder_size,
			UBD_REQ_BUFFER_SIZE
		);
		if (n <= 0) {
			if (n == -EAGAIN)
				ubd_read_poll(-1);

			continue;
		}

		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
			struct io_thread_req *req = (*io_req_buffer)[count];
			int i;

			io_count++;
			for (i = 0; !req->error && i < req->desc_cnt; i++)
				do_io(req, &(req->io_desc[i]));

		}

		written = 0;

		do {
			res = os_write_file(kernel_fd,
					    ((char *) io_req_buffer) + written,
					    n - written);
			if (res >= 0) {
				written += res;
			}
			if (written < n) {
				ubd_write_poll(-1);
			}
		} while (written < n);
	}

	return 0;
}