// SPDX-License-Identifier: GPL-2.0
/*
 * dcssblk.c -- the S/390 block driver for dcss memory
 *
 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
 */

#define KMSG_COMPONENT "dcssblk"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/io.h>
#include <asm/extmem.h>

#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
#define DCSSBLK_PARM_LEN 400
#define DCSS_BUS_ID_SIZE 20

static int dcssblk_open(struct gendisk *disk, blk_mode_t mode);
static void dcssblk_release(struct gendisk *disk);
static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		unsigned long *pfn);

static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";

static int dcssblk_major;
static const struct block_device_operations dcssblk_devops = {
	.owner		= THIS_MODULE,
	.submit_bio	= dcssblk_submit_bio,
	.open		= dcssblk_open,
	.release	= dcssblk_release,
};

static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
				       pgoff_t pgoff, size_t nr_pages)
{
	long rc;
	void *kaddr;

	rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
			       &kaddr, NULL);
	if (rc < 0)
		return dax_mem2blk_err(rc);

	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations dcssblk_dax_ops = {
	.direct_access = dcssblk_dax_direct_access,
	.zero_page_range = dcssblk_dax_zero_page_range,
};

struct dcssblk_dev_info {
	struct list_head lh;
	struct device dev;
	char segment_name[DCSS_BUS_ID_SIZE];
	atomic_t use_count;
	struct gendisk *gd;
	unsigned long start;
	unsigned long end;
	int segment_type;
	unsigned char save_pending;
	unsigned char is_shared;
	int num_of_segments;
	struct list_head seg_list;
	struct dax_device *dax_dev;
};

struct segment_info {
	struct list_head lh;
	char segment_name[DCSS_BUS_ID_SIZE];
	unsigned long start;
	unsigned long end;
	int segment_type;
};

static ssize_t dcssblk_add_store(struct device *dev, struct device_attribute *attr,
				 const char *buf, size_t count);
static ssize_t dcssblk_remove_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count);

static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);

static struct device *dcssblk_root_dev;

static LIST_HEAD(dcssblk_devices);
static struct rw_semaphore dcssblk_devices_sem;

/*
 * release function for segment device.
 */
static void
dcssblk_release_segment(struct device *dev)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry, *temp;

	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
		list_del(&entry->lh);
		kfree(entry);
	}
	kfree(dev_info);
	module_put(THIS_MODULE);
}

/*
 * get a minor number. needs to be called with
 * down_write(&dcssblk_devices_sem) and the
 * device needs to be enqueued before the semaphore is
 * freed.
 */
static int
dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
{
	int minor, found;
	struct dcssblk_dev_info *entry;

	if (dev_info == NULL)
		return -EINVAL;
	for (minor = 0; minor < (1<<MINORBITS); minor++) {
		found = 0;
		// test if minor available
		list_for_each_entry(entry, &dcssblk_devices, lh)
			if (minor == entry->gd->first_minor)
				found++;
		if (!found) break; // got unused minor
	}
	if (found)
		return -EBUSY;
	dev_info->gd->first_minor = minor;
	return 0;
}

/*
 * get the struct dcssblk_dev_info from dcssblk_devices
 * for the given name.
 * down_read(&dcssblk_devices_sem) must be held.
 */
static struct dcssblk_dev_info *
dcssblk_get_device_by_name(char *name)
{
	struct dcssblk_dev_info *entry;

	list_for_each_entry(entry, &dcssblk_devices, lh) {
		if (!strcmp(name, entry->segment_name)) {
			return entry;
		}
	}
	return NULL;
}

/*
 * get the struct segment_info from seg_list
 * for the given name.
 * down_read(&dcssblk_devices_sem) must be held.
 */
static struct segment_info *
dcssblk_get_segment_by_name(char *name)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;

	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			if (!strcmp(name, entry->segment_name))
				return entry;
		}
	}
	return NULL;
}

/*
 * get the highest address of the multi-segment block.
 */
static unsigned long
dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
{
	unsigned long highest_addr;
	struct segment_info *entry;

	highest_addr = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (highest_addr < entry->end)
			highest_addr = entry->end;
	}
	return highest_addr;
}

/*
 * get the lowest address of the multi-segment block.
 */
static unsigned long
dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
{
	int set_first;
	unsigned long lowest_addr;
	struct segment_info *entry;

	set_first = 0;
	lowest_addr = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (set_first == 0) {
			lowest_addr = entry->start;
			set_first = 1;
		} else {
			if (lowest_addr > entry->start)
				lowest_addr = entry->start;
		}
	}
	return lowest_addr;
}

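/*
 * Example of the continuity rule enforced below (hypothetical addresses):
 * DCSSs spanning 0x10000000-0x1fffffff and 0x20000000-0x2fffffff may form
 * one device, because the second segment starts at the first one's end + 1.
 */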
/*
 * Check continuity of segments.
 */
static int
dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
{
	int i, j, rc;
	struct segment_info *sort_list, *entry, temp;

	if (dev_info->num_of_segments <= 1)
		return 0;

	sort_list = kcalloc(dev_info->num_of_segments,
			    sizeof(struct segment_info),
			    GFP_KERNEL);
	if (sort_list == NULL)
		return -ENOMEM;
	i = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		memcpy(&sort_list[i], entry, sizeof(struct segment_info));
		i++;
	}

	/* sort segments */
	for (i = 0; i < dev_info->num_of_segments; i++)
		for (j = 0; j < dev_info->num_of_segments; j++)
			if (sort_list[j].start > sort_list[i].start) {
				memcpy(&temp, &sort_list[i],
				       sizeof(struct segment_info));
				memcpy(&sort_list[i], &sort_list[j],
				       sizeof(struct segment_info));
				memcpy(&sort_list[j], &temp,
				       sizeof(struct segment_info));
			}

	/* check continuity */
	for (i = 0; i < dev_info->num_of_segments - 1; i++) {
		if ((sort_list[i].end + 1) != sort_list[i+1].start) {
			pr_err("Adjacent DCSSs %s and %s are not "
			       "contiguous\n", sort_list[i].segment_name,
			       sort_list[i+1].segment_name);
			rc = -EINVAL;
			goto out;
		}
		/* EN and EW are allowed in a block device */
		if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
			if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
			    (sort_list[i].segment_type == SEG_TYPE_ER) ||
			    !(sort_list[i+1].segment_type &
			      SEGMENT_EXCLUSIVE) ||
			    (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
				pr_err("DCSS %s and DCSS %s have "
				       "incompatible types\n",
				       sort_list[i].segment_name,
				       sort_list[i+1].segment_name);
				rc = -EINVAL;
				goto out;
			}
		}
	}
	rc = 0;
out:
	kfree(sort_list);
	return rc;
}

/*
 * Load a segment
 */
static int
dcssblk_load_segment(char *name, struct segment_info **seg_info)
{
	int rc;

	/* already loaded? */
	down_read(&dcssblk_devices_sem);
	*seg_info = dcssblk_get_segment_by_name(name);
	up_read(&dcssblk_devices_sem);
	if (*seg_info != NULL)
		return -EEXIST;

	/* get a struct segment_info */
	*seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
	if (*seg_info == NULL)
		return -ENOMEM;

	strscpy((*seg_info)->segment_name, name);

	/* load the segment */
	rc = segment_load(name, SEGMENT_SHARED,
			  &(*seg_info)->start, &(*seg_info)->end);
	if (rc < 0) {
		segment_warning(rc, (*seg_info)->segment_name);
		kfree(*seg_info);
	} else {
		INIT_LIST_HEAD(&(*seg_info)->lh);
		(*seg_info)->segment_type = rc;
	}
	return rc;
}

/*
 * device attribute for switching shared/nonshared (exclusive)
 * operation (show + store)
 */
static ssize_t
dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dcssblk_dev_info *dev_info;

	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	return sysfs_emit(buf, dev_info->is_shared ? "1\n" : "0\n");
}

static ssize_t
dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry, *temp;
	int rc;

	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
		return -EINVAL;
	down_write(&dcssblk_devices_sem);
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	if (atomic_read(&dev_info->use_count)) {
		rc = -EBUSY;
		goto out;
	}
	if (inbuf[0] == '1') {
		/* reload segments in shared mode */
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			rc = segment_modify_shared(entry->segment_name,
						   SEGMENT_SHARED);
			if (rc < 0) {
				BUG_ON(rc == -EINVAL);
				if (rc != -EAGAIN)
					goto removeseg;
			}
		}
		dev_info->is_shared = 1;
		switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			set_disk_ro(dev_info->gd, 1);
		}
	} else if (inbuf[0] == '0') {
		/* reload segments in exclusive mode */
		if (dev_info->segment_type == SEG_TYPE_SC) {
			pr_err("DCSS %s is of type SC and cannot be "
			       "loaded as exclusive-writable\n",
			       dev_info->segment_name);
			rc = -EINVAL;
			goto out;
		}
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			rc = segment_modify_shared(entry->segment_name,
						   SEGMENT_EXCLUSIVE);
			if (rc < 0) {
				BUG_ON(rc == -EINVAL);
				if (rc != -EAGAIN)
					goto removeseg;
			}
		}
		dev_info->is_shared = 0;
		set_disk_ro(dev_info->gd, 0);
	} else {
		rc = -EINVAL;
		goto out;
	}
	rc = count;
	goto out;

removeseg:
	pr_err("DCSS device %s is removed after a failed access mode "
	       "change\n", dev_info->segment_name);
	temp = entry;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (entry != temp)
			segment_unload(entry->segment_name);
	}
	list_del(&dev_info->lh);
	up_write(&dcssblk_devices_sem);

	dax_remove_host(dev_info->gd);
	kill_dax(dev_info->dax_dev);
	put_dax(dev_info->dax_dev);
	del_gendisk(dev_info->gd);
	put_disk(dev_info->gd);

	if (device_remove_file_self(dev, attr)) {
		device_unregister(dev);
		put_device(dev);
	}
	return rc;
out:
	up_write(&dcssblk_devices_sem);
	return rc;
}
static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
		   dcssblk_shared_store);

/*
 * device attribute for save operation on current copy
 * of the segment. If the segment is busy, saving will
 * become pending until it gets released, which can be
 * undone by storing a non-true value to this entry.
 * (show + store)
 */
static ssize_t
dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dcssblk_dev_info *dev_info;

	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	return sysfs_emit(buf, dev_info->save_pending ? "1\n" : "0\n");
}

static ssize_t
dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;

	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
		return -EINVAL;
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);

	down_write(&dcssblk_devices_sem);
	if (inbuf[0] == '1') {
		if (atomic_read(&dev_info->use_count) == 0) {
			// device is idle => we save immediately
			pr_info("All DCSSs that map to device %s are "
				"saved\n", dev_info->segment_name);
			list_for_each_entry(entry, &dev_info->seg_list, lh) {
				if (entry->segment_type == SEG_TYPE_EN ||
				    entry->segment_type == SEG_TYPE_SN)
					pr_warn("DCSS %s is of type SN or EN"
						" and cannot be saved\n",
						entry->segment_name);
				else
					segment_save(entry->segment_name);
			}
		} else {
			// device is busy => we save it when it becomes
			// idle in dcssblk_release
			pr_info("Device %s is in use, its DCSSs will be "
				"saved when it becomes idle\n",
				dev_info->segment_name);
			dev_info->save_pending = 1;
		}
	} else if (inbuf[0] == '0') {
		if (dev_info->save_pending) {
			// device is busy & the user wants to undo his save
			// request
			dev_info->save_pending = 0;
			pr_info("A pending save request for device %s "
				"has been canceled\n",
				dev_info->segment_name);
		}
	} else {
		up_write(&dcssblk_devices_sem);
		return -EINVAL;
	}
	up_write(&dcssblk_devices_sem);
	return count;
}
static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
		   dcssblk_save_store);

/*
 * device attribute for showing all segments in a device
 */
static ssize_t
dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;
	int i;

	i = 0;
	down_read(&dcssblk_devices_sem);
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	list_for_each_entry(entry, &dev_info->seg_list, lh)
		i += sysfs_emit_at(buf, i, "%s\n", entry->segment_name);
	up_read(&dcssblk_devices_sem);
	return i;
}
static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);

static struct attribute *dcssblk_dev_attrs[] = {
	&dev_attr_shared.attr,
	&dev_attr_save.attr,
	&dev_attr_seglist.attr,
	NULL,
};
static struct attribute_group dcssblk_dev_attr_group = {
	.attrs = dcssblk_dev_attrs,
};
static const struct attribute_group *dcssblk_dev_attr_groups[] = {
	&dcssblk_dev_attr_group,
	NULL,
};

static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info)
{
	struct dax_device *dax_dev;

	if (!IS_ENABLED(CONFIG_DCSSBLK_DAX))
		return 0;

	dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);
	set_dax_synchronous(dax_dev);
	dev_info->dax_dev = dax_dev;
	return dax_add_host(dev_info->dax_dev, dev_info->gd);
}

/*
 * device attribute for adding devices
 */
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,
		.features		= BLK_FEAT_DAX,
	};
	int rc, i, j, num_of_segments;
	struct dcssblk_dev_info *dev_info;
	struct segment_info *seg_info, *temp;
	char *local_buf;
	unsigned long seg_byte_size;

	dev_info = NULL;
	seg_info = NULL;
	if (dev != dcssblk_root_dev) {
		rc = -EINVAL;
		goto out_nobuf;
	}
	if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
		rc = -ENAMETOOLONG;
		goto out_nobuf;
	}

	local_buf = kmalloc(count + 1, GFP_KERNEL);
	if (local_buf == NULL) {
		rc = -ENOMEM;
		goto out_nobuf;
	}

	/*
	 * parse input
	 */
	num_of_segments = 0;
	for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
		for (j = i; j < count &&
			    (buf[j] != ':') &&
			    (buf[j] != '\0') &&
			    (buf[j] != '\n'); j++) {
			local_buf[j-i] = toupper(buf[j]);
		}
		local_buf[j-i] = '\0';
		if (((j - i) == 0) || ((j - i) > 8)) {
			rc = -ENAMETOOLONG;
			goto seg_list_del;
		}

		rc = dcssblk_load_segment(local_buf, &seg_info);
		if (rc < 0)
			goto seg_list_del;
		/*
		 * get a struct dcssblk_dev_info
		 */
		if (num_of_segments == 0) {
			dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
					   GFP_KERNEL);
			if (dev_info == NULL) {
				rc = -ENOMEM;
				goto out;
			}
			strscpy(dev_info->segment_name, local_buf);
			dev_info->segment_type = seg_info->segment_type;
			INIT_LIST_HEAD(&dev_info->seg_list);
		}
		list_add_tail(&seg_info->lh, &dev_info->seg_list);
		num_of_segments++;
		i = j;

		if ((buf[j] == '\0') || (buf[j] == '\n'))
			break;
	}

	/* no trailing colon at the end of the input */
	if ((i > 0) && (buf[i-1] == ':')) {
		rc = -ENAMETOOLONG;
		goto seg_list_del;
	}
	strscpy(local_buf, buf, i + 1);
	dev_info->num_of_segments = num_of_segments;
	rc = dcssblk_is_continuous(dev_info);
	if (rc < 0)
		goto seg_list_del;

	dev_info->start = dcssblk_find_lowest_addr(dev_info);
	dev_info->end = dcssblk_find_highest_addr(dev_info);

	dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
	dev_info->dev.release = dcssblk_release_segment;
	dev_info->dev.groups = dcssblk_dev_attr_groups;
	INIT_LIST_HEAD(&dev_info->lh);
	dev_info->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(dev_info->gd)) {
		rc = PTR_ERR(dev_info->gd);
		goto seg_list_del;
	}
	dev_info->gd->major = dcssblk_major;
	dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK;
	dev_info->gd->fops = &dcssblk_devops;
	dev_info->gd->private_data = dev_info;
	dev_info->gd->flags |= GENHD_FL_NO_PART;

	seg_byte_size = (dev_info->end - dev_info->start + 1);
	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
	pr_info("Loaded %s with total size %lu bytes and capacity %lu "
		"sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);

	dev_info->save_pending = 0;
	dev_info->is_shared = 1;
	dev_info->dev.parent = dcssblk_root_dev;

	/*
	 * get minor, add to list
	 */
	down_write(&dcssblk_devices_sem);
	if (dcssblk_get_segment_by_name(local_buf)) {
		rc = -EEXIST;
		goto release_gd;
	}
	rc = dcssblk_assign_free_minor(dev_info);
	if (rc)
		goto release_gd;
	sprintf(dev_info->gd->disk_name, "dcssblk%d",
		dev_info->gd->first_minor);
	list_add_tail(&dev_info->lh, &dcssblk_devices);

	if (!try_module_get(THIS_MODULE)) {
		rc = -ENODEV;
		goto dev_list_del;
	}
	/*
	 * register the device
	 */
	rc = device_register(&dev_info->dev);
	if (rc)
		goto put_dev;

	rc = dcssblk_setup_dax(dev_info);
	if (rc)
		goto out_dax;

	get_device(&dev_info->dev);
	rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
	if (rc)
		goto out_dax_host;

	switch (dev_info->segment_type) {
	case SEG_TYPE_SR:
	case SEG_TYPE_ER:
	case SEG_TYPE_SC:
		set_disk_ro(dev_info->gd, 1);
		break;
	default:
		set_disk_ro(dev_info->gd, 0);
		break;
	}
	up_write(&dcssblk_devices_sem);
	rc = count;
	goto out;

out_dax_host:
	put_device(&dev_info->dev);
	dax_remove_host(dev_info->gd);
out_dax:
	kill_dax(dev_info->dax_dev);
	put_dax(dev_info->dax_dev);
put_dev:
	list_del(&dev_info->lh);
	put_disk(dev_info->gd);
	list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
		segment_unload(seg_info->segment_name);
	}
	put_device(&dev_info->dev);
	up_write(&dcssblk_devices_sem);
	goto out;
dev_list_del:
	list_del(&dev_info->lh);
release_gd:
	put_disk(dev_info->gd);
	up_write(&dcssblk_devices_sem);
seg_list_del:
	if (dev_info == NULL)
		goto out;
	list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
		list_del(&seg_info->lh);
		segment_unload(seg_info->segment_name);
		kfree(seg_info);
	}
	kfree(dev_info);
out:
	kfree(local_buf);
out_nobuf:
	return rc;
}

/*
 * device attribute for removing devices
 */
static ssize_t
dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;
	int rc, i;
	char *local_buf;

	if (dev != dcssblk_root_dev) {
		return -EINVAL;
	}
	local_buf = kmalloc(count + 1, GFP_KERNEL);
	if (local_buf == NULL) {
		return -ENOMEM;
	}
	/*
	 * parse input
	 */
	for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
		local_buf[i] = toupper(buf[i]);
	}
	local_buf[i] = '\0';
	if ((i == 0) || (i > 8)) {
		rc = -ENAMETOOLONG;
		goto out_buf;
	}

	down_write(&dcssblk_devices_sem);
	dev_info = dcssblk_get_device_by_name(local_buf);
	if (dev_info == NULL) {
		up_write(&dcssblk_devices_sem);
		pr_warn("Device %s cannot be removed because it is not a known device\n",
			local_buf);
		rc = -ENODEV;
		goto out_buf;
	}
	if (atomic_read(&dev_info->use_count) != 0) {
		up_write(&dcssblk_devices_sem);
		pr_warn("Device %s cannot be removed while it is in use\n",
			local_buf);
		rc = -EBUSY;
		goto out_buf;
	}

	list_del(&dev_info->lh);
	/* unload all related segments */
	list_for_each_entry(entry, &dev_info->seg_list, lh)
		segment_unload(entry->segment_name);
	up_write(&dcssblk_devices_sem);

	dax_remove_host(dev_info->gd);
	kill_dax(dev_info->dax_dev);
	put_dax(dev_info->dax_dev);
	del_gendisk(dev_info->gd);
	put_disk(dev_info->gd);

	device_unregister(&dev_info->dev);
	put_device(&dev_info->dev);

	rc = count;
out_buf:
	kfree(local_buf);
	return rc;
}

static int
dcssblk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct dcssblk_dev_info *dev_info = disk->private_data;
	int rc;

	if (NULL == dev_info) {
		rc = -ENODEV;
		goto out;
	}
	atomic_inc(&dev_info->use_count);
	rc = 0;
out:
	return rc;
}

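/*
 * Device release: drop the use count and, once the device is idle, carry
 * out a save request that was left pending via the "save" attribute.
 */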
static void
dcssblk_release(struct gendisk *disk)
{
	struct dcssblk_dev_info *dev_info = disk->private_data;
	struct segment_info *entry;

	if (!dev_info) {
		WARN_ON(1);
		return;
	}
	down_write(&dcssblk_devices_sem);
	if (atomic_dec_and_test(&dev_info->use_count)
	    && (dev_info->save_pending)) {
		pr_info("Device %s has become idle and is being saved "
			"now\n", dev_info->segment_name);
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			if (entry->segment_type == SEG_TYPE_EN ||
			    entry->segment_type == SEG_TYPE_SN)
				pr_warn("DCSS %s is of type SN or EN and cannot"
					" be saved\n", entry->segment_name);
			else
				segment_save(entry->segment_name);
		}
		dev_info->save_pending = 0;
	}
	up_write(&dcssblk_devices_sem);
}

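/*
 * Handle I/O by copying directly between the bio pages and the
 * memory-backed DCSS. Requests must be page-aligned; misaligned requests
 * and writes to read-only segments end the bio with an error.
 */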
static void
dcssblk_submit_bio(struct bio *bio)
{
	struct dcssblk_dev_info *dev_info;
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned long index;
	void *page_addr;
	unsigned long source_addr;
	unsigned long bytes_done;

	bytes_done = 0;
	dev_info = bio->bi_bdev->bd_disk->private_data;
	if (dev_info == NULL)
		goto fail;
	if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) ||
	    !IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE))
		/* Request is not page-aligned. */
		goto fail;
	/* verify data transfer direction */
	if (dev_info->is_shared) {
		switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			/* cannot write to these segments */
			if (bio_data_dir(bio) == WRITE) {
				pr_warn("Writing to %s failed because it is a read-only device\n",
					dev_name(&dev_info->dev));
				goto fail;
			}
		}
	}

	index = (bio->bi_iter.bi_sector >> 3);
	bio_for_each_segment(bvec, bio, iter) {
		page_addr = bvec_virt(&bvec);
		source_addr = dev_info->start + (index<<12) + bytes_done;
		if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) ||
			     !IS_ALIGNED(bvec.bv_len, PAGE_SIZE)))
			// More paranoia.
			goto fail;
		if (bio_data_dir(bio) == READ)
			memcpy(page_addr, __va(source_addr), bvec.bv_len);
		else
			memcpy(__va(source_addr), page_addr, bvec.bv_len);
		bytes_done += bvec.bv_len;
	}
	bio_endio(bio);
	return;
fail:
	bio_io_error(bio);
}

static long
__dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
		long nr_pages, void **kaddr, unsigned long *pfn)
{
	resource_size_t offset = pgoff * PAGE_SIZE;
	unsigned long dev_sz;

	dev_sz = dev_info->end - dev_info->start + 1;
	if (kaddr)
		*kaddr = __va(dev_info->start + offset);
	if (pfn)
		*pfn = PFN_DOWN(dev_info->start + offset);

	return (dev_sz - offset) / PAGE_SIZE;
}

static long
dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		unsigned long *pfn)
{
	struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);

	return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
}

static void
dcssblk_check_params(void)
{
	int rc, i, j, k;
	char buf[DCSSBLK_PARM_LEN + 1];
	struct dcssblk_dev_info *dev_info;

	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
	     i++) {
		for (j = i; (j < DCSSBLK_PARM_LEN) &&
			    (dcssblk_segments[j] != ',')  &&
			    (dcssblk_segments[j] != '\0') &&
			    (dcssblk_segments[j] != '('); j++)
		{
			buf[j-i] = dcssblk_segments[j];
		}
		buf[j-i] = '\0';
		rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
		if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
			for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
				buf[k] = toupper(buf[k]);
			buf[k] = '\0';
			if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
				down_read(&dcssblk_devices_sem);
				dev_info = dcssblk_get_device_by_name(buf);
				up_read(&dcssblk_devices_sem);
				if (dev_info)
					dcssblk_shared_store(&dev_info->dev,
							     NULL, "0\n", 2);
			}
		}
		while ((dcssblk_segments[j] != ',') &&
		       (dcssblk_segments[j] != '\0'))
		{
			j++;
		}
		if (dcssblk_segments[j] == '\0')
			break;
		i = j;
	}
}

/*
 * The init/exit functions.
 */
static void __exit
dcssblk_exit(void)
{
	root_device_unregister(dcssblk_root_dev);
	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}

static int __init
dcssblk_init(void)
{
	int rc;

	dcssblk_root_dev = root_device_register("dcssblk");
	if (IS_ERR(dcssblk_root_dev))
		return PTR_ERR(dcssblk_root_dev);
	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
	if (rc)
		goto out_root;
	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
	if (rc)
		goto out_root;
	rc = register_blkdev(0, DCSSBLK_NAME);
	if (rc < 0)
		goto out_root;
	dcssblk_major = rc;
	init_rwsem(&dcssblk_devices_sem);

	dcssblk_check_params();
	return 0;

out_root:
	root_device_unregister(dcssblk_root_dev);

	return rc;
}

module_init(dcssblk_init);
module_exit(dcssblk_exit);

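/*
 * Module parameter sketch (hypothetical segment names), equivalent to the
 * sysfs example above:
 *
 *   modprobe dcssblk segments="MYDCSS1:MYDCSS2(local)"
 */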
module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
		 "comma-separated list of sets, names within each set "
		 "separated by colons, each set contains names of contiguous "
		 "segments and each name max. 8 chars.\n"
		 "Adding \"(local)\" to the end of each set equals echoing 0 "
		 "to /sys/devices/dcssblk/<device name>/shared after loading "
		 "the contiguous segments - \n"
		 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");

MODULE_DESCRIPTION("S/390 block driver for DCSS memory");
MODULE_LICENSE("GPL");