1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * dcssblk.c -- the S/390 block driver for dcss memory 4 * 5 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer 6 */ 7 8 #define KMSG_COMPONENT "dcssblk" 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 11 #include <linux/module.h> 12 #include <linux/moduleparam.h> 13 #include <linux/ctype.h> 14 #include <linux/errno.h> 15 #include <linux/init.h> 16 #include <linux/slab.h> 17 #include <linux/blkdev.h> 18 #include <linux/completion.h> 19 #include <linux/interrupt.h> 20 #include <linux/pfn_t.h> 21 #include <linux/uio.h> 22 #include <linux/dax.h> 23 #include <linux/io.h> 24 #include <asm/extmem.h> 25 26 #define DCSSBLK_NAME "dcssblk" 27 #define DCSSBLK_MINORS_PER_DISK 1 28 #define DCSSBLK_PARM_LEN 400 29 #define DCSS_BUS_ID_SIZE 20 30 31 static int dcssblk_open(struct gendisk *disk, blk_mode_t mode); 32 static void dcssblk_release(struct gendisk *disk); 33 static void dcssblk_submit_bio(struct bio *bio); 34 static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 35 long nr_pages, enum dax_access_mode mode, void **kaddr, 36 pfn_t *pfn); 37 38 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; 39 40 static int dcssblk_major; 41 static const struct block_device_operations dcssblk_devops = { 42 .owner = THIS_MODULE, 43 .submit_bio = dcssblk_submit_bio, 44 .open = dcssblk_open, 45 .release = dcssblk_release, 46 }; 47 48 static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev, 49 pgoff_t pgoff, size_t nr_pages) 50 { 51 long rc; 52 void *kaddr; 53 54 rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, 55 &kaddr, NULL); 56 if (rc < 0) 57 return dax_mem2blk_err(rc); 58 59 memset(kaddr, 0, nr_pages << PAGE_SHIFT); 60 dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT); 61 return 0; 62 } 63 64 static const struct dax_operations dcssblk_dax_ops = { 65 .direct_access = dcssblk_dax_direct_access, 66 .zero_page_range = dcssblk_dax_zero_page_range, 67 }; 68 69 struct dcssblk_dev_info { 70 struct list_head lh; 71 struct device dev; 72 char segment_name[DCSS_BUS_ID_SIZE]; 73 atomic_t use_count; 74 struct gendisk *gd; 75 unsigned long start; 76 unsigned long end; 77 int segment_type; 78 unsigned char save_pending; 79 unsigned char is_shared; 80 int num_of_segments; 81 struct list_head seg_list; 82 struct dax_device *dax_dev; 83 }; 84 85 struct segment_info { 86 struct list_head lh; 87 char segment_name[DCSS_BUS_ID_SIZE]; 88 unsigned long start; 89 unsigned long end; 90 int segment_type; 91 }; 92 93 static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf, 94 size_t count); 95 static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf, 96 size_t count); 97 98 static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store); 99 static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store); 100 101 static struct device *dcssblk_root_dev; 102 103 static LIST_HEAD(dcssblk_devices); 104 static struct rw_semaphore dcssblk_devices_sem; 105 106 /* 107 * release function for segment device. 108 */ 109 static void 110 dcssblk_release_segment(struct device *dev) 111 { 112 struct dcssblk_dev_info *dev_info; 113 struct segment_info *entry, *temp; 114 115 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 116 list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) { 117 list_del(&entry->lh); 118 kfree(entry); 119 } 120 kfree(dev_info); 121 module_put(THIS_MODULE); 122 } 123 124 /* 125 * get a minor number. needs to be called with 126 * down_write(&dcssblk_devices_sem) and the 127 * device needs to be enqueued before the semaphore is 128 * freed. 129 */ 130 static int 131 dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) 132 { 133 int minor, found; 134 struct dcssblk_dev_info *entry; 135 136 if (dev_info == NULL) 137 return -EINVAL; 138 for (minor = 0; minor < (1<<MINORBITS); minor++) { 139 found = 0; 140 // test if minor available 141 list_for_each_entry(entry, &dcssblk_devices, lh) 142 if (minor == entry->gd->first_minor) 143 found++; 144 if (!found) break; // got unused minor 145 } 146 if (found) 147 return -EBUSY; 148 dev_info->gd->first_minor = minor; 149 return 0; 150 } 151 152 /* 153 * get the struct dcssblk_dev_info from dcssblk_devices 154 * for the given name. 155 * down_read(&dcssblk_devices_sem) must be held. 156 */ 157 static struct dcssblk_dev_info * 158 dcssblk_get_device_by_name(char *name) 159 { 160 struct dcssblk_dev_info *entry; 161 162 list_for_each_entry(entry, &dcssblk_devices, lh) { 163 if (!strcmp(name, entry->segment_name)) { 164 return entry; 165 } 166 } 167 return NULL; 168 } 169 170 /* 171 * get the struct segment_info from seg_list 172 * for the given name. 173 * down_read(&dcssblk_devices_sem) must be held. 174 */ 175 static struct segment_info * 176 dcssblk_get_segment_by_name(char *name) 177 { 178 struct dcssblk_dev_info *dev_info; 179 struct segment_info *entry; 180 181 list_for_each_entry(dev_info, &dcssblk_devices, lh) { 182 list_for_each_entry(entry, &dev_info->seg_list, lh) { 183 if (!strcmp(name, entry->segment_name)) 184 return entry; 185 } 186 } 187 return NULL; 188 } 189 190 /* 191 * get the highest address of the multi-segment block. 192 */ 193 static unsigned long 194 dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info) 195 { 196 unsigned long highest_addr; 197 struct segment_info *entry; 198 199 highest_addr = 0; 200 list_for_each_entry(entry, &dev_info->seg_list, lh) { 201 if (highest_addr < entry->end) 202 highest_addr = entry->end; 203 } 204 return highest_addr; 205 } 206 207 /* 208 * get the lowest address of the multi-segment block. 209 */ 210 static unsigned long 211 dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info) 212 { 213 int set_first; 214 unsigned long lowest_addr; 215 struct segment_info *entry; 216 217 set_first = 0; 218 lowest_addr = 0; 219 list_for_each_entry(entry, &dev_info->seg_list, lh) { 220 if (set_first == 0) { 221 lowest_addr = entry->start; 222 set_first = 1; 223 } else { 224 if (lowest_addr > entry->start) 225 lowest_addr = entry->start; 226 } 227 } 228 return lowest_addr; 229 } 230 231 /* 232 * Check continuity of segments. 233 */ 234 static int 235 dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) 236 { 237 int i, j, rc; 238 struct segment_info *sort_list, *entry, temp; 239 240 if (dev_info->num_of_segments <= 1) 241 return 0; 242 243 sort_list = kcalloc(dev_info->num_of_segments, 244 sizeof(struct segment_info), 245 GFP_KERNEL); 246 if (sort_list == NULL) 247 return -ENOMEM; 248 i = 0; 249 list_for_each_entry(entry, &dev_info->seg_list, lh) { 250 memcpy(&sort_list[i], entry, sizeof(struct segment_info)); 251 i++; 252 } 253 254 /* sort segments */ 255 for (i = 0; i < dev_info->num_of_segments; i++) 256 for (j = 0; j < dev_info->num_of_segments; j++) 257 if (sort_list[j].start > sort_list[i].start) { 258 memcpy(&temp, &sort_list[i], 259 sizeof(struct segment_info)); 260 memcpy(&sort_list[i], &sort_list[j], 261 sizeof(struct segment_info)); 262 memcpy(&sort_list[j], &temp, 263 sizeof(struct segment_info)); 264 } 265 266 /* check continuity */ 267 for (i = 0; i < dev_info->num_of_segments - 1; i++) { 268 if ((sort_list[i].end + 1) != sort_list[i+1].start) { 269 pr_err("Adjacent DCSSs %s and %s are not " 270 "contiguous\n", sort_list[i].segment_name, 271 sort_list[i+1].segment_name); 272 rc = -EINVAL; 273 goto out; 274 } 275 /* EN and EW are allowed in a block device */ 276 if (sort_list[i].segment_type != sort_list[i+1].segment_type) { 277 if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) || 278 (sort_list[i].segment_type == SEG_TYPE_ER) || 279 !(sort_list[i+1].segment_type & 280 SEGMENT_EXCLUSIVE) || 281 (sort_list[i+1].segment_type == SEG_TYPE_ER)) { 282 pr_err("DCSS %s and DCSS %s have " 283 "incompatible types\n", 284 sort_list[i].segment_name, 285 sort_list[i+1].segment_name); 286 rc = -EINVAL; 287 goto out; 288 } 289 } 290 } 291 rc = 0; 292 out: 293 kfree(sort_list); 294 return rc; 295 } 296 297 /* 298 * Load a segment 299 */ 300 static int 301 dcssblk_load_segment(char *name, struct segment_info **seg_info) 302 { 303 int rc; 304 305 /* already loaded? */ 306 down_read(&dcssblk_devices_sem); 307 *seg_info = dcssblk_get_segment_by_name(name); 308 up_read(&dcssblk_devices_sem); 309 if (*seg_info != NULL) 310 return -EEXIST; 311 312 /* get a struct segment_info */ 313 *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL); 314 if (*seg_info == NULL) 315 return -ENOMEM; 316 317 strscpy((*seg_info)->segment_name, name); 318 319 /* load the segment */ 320 rc = segment_load(name, SEGMENT_SHARED, 321 &(*seg_info)->start, &(*seg_info)->end); 322 if (rc < 0) { 323 segment_warning(rc, (*seg_info)->segment_name); 324 kfree(*seg_info); 325 } else { 326 INIT_LIST_HEAD(&(*seg_info)->lh); 327 (*seg_info)->segment_type = rc; 328 } 329 return rc; 330 } 331 332 /* 333 * device attribute for switching shared/nonshared (exclusive) 334 * operation (show + store) 335 */ 336 static ssize_t 337 dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf) 338 { 339 struct dcssblk_dev_info *dev_info; 340 341 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 342 return sysfs_emit(buf, dev_info->is_shared ? "1\n" : "0\n"); 343 } 344 345 static ssize_t 346 dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) 347 { 348 struct dcssblk_dev_info *dev_info; 349 struct segment_info *entry, *temp; 350 int rc; 351 352 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) 353 return -EINVAL; 354 down_write(&dcssblk_devices_sem); 355 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 356 if (atomic_read(&dev_info->use_count)) { 357 rc = -EBUSY; 358 goto out; 359 } 360 if (inbuf[0] == '1') { 361 /* reload segments in shared mode */ 362 list_for_each_entry(entry, &dev_info->seg_list, lh) { 363 rc = segment_modify_shared(entry->segment_name, 364 SEGMENT_SHARED); 365 if (rc < 0) { 366 BUG_ON(rc == -EINVAL); 367 if (rc != -EAGAIN) 368 goto removeseg; 369 } 370 } 371 dev_info->is_shared = 1; 372 switch (dev_info->segment_type) { 373 case SEG_TYPE_SR: 374 case SEG_TYPE_ER: 375 case SEG_TYPE_SC: 376 set_disk_ro(dev_info->gd, 1); 377 } 378 } else if (inbuf[0] == '0') { 379 /* reload segments in exclusive mode */ 380 if (dev_info->segment_type == SEG_TYPE_SC) { 381 pr_err("DCSS %s is of type SC and cannot be " 382 "loaded as exclusive-writable\n", 383 dev_info->segment_name); 384 rc = -EINVAL; 385 goto out; 386 } 387 list_for_each_entry(entry, &dev_info->seg_list, lh) { 388 rc = segment_modify_shared(entry->segment_name, 389 SEGMENT_EXCLUSIVE); 390 if (rc < 0) { 391 BUG_ON(rc == -EINVAL); 392 if (rc != -EAGAIN) 393 goto removeseg; 394 } 395 } 396 dev_info->is_shared = 0; 397 set_disk_ro(dev_info->gd, 0); 398 } else { 399 rc = -EINVAL; 400 goto out; 401 } 402 rc = count; 403 goto out; 404 405 removeseg: 406 pr_err("DCSS device %s is removed after a failed access mode " 407 "change\n", dev_info->segment_name); 408 temp = entry; 409 list_for_each_entry(entry, &dev_info->seg_list, lh) { 410 if (entry != temp) 411 segment_unload(entry->segment_name); 412 } 413 list_del(&dev_info->lh); 414 up_write(&dcssblk_devices_sem); 415 416 dax_remove_host(dev_info->gd); 417 kill_dax(dev_info->dax_dev); 418 put_dax(dev_info->dax_dev); 419 del_gendisk(dev_info->gd); 420 put_disk(dev_info->gd); 421 422 if (device_remove_file_self(dev, attr)) { 423 device_unregister(dev); 424 put_device(dev); 425 } 426 return rc; 427 out: 428 up_write(&dcssblk_devices_sem); 429 return rc; 430 } 431 static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show, 432 dcssblk_shared_store); 433 434 /* 435 * device attribute for save operation on current copy 436 * of the segment. If the segment is busy, saving will 437 * become pending until it gets released, which can be 438 * undone by storing a non-true value to this entry. 439 * (show + store) 440 */ 441 static ssize_t 442 dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf) 443 { 444 struct dcssblk_dev_info *dev_info; 445 446 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 447 return sysfs_emit(buf, dev_info->save_pending ? "1\n" : "0\n"); 448 } 449 450 static ssize_t 451 dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) 452 { 453 struct dcssblk_dev_info *dev_info; 454 struct segment_info *entry; 455 456 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) 457 return -EINVAL; 458 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 459 460 down_write(&dcssblk_devices_sem); 461 if (inbuf[0] == '1') { 462 if (atomic_read(&dev_info->use_count) == 0) { 463 // device is idle => we save immediately 464 pr_info("All DCSSs that map to device %s are " 465 "saved\n", dev_info->segment_name); 466 list_for_each_entry(entry, &dev_info->seg_list, lh) { 467 if (entry->segment_type == SEG_TYPE_EN || 468 entry->segment_type == SEG_TYPE_SN) 469 pr_warn("DCSS %s is of type SN or EN" 470 " and cannot be saved\n", 471 entry->segment_name); 472 else 473 segment_save(entry->segment_name); 474 } 475 } else { 476 // device is busy => we save it when it becomes 477 // idle in dcssblk_release 478 pr_info("Device %s is in use, its DCSSs will be " 479 "saved when it becomes idle\n", 480 dev_info->segment_name); 481 dev_info->save_pending = 1; 482 } 483 } else if (inbuf[0] == '0') { 484 if (dev_info->save_pending) { 485 // device is busy & the user wants to undo his save 486 // request 487 dev_info->save_pending = 0; 488 pr_info("A pending save request for device %s " 489 "has been canceled\n", 490 dev_info->segment_name); 491 } 492 } else { 493 up_write(&dcssblk_devices_sem); 494 return -EINVAL; 495 } 496 up_write(&dcssblk_devices_sem); 497 return count; 498 } 499 static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show, 500 dcssblk_save_store); 501 502 /* 503 * device attribute for showing all segments in a device 504 */ 505 static ssize_t 506 dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, 507 char *buf) 508 { 509 struct dcssblk_dev_info *dev_info; 510 struct segment_info *entry; 511 int i; 512 513 i = 0; 514 down_read(&dcssblk_devices_sem); 515 dev_info = container_of(dev, struct dcssblk_dev_info, dev); 516 list_for_each_entry(entry, &dev_info->seg_list, lh) 517 i += sysfs_emit_at(buf, i, "%s\n", entry->segment_name); 518 up_read(&dcssblk_devices_sem); 519 return i; 520 } 521 static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL); 522 523 static struct attribute *dcssblk_dev_attrs[] = { 524 &dev_attr_shared.attr, 525 &dev_attr_save.attr, 526 &dev_attr_seglist.attr, 527 NULL, 528 }; 529 static struct attribute_group dcssblk_dev_attr_group = { 530 .attrs = dcssblk_dev_attrs, 531 }; 532 static const struct attribute_group *dcssblk_dev_attr_groups[] = { 533 &dcssblk_dev_attr_group, 534 NULL, 535 }; 536 537 static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info) 538 { 539 struct dax_device *dax_dev; 540 541 if (!IS_ENABLED(CONFIG_DCSSBLK_DAX)) 542 return 0; 543 544 dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops); 545 if (IS_ERR(dax_dev)) 546 return PTR_ERR(dax_dev); 547 set_dax_synchronous(dax_dev); 548 dev_info->dax_dev = dax_dev; 549 return dax_add_host(dev_info->dax_dev, dev_info->gd); 550 } 551 552 /* 553 * device attribute for adding devices 554 */ 555 static ssize_t 556 dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 557 { 558 struct queue_limits lim = { 559 .logical_block_size = 4096, 560 .features = BLK_FEAT_DAX, 561 }; 562 int rc, i, j, num_of_segments; 563 struct dcssblk_dev_info *dev_info; 564 struct segment_info *seg_info, *temp; 565 char *local_buf; 566 unsigned long seg_byte_size; 567 568 dev_info = NULL; 569 seg_info = NULL; 570 if (dev != dcssblk_root_dev) { 571 rc = -EINVAL; 572 goto out_nobuf; 573 } 574 if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) { 575 rc = -ENAMETOOLONG; 576 goto out_nobuf; 577 } 578 579 local_buf = kmalloc(count + 1, GFP_KERNEL); 580 if (local_buf == NULL) { 581 rc = -ENOMEM; 582 goto out_nobuf; 583 } 584 585 /* 586 * parse input 587 */ 588 num_of_segments = 0; 589 for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { 590 for (j = i; j < count && 591 (buf[j] != ':') && 592 (buf[j] != '\0') && 593 (buf[j] != '\n'); j++) { 594 local_buf[j-i] = toupper(buf[j]); 595 } 596 local_buf[j-i] = '\0'; 597 if (((j - i) == 0) || ((j - i) > 8)) { 598 rc = -ENAMETOOLONG; 599 goto seg_list_del; 600 } 601 602 rc = dcssblk_load_segment(local_buf, &seg_info); 603 if (rc < 0) 604 goto seg_list_del; 605 /* 606 * get a struct dcssblk_dev_info 607 */ 608 if (num_of_segments == 0) { 609 dev_info = kzalloc(sizeof(struct dcssblk_dev_info), 610 GFP_KERNEL); 611 if (dev_info == NULL) { 612 rc = -ENOMEM; 613 goto out; 614 } 615 strscpy(dev_info->segment_name, local_buf); 616 dev_info->segment_type = seg_info->segment_type; 617 INIT_LIST_HEAD(&dev_info->seg_list); 618 } 619 list_add_tail(&seg_info->lh, &dev_info->seg_list); 620 num_of_segments++; 621 i = j; 622 623 if ((buf[j] == '\0') || (buf[j] == '\n')) 624 break; 625 } 626 627 /* no trailing colon at the end of the input */ 628 if ((i > 0) && (buf[i-1] == ':')) { 629 rc = -ENAMETOOLONG; 630 goto seg_list_del; 631 } 632 strscpy(local_buf, buf, i + 1); 633 dev_info->num_of_segments = num_of_segments; 634 rc = dcssblk_is_continuous(dev_info); 635 if (rc < 0) 636 goto seg_list_del; 637 638 dev_info->start = dcssblk_find_lowest_addr(dev_info); 639 dev_info->end = dcssblk_find_highest_addr(dev_info); 640 641 dev_set_name(&dev_info->dev, "%s", dev_info->segment_name); 642 dev_info->dev.release = dcssblk_release_segment; 643 dev_info->dev.groups = dcssblk_dev_attr_groups; 644 INIT_LIST_HEAD(&dev_info->lh); 645 dev_info->gd = blk_alloc_disk(&lim, NUMA_NO_NODE); 646 if (IS_ERR(dev_info->gd)) { 647 rc = PTR_ERR(dev_info->gd); 648 goto seg_list_del; 649 } 650 dev_info->gd->major = dcssblk_major; 651 dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK; 652 dev_info->gd->fops = &dcssblk_devops; 653 dev_info->gd->private_data = dev_info; 654 dev_info->gd->flags |= GENHD_FL_NO_PART; 655 656 seg_byte_size = (dev_info->end - dev_info->start + 1); 657 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors 658 pr_info("Loaded %s with total size %lu bytes and capacity %lu " 659 "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); 660 661 dev_info->save_pending = 0; 662 dev_info->is_shared = 1; 663 dev_info->dev.parent = dcssblk_root_dev; 664 665 /* 666 *get minor, add to list 667 */ 668 down_write(&dcssblk_devices_sem); 669 if (dcssblk_get_segment_by_name(local_buf)) { 670 rc = -EEXIST; 671 goto release_gd; 672 } 673 rc = dcssblk_assign_free_minor(dev_info); 674 if (rc) 675 goto release_gd; 676 sprintf(dev_info->gd->disk_name, "dcssblk%d", 677 dev_info->gd->first_minor); 678 list_add_tail(&dev_info->lh, &dcssblk_devices); 679 680 if (!try_module_get(THIS_MODULE)) { 681 rc = -ENODEV; 682 goto dev_list_del; 683 } 684 /* 685 * register the device 686 */ 687 rc = device_register(&dev_info->dev); 688 if (rc) 689 goto put_dev; 690 691 rc = dcssblk_setup_dax(dev_info); 692 if (rc) 693 goto out_dax; 694 695 get_device(&dev_info->dev); 696 rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL); 697 if (rc) 698 goto out_dax_host; 699 700 switch (dev_info->segment_type) { 701 case SEG_TYPE_SR: 702 case SEG_TYPE_ER: 703 case SEG_TYPE_SC: 704 set_disk_ro(dev_info->gd,1); 705 break; 706 default: 707 set_disk_ro(dev_info->gd,0); 708 break; 709 } 710 up_write(&dcssblk_devices_sem); 711 rc = count; 712 goto out; 713 714 out_dax_host: 715 put_device(&dev_info->dev); 716 dax_remove_host(dev_info->gd); 717 out_dax: 718 kill_dax(dev_info->dax_dev); 719 put_dax(dev_info->dax_dev); 720 put_dev: 721 list_del(&dev_info->lh); 722 put_disk(dev_info->gd); 723 list_for_each_entry(seg_info, &dev_info->seg_list, lh) { 724 segment_unload(seg_info->segment_name); 725 } 726 put_device(&dev_info->dev); 727 up_write(&dcssblk_devices_sem); 728 goto out; 729 dev_list_del: 730 list_del(&dev_info->lh); 731 release_gd: 732 put_disk(dev_info->gd); 733 up_write(&dcssblk_devices_sem); 734 seg_list_del: 735 if (dev_info == NULL) 736 goto out; 737 list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) { 738 list_del(&seg_info->lh); 739 segment_unload(seg_info->segment_name); 740 kfree(seg_info); 741 } 742 kfree(dev_info); 743 out: 744 kfree(local_buf); 745 out_nobuf: 746 return rc; 747 } 748 749 /* 750 * device attribute for removing devices 751 */ 752 static ssize_t 753 dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 754 { 755 struct dcssblk_dev_info *dev_info; 756 struct segment_info *entry; 757 int rc, i; 758 char *local_buf; 759 760 if (dev != dcssblk_root_dev) { 761 return -EINVAL; 762 } 763 local_buf = kmalloc(count + 1, GFP_KERNEL); 764 if (local_buf == NULL) { 765 return -ENOMEM; 766 } 767 /* 768 * parse input 769 */ 770 for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) { 771 local_buf[i] = toupper(buf[i]); 772 } 773 local_buf[i] = '\0'; 774 if ((i == 0) || (i > 8)) { 775 rc = -ENAMETOOLONG; 776 goto out_buf; 777 } 778 779 down_write(&dcssblk_devices_sem); 780 dev_info = dcssblk_get_device_by_name(local_buf); 781 if (dev_info == NULL) { 782 up_write(&dcssblk_devices_sem); 783 pr_warn("Device %s cannot be removed because it is not a known device\n", 784 local_buf); 785 rc = -ENODEV; 786 goto out_buf; 787 } 788 if (atomic_read(&dev_info->use_count) != 0) { 789 up_write(&dcssblk_devices_sem); 790 pr_warn("Device %s cannot be removed while it is in use\n", 791 local_buf); 792 rc = -EBUSY; 793 goto out_buf; 794 } 795 796 list_del(&dev_info->lh); 797 /* unload all related segments */ 798 list_for_each_entry(entry, &dev_info->seg_list, lh) 799 segment_unload(entry->segment_name); 800 up_write(&dcssblk_devices_sem); 801 802 dax_remove_host(dev_info->gd); 803 kill_dax(dev_info->dax_dev); 804 put_dax(dev_info->dax_dev); 805 del_gendisk(dev_info->gd); 806 put_disk(dev_info->gd); 807 808 device_unregister(&dev_info->dev); 809 put_device(&dev_info->dev); 810 811 rc = count; 812 out_buf: 813 kfree(local_buf); 814 return rc; 815 } 816 817 static int 818 dcssblk_open(struct gendisk *disk, blk_mode_t mode) 819 { 820 struct dcssblk_dev_info *dev_info = disk->private_data; 821 int rc; 822 823 if (NULL == dev_info) { 824 rc = -ENODEV; 825 goto out; 826 } 827 atomic_inc(&dev_info->use_count); 828 rc = 0; 829 out: 830 return rc; 831 } 832 833 static void 834 dcssblk_release(struct gendisk *disk) 835 { 836 struct dcssblk_dev_info *dev_info = disk->private_data; 837 struct segment_info *entry; 838 839 if (!dev_info) { 840 WARN_ON(1); 841 return; 842 } 843 down_write(&dcssblk_devices_sem); 844 if (atomic_dec_and_test(&dev_info->use_count) 845 && (dev_info->save_pending)) { 846 pr_info("Device %s has become idle and is being saved " 847 "now\n", dev_info->segment_name); 848 list_for_each_entry(entry, &dev_info->seg_list, lh) { 849 if (entry->segment_type == SEG_TYPE_EN || 850 entry->segment_type == SEG_TYPE_SN) 851 pr_warn("DCSS %s is of type SN or EN and cannot" 852 " be saved\n", entry->segment_name); 853 else 854 segment_save(entry->segment_name); 855 } 856 dev_info->save_pending = 0; 857 } 858 up_write(&dcssblk_devices_sem); 859 } 860 861 static void 862 dcssblk_submit_bio(struct bio *bio) 863 { 864 struct dcssblk_dev_info *dev_info; 865 struct bio_vec bvec; 866 struct bvec_iter iter; 867 unsigned long index; 868 void *page_addr; 869 unsigned long source_addr; 870 unsigned long bytes_done; 871 872 bytes_done = 0; 873 dev_info = bio->bi_bdev->bd_disk->private_data; 874 if (dev_info == NULL) 875 goto fail; 876 if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) || 877 !IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE)) 878 /* Request is not page-aligned. */ 879 goto fail; 880 /* verify data transfer direction */ 881 if (dev_info->is_shared) { 882 switch (dev_info->segment_type) { 883 case SEG_TYPE_SR: 884 case SEG_TYPE_ER: 885 case SEG_TYPE_SC: 886 /* cannot write to these segments */ 887 if (bio_data_dir(bio) == WRITE) { 888 pr_warn("Writing to %s failed because it is a read-only device\n", 889 dev_name(&dev_info->dev)); 890 goto fail; 891 } 892 } 893 } 894 895 index = (bio->bi_iter.bi_sector >> 3); 896 bio_for_each_segment(bvec, bio, iter) { 897 page_addr = bvec_virt(&bvec); 898 source_addr = dev_info->start + (index<<12) + bytes_done; 899 if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) || 900 !IS_ALIGNED(bvec.bv_len, PAGE_SIZE))) 901 // More paranoia. 902 goto fail; 903 if (bio_data_dir(bio) == READ) 904 memcpy(page_addr, __va(source_addr), bvec.bv_len); 905 else 906 memcpy(__va(source_addr), page_addr, bvec.bv_len); 907 bytes_done += bvec.bv_len; 908 } 909 bio_endio(bio); 910 return; 911 fail: 912 bio_io_error(bio); 913 } 914 915 static long 916 __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, 917 long nr_pages, void **kaddr, pfn_t *pfn) 918 { 919 resource_size_t offset = pgoff * PAGE_SIZE; 920 unsigned long dev_sz; 921 922 dev_sz = dev_info->end - dev_info->start + 1; 923 if (kaddr) 924 *kaddr = __va(dev_info->start + offset); 925 if (pfn) 926 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 927 PFN_DEV); 928 929 return (dev_sz - offset) / PAGE_SIZE; 930 } 931 932 static long 933 dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, 934 long nr_pages, enum dax_access_mode mode, void **kaddr, 935 pfn_t *pfn) 936 { 937 struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev); 938 939 return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn); 940 } 941 942 static void 943 dcssblk_check_params(void) 944 { 945 int rc, i, j, k; 946 char buf[DCSSBLK_PARM_LEN + 1]; 947 struct dcssblk_dev_info *dev_info; 948 949 for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0'); 950 i++) { 951 for (j = i; (j < DCSSBLK_PARM_LEN) && 952 (dcssblk_segments[j] != ',') && 953 (dcssblk_segments[j] != '\0') && 954 (dcssblk_segments[j] != '('); j++) 955 { 956 buf[j-i] = dcssblk_segments[j]; 957 } 958 buf[j-i] = '\0'; 959 rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i); 960 if ((rc >= 0) && (dcssblk_segments[j] == '(')) { 961 for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++) 962 buf[k] = toupper(buf[k]); 963 buf[k] = '\0'; 964 if (!strncmp(&dcssblk_segments[j], "(local)", 7)) { 965 down_read(&dcssblk_devices_sem); 966 dev_info = dcssblk_get_device_by_name(buf); 967 up_read(&dcssblk_devices_sem); 968 if (dev_info) 969 dcssblk_shared_store(&dev_info->dev, 970 NULL, "0\n", 2); 971 } 972 } 973 while ((dcssblk_segments[j] != ',') && 974 (dcssblk_segments[j] != '\0')) 975 { 976 j++; 977 } 978 if (dcssblk_segments[j] == '\0') 979 break; 980 i = j; 981 } 982 } 983 984 /* 985 * The init/exit functions. 986 */ 987 static void __exit 988 dcssblk_exit(void) 989 { 990 root_device_unregister(dcssblk_root_dev); 991 unregister_blkdev(dcssblk_major, DCSSBLK_NAME); 992 } 993 994 static int __init 995 dcssblk_init(void) 996 { 997 int rc; 998 999 dcssblk_root_dev = root_device_register("dcssblk"); 1000 if (IS_ERR(dcssblk_root_dev)) 1001 return PTR_ERR(dcssblk_root_dev); 1002 rc = device_create_file(dcssblk_root_dev, &dev_attr_add); 1003 if (rc) 1004 goto out_root; 1005 rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); 1006 if (rc) 1007 goto out_root; 1008 rc = register_blkdev(0, DCSSBLK_NAME); 1009 if (rc < 0) 1010 goto out_root; 1011 dcssblk_major = rc; 1012 init_rwsem(&dcssblk_devices_sem); 1013 1014 dcssblk_check_params(); 1015 return 0; 1016 1017 out_root: 1018 root_device_unregister(dcssblk_root_dev); 1019 1020 return rc; 1021 } 1022 1023 module_init(dcssblk_init); 1024 module_exit(dcssblk_exit); 1025 1026 module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444); 1027 MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, " 1028 "comma-separated list, names in each set separated " 1029 "by commas are separated by colons, each set contains " 1030 "names of contiguous segments and each name max. 8 chars.\n" 1031 "Adding \"(local)\" to the end of each set equals echoing 0 " 1032 "to /sys/devices/dcssblk/<device name>/shared after loading " 1033 "the contiguous segments - \n" 1034 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\""); 1035 1036 MODULE_DESCRIPTION("S/390 block driver for DCSS memory"); 1037 MODULE_LICENSE("GPL"); 1038