/*******************************************************************************
 * Filename: target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL);
	if (!rd_host)
		return -ENOMEM;

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
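
/*
 * rd_allocate_sgl_table(): populate @sg_table with chained scatterlist
 * segments backing @total_sg_needed pages, each page memset to
 * @init_payload.  Every rd_dev_sg_table holds at most max_sg_per_table
 * entries (RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist)); when
 * more pages are needed, one extra slot is reserved and sg_chain()
 * links the previous table's last slot to the new table.  Rough worked
 * example (assuming a 64-bit build with 32-byte scatterlist entries and
 * RD_MAX_ALLOCATION_SIZE of 64K): 2048 pages per table, so a 10000-page
 * ramdisk needs five chained tables.
 */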
static int
rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
		      u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg)
			return -ENOMEM;

		sg_init_table(sg, sg_per_table + chain_entry);

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return -ENOMEM;

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_prot_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}
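
/*
 * Worked sizing example for the protection space built below (a sketch;
 * the numbers assume 4K pages, 512-byte blocks and 8-byte DIF tuples):
 * each page holds 8 blocks needing 8 bytes of PI apiece, so PI consumes
 * 1/64th of the data space and total_sg_needed works out to
 * rd_page_count * 8 / 512 + 1 == rd_page_count / 64 + 1 pages.
 */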
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length is the number of PI bytes per block (8 for DIF).
	 * Pages needed = rd_page_count * (PAGE_SIZE / block_size) blocks
	 * * prot_length bytes per block / PAGE_SIZE; the PAGE_SIZE
	 * factors cancel, leaving rd_page_count * prot_length /
	 * block_size, plus one page of pad for rounding.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return -ENOMEM;

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
	if (!rd_dev)
		return NULL;

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev->dev_attrib.is_nonrot = 1;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct rd_dev *rd_dev = RD_DEV(dev);

	kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}

static void rd_destroy_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
}
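
/*
 * Table lookup helpers: every rd_dev_sg_table spans a fixed run of
 * pages, so the candidate index is simply page / max_sg_per_table; the
 * page_start_offset/page_end_offset check then confirms the page really
 * falls inside that table.  Sketch of the arithmetic (again assuming
 * 2048 entries per table): page 5000 maps to index 2, which covers
 * pages 4096-6143.
 */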
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
		page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for prot page: %u\n",
		page);

	return NULL;
}

static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u64 tmp;
	sense_reason_t rc = 0;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

	if (se_dev->dev_attrib.pi_prot_verify) {
		if (is_read)
			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
					    prot_sg, prot_offset);
		else
			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
					    cmd->t_prot_sg, 0);
	}
	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	return rc;
}
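
/*
 * rd_execute_rw() walks two cursors in lockstep: an sg_mapping_iter
 * over the command's data scatterlist, and a (page, offset) cursor over
 * the ramdisk's backing tables.  Each step memcpy()s
 * min(fragment length, page remainder) bytes, advancing to the next
 * backing sg when a page is exhausted and to the next chained table
 * when rd_page passes page_end_offset.  For DMA_TO_DEVICE with T10-PI
 * enabled, protection data is verified/copied before the write; for
 * DMA_FROM_DEVICE, after the read.
 */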
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		dev->rd_dev_id,
		data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
		cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			/* Skip the option if the integer fails to parse */
			if (match_int(args, &arg) < 0)
				break;
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			if (match_int(args, &arg) < 0 || arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return count;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}
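
/*
 * Usage sketch: the parameters above are normally written through the
 * device's configfs "control" attribute before it is enabled.  The HBA
 * and device names below are illustrative, not fixed by this file:
 *
 *   echo rd_pages=65536 > /sys/kernel/config/target/core/rd_mcp_0/rd0/control
 *   echo 1 > /sys/kernel/config/target/core/rd_mcp_0/rd0/enable
 */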
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.destroy_device		= rd_destroy_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}