// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) "scm_block: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock_irq(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock_irq(&list_lock);
	return scmrq;
}
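
/*
 * Return a request to the inactive list: free any full aidaw pages that
 * were taken from the mempool (indirect data address lists are either
 * page-aligned pool pages or live at the end of the aob page) and make
 * the scm_request available for reuse.
 */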
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = (u64)dma64_to_virt(msb->data_addr);

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = virt_to_dma64(aidaw);

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = virt_to_dma64(page_address(bv.bv_page));
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = BLK_STS_OK;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}

static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_mq_requeue_request(scmrq->request[i], false);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	blk_mq_kick_requeue_list(bdev->rq);
}

static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	blk_status_t *error;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		error = blk_mq_rq_to_pdu(scmrq->request[i]);
		*error = scmrq->error;
		if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
			blk_mq_complete_request(scmrq->request[i]);
	}

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static void scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	atomic_inc(&bdev->queued_reqs);
	if (eadm_start_aob(scmrq->aob)) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
}
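
/*
 * Per-hw-queue context: ->queue_rq() gathers up to nr_requests_per_io
 * block layer requests in one scm_request (one msb per request) before
 * the aob is started; the lock serializes access to the open request.
 */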
struct scm_queue {
	struct scm_request *scmrq;
	spinlock_t lock;
};

static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct scm_device *scmdev = hctx->queue->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_queue *sq = hctx->driver_data;
	struct request *req = qd->rq;
	struct scm_request *scmrq;

	spin_lock(&sq->lock);
	if (!scm_permit_request(bdev, req)) {
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}

	scmrq = sq->scmrq;
	if (!scmrq) {
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			spin_unlock(&sq->lock);
			return BLK_STS_RESOURCE;
		}
		scm_request_init(bdev, scmrq);
		sq->scmrq = scmrq;
	}
	scm_request_set(scmrq, req);

	if (scm_request_prepare(scmrq)) {
		SCM_LOG(5, "aidaw alloc failed");
		scm_request_set(scmrq, NULL);

		if (scmrq->aob->request.msb_count)
			scm_request_start(scmrq);

		sq->scmrq = NULL;
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}
	blk_mq_start_request(req);

	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
		scm_request_start(scmrq);
		sq->scmrq = NULL;
	}
	spin_unlock(&sq->lock);
	return BLK_STS_OK;
}

static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int idx)
{
	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;

	spin_lock_init(&qd->lock);
	hctx->driver_data = qd;

	return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct scm_queue *qd = hctx->driver_data;

	WARN_ON(qd->scmrq);
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == BLK_STS_TIMEOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}
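
/*
 * Decide how to continue after a failed request: anything but a write
 * prohibit is restarted; a write prohibit marks the device
 * SCM_WR_PROHIBIT and requeues, as does a failed restart.
 */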
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != BLK_STS_IOERR)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	scm_request_requeue(scmrq);
}

void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
	struct scm_request *scmrq = data;

	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}

	scm_request_finish(scmrq);
}

static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq = scm_blk_request,
	.complete = scm_blk_request_done,
	.init_hctx = scm_blk_init_hctx,
	.exit_hctx = scm_blk_exit_hctx,
};
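
/*
 * Create tag set and gendisk for a new device. Disk names follow the
 * scheme scma..scmz, scmaa..scmzz, which limits the driver to
 * 26 + 26 * 26 = 702 devices.
 */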
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct queue_limits lim = {
		.logical_block_size	= 1 << 12,
	};
	unsigned int devindex;
	int len, ret;

	lim.max_segments = min(scmdev->nr_max_block,
			       (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
	lim.max_hw_sectors = lim.max_segments << 3; /* 8 * 512 = blk_size */

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->lock);
	atomic_set(&bdev->queued_reqs, 0);

	bdev->tag_set.ops = &scm_mq_ops;
	bdev->tag_set.cmd_size = sizeof(blk_status_t);
	bdev->tag_set.nr_hw_queues = nr_requests;
	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
	bdev->tag_set.numa_node = NUMA_NO_NODE;

	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
	if (ret)
		goto out;

	bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, &lim, scmdev);
	if (IS_ERR(bdev->gendisk)) {
		ret = PTR_ERR(bdev->gendisk);
		goto out_tag;
	}
	/* Needed by blk_mq_kick_requeue_list() in scm_request_requeue(). */
	bdev->rq = bdev->gendisk->queue;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
	bdev->gendisk->minors = SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	ret = device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
	if (ret)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(bdev->gendisk);
out_tag:
	blk_mq_free_tag_set(&bdev->tag_set);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	del_gendisk(bdev->gendisk);
	put_disk(bdev->gendisk);
	blk_mq_free_tag_set(&bdev->tag_set);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return true;
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);