// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014-2016 Christoph Hellwig.
 */
#include <linux/sunrpc/svc.h>
#include <linux/blkdev.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>
#include <linux/pr.h>

#include "blocklayout.h"
#include "../nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

static void bl_unregister_scsi(struct pnfs_block_dev *dev)
{
        struct block_device *bdev = file_bdev(dev->bdev_file);
        const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
        int status;

        status = ops->pr_register(bdev, dev->pr_key, 0, false);
        if (status)
                trace_bl_pr_key_unreg_err(bdev, dev->pr_key, status);
        else
                trace_bl_pr_key_unreg(bdev, dev->pr_key);
}

static bool bl_register_scsi(struct pnfs_block_dev *dev)
{
        struct block_device *bdev = file_bdev(dev->bdev_file);
        const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
        int status;

        if (test_and_set_bit(PNFS_BDEV_REGISTERED, &dev->flags))
                return true;

        status = ops->pr_register(bdev, 0, dev->pr_key, true);
        if (status) {
                trace_bl_pr_key_reg_err(bdev, dev->pr_key, status);
                return false;
        }
        trace_bl_pr_key_reg(bdev, dev->pr_key);
        return true;
}

static void bl_unregister_dev(struct pnfs_block_dev *dev)
{
        u32 i;

        if (dev->nr_children) {
                for (i = 0; i < dev->nr_children; i++)
                        bl_unregister_dev(&dev->children[i]);
                return;
        }

        if (dev->type == PNFS_BLOCK_VOLUME_SCSI &&
                test_and_clear_bit(PNFS_BDEV_REGISTERED, &dev->flags))
                bl_unregister_scsi(dev);
}

bool bl_register_dev(struct pnfs_block_dev *dev)
{
        u32 i;

        if (dev->nr_children) {
                for (i = 0; i < dev->nr_children; i++) {
                        if (!bl_register_dev(&dev->children[i])) {
                                while (i > 0)
                                        bl_unregister_dev(&dev->children[--i]);
                                return false;
                        }
                }
                return true;
        }

        if (dev->type == PNFS_BLOCK_VOLUME_SCSI)
                return bl_register_scsi(dev);
        return true;
}

static void
bl_free_device(struct pnfs_block_dev *dev)
{
        bl_unregister_dev(dev);

        if (dev->nr_children) {
                int i;

                for (i = 0; i < dev->nr_children; i++)
                        bl_free_device(&dev->children[i]);
                kfree(dev->children);
        } else {
                if (dev->bdev_file)
                        fput(dev->bdev_file);
        }
}

void
bl_free_deviceid_node(struct nfs4_deviceid_node *d)
{
        struct pnfs_block_dev *dev =
                container_of(d, struct pnfs_block_dev, node);

        bl_free_device(dev);
        kfree_rcu(dev, node.rcu);
}
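
/*
 * Decode a single volume entry from the server's GETDEVICEINFO reply into
 * struct pnfs_block_volume.  Each volume type (simple, slice, concat, stripe,
 * SCSI) carries its own XDR encoding; all counts and signature/designator
 * lengths are bounds-checked before anything is copied out of the stream.
 */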
static int
nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
{
        __be32 *p;
        int i;

        p = xdr_inline_decode(xdr, 4);
        if (!p)
                return -EIO;
        b->type = be32_to_cpup(p++);

        switch (b->type) {
        case PNFS_BLOCK_VOLUME_SIMPLE:
                p = xdr_inline_decode(xdr, 4);
                if (!p)
                        return -EIO;
                b->simple.nr_sigs = be32_to_cpup(p++);
                if (!b->simple.nr_sigs || b->simple.nr_sigs > PNFS_BLOCK_MAX_UUIDS) {
                        dprintk("Bad signature count: %d\n", b->simple.nr_sigs);
                        return -EIO;
                }

                b->simple.len = 4 + 4;
                for (i = 0; i < b->simple.nr_sigs; i++) {
                        p = xdr_inline_decode(xdr, 8 + 4);
                        if (!p)
                                return -EIO;
                        p = xdr_decode_hyper(p, &b->simple.sigs[i].offset);
                        b->simple.sigs[i].sig_len = be32_to_cpup(p++);
                        if (b->simple.sigs[i].sig_len > PNFS_BLOCK_UUID_LEN) {
                                pr_info("signature too long: %d\n",
                                        b->simple.sigs[i].sig_len);
                                return -EIO;
                        }

                        p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len);
                        if (!p)
                                return -EIO;
                        memcpy(&b->simple.sigs[i].sig, p,
                                b->simple.sigs[i].sig_len);

                        b->simple.len += 8 + 4 +
                                (XDR_QUADLEN(b->simple.sigs[i].sig_len) << 2);
                }
                break;
        case PNFS_BLOCK_VOLUME_SLICE:
                p = xdr_inline_decode(xdr, 8 + 8 + 4);
                if (!p)
                        return -EIO;
                p = xdr_decode_hyper(p, &b->slice.start);
                p = xdr_decode_hyper(p, &b->slice.len);
                b->slice.volume = be32_to_cpup(p++);
                break;
        case PNFS_BLOCK_VOLUME_CONCAT:
                p = xdr_inline_decode(xdr, 4);
                if (!p)
                        return -EIO;

                b->concat.volumes_count = be32_to_cpup(p++);
                if (b->concat.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
                        dprintk("Too many volumes: %d\n", b->concat.volumes_count);
                        return -EIO;
                }

                p = xdr_inline_decode(xdr, b->concat.volumes_count * 4);
                if (!p)
                        return -EIO;
                for (i = 0; i < b->concat.volumes_count; i++)
                        b->concat.volumes[i] = be32_to_cpup(p++);
                break;
        case PNFS_BLOCK_VOLUME_STRIPE:
                p = xdr_inline_decode(xdr, 8 + 4);
                if (!p)
                        return -EIO;

                p = xdr_decode_hyper(p, &b->stripe.chunk_size);
                b->stripe.volumes_count = be32_to_cpup(p++);
                if (b->stripe.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
                        dprintk("Too many volumes: %d\n", b->stripe.volumes_count);
                        return -EIO;
                }

                p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);
                if (!p)
                        return -EIO;
                for (i = 0; i < b->stripe.volumes_count; i++)
                        b->stripe.volumes[i] = be32_to_cpup(p++);
                break;
        case PNFS_BLOCK_VOLUME_SCSI:
                p = xdr_inline_decode(xdr, 4 + 4 + 4);
                if (!p)
                        return -EIO;
                b->scsi.code_set = be32_to_cpup(p++);
                b->scsi.designator_type = be32_to_cpup(p++);
                b->scsi.designator_len = be32_to_cpup(p++);
                p = xdr_inline_decode(xdr, b->scsi.designator_len);
                if (!p)
                        return -EIO;
                if (b->scsi.designator_len > 256)
                        return -EIO;
                memcpy(&b->scsi.designator, p, b->scsi.designator_len);
                p = xdr_inline_decode(xdr, 8);
                if (!p)
                        return -EIO;
                p = xdr_decode_hyper(p, &b->scsi.pr_key);
                break;
        default:
                dprintk("unknown volume type!\n");
                return -EIO;
        }

        return 0;
}

static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
                struct pnfs_block_dev_map *map)
{
        map->start = dev->start;
        map->len = dev->len;
        map->disk_offset = dev->disk_offset;
        map->bdev = file_bdev(dev->bdev_file);
        return true;
}

static bool bl_map_concat(struct pnfs_block_dev *dev, u64 offset,
                struct pnfs_block_dev_map *map)
{
        int i;

        for (i = 0; i < dev->nr_children; i++) {
                struct pnfs_block_dev *child = &dev->children[i];

                if (child->start > offset ||
                    child->start + child->len <= offset)
                        continue;

                child->map(child, offset - child->start, map);
                return true;
        }

        dprintk("%s: ran off loop!\n", __func__);
        return false;
}
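
/*
 * Map a logical offset in a striped volume to the child device that stores
 * the containing chunk.  Chunks are distributed round-robin across the
 * children: logical chunk N lives on child (N % nr_children) at that child's
 * chunk (N / nr_children).  For example, with a 64k chunk_size and three
 * children, offset 200k falls into logical chunk 3, which maps to child 0 at
 * disk offset 64k; the returned map then covers logical bytes [192k, 256k).
 */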
static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
                struct pnfs_block_dev_map *map)
{
        struct pnfs_block_dev *child;
        u64 chunk;
        u32 chunk_idx;
        u64 disk_chunk;
        u64 disk_offset;

        chunk = div_u64(offset, dev->chunk_size);
        disk_chunk = div_u64_rem(chunk, dev->nr_children, &chunk_idx);

        if (chunk_idx >= dev->nr_children) {
                dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
                        __func__, chunk_idx, offset, dev->chunk_size);
                /* error, should not happen */
                return false;
        }

        /* truncate offset to the beginning of the stripe */
        offset = chunk * dev->chunk_size;

        /* disk offset of the stripe */
        disk_offset = disk_chunk * dev->chunk_size;

        child = &dev->children[chunk_idx];
        child->map(child, disk_offset, map);

        map->start += offset;
        map->disk_offset += disk_offset;
        map->len = dev->chunk_size;
        return true;
}

static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
                struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);


static int
bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
                struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
        struct pnfs_block_volume *v = &volumes[idx];
        struct file *bdev_file;
        dev_t dev;

        dev = bl_resolve_deviceid(server, v, gfp_mask);
        if (!dev)
                return -EIO;

        bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
                                          NULL, NULL);
        if (IS_ERR(bdev_file)) {
                printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
                        MAJOR(dev), MINOR(dev), PTR_ERR(bdev_file));
                return PTR_ERR(bdev_file);
        }
        d->bdev_file = bdev_file;
        d->len = bdev_nr_bytes(file_bdev(bdev_file));
        d->map = bl_map_simple;

        printk(KERN_INFO "pNFS: using block device %s\n",
                file_bdev(bdev_file)->bd_disk->disk_name);
        return 0;
}

static bool
bl_validate_designator(struct pnfs_block_volume *v)
{
        switch (v->scsi.designator_type) {
        case PS_DESIGNATOR_EUI64:
                if (v->scsi.code_set != PS_CODE_SET_BINARY)
                        return false;

                if (v->scsi.designator_len != 8 &&
                    v->scsi.designator_len != 10 &&
                    v->scsi.designator_len != 16)
                        return false;

                return true;
        case PS_DESIGNATOR_NAA:
                if (v->scsi.code_set != PS_CODE_SET_BINARY)
                        return false;

                if (v->scsi.designator_len != 8 &&
                    v->scsi.designator_len != 16)
                        return false;

                return true;
        case PS_DESIGNATOR_T10:
        case PS_DESIGNATOR_NAME:
                pr_err("pNFS: unsupported designator "
                        "(code set %d, type %d, len %d).\n",
                        v->scsi.code_set,
                        v->scsi.designator_type,
                        v->scsi.designator_len);
                return false;
        default:
                pr_err("pNFS: invalid designator "
                        "(code set %d, type %d, len %d).\n",
                        v->scsi.code_set,
                        v->scsi.designator_type,
                        v->scsi.designator_len);
                return false;
        }
}

static struct file *
bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
        struct file *bdev_file;
        const char *devname;

        devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
                        prefix, v->scsi.designator_len, v->scsi.designator);
        if (!devname)
                return ERR_PTR(-ENOMEM);

        bdev_file = bdev_file_open_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE,
                                           NULL, NULL);
        if (IS_ERR(bdev_file)) {
                dprintk("failed to open device %s (%ld)\n",
                        devname, PTR_ERR(bdev_file));
        }

        kfree(devname);
        return bdev_file;
}
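
/*
 * Open a SCSI (or NVMe) volume by its designator.  The device is looked up
 * through the /dev/disk/by-id udev aliases tried via bl_open_path() above;
 * the persistent reservation key from the layout is only recorded here, the
 * actual registration is left to bl_register_scsi().  Devices whose driver
 * does not implement pr_ops are rejected.
 */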
static int
bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
                struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
        struct pnfs_block_volume *v = &volumes[idx];
        struct block_device *bdev;
        const struct pr_ops *ops;
        struct file *bdev_file;
        int error;

        if (!bl_validate_designator(v))
                return -EINVAL;

        /*
         * Try to open the RH/Fedora specific dm-mpath udev path first, as the
         * wwn- links will only point to the first discovered SCSI device there.
         * On other distributions like Debian, the default SCSI by-id path will
         * point to the dm-multipath device if one exists.
         */
        bdev_file = bl_open_path(v, "dm-uuid-mpath-0x");
        if (IS_ERR(bdev_file))
                bdev_file = bl_open_path(v, "wwn-0x");
        if (IS_ERR(bdev_file))
                bdev_file = bl_open_path(v, "nvme-eui.");
        if (IS_ERR(bdev_file)) {
                pr_warn("pNFS: no device found for volume %*phN\n",
                        v->scsi.designator_len, v->scsi.designator);
                return PTR_ERR(bdev_file);
        }
        d->bdev_file = bdev_file;
        bdev = file_bdev(bdev_file);

        d->len = bdev_nr_bytes(bdev);
        d->map = bl_map_simple;
        d->pr_key = v->scsi.pr_key;

        if (d->len == 0)
                return -ENODEV;

        ops = bdev->bd_disk->fops->pr_ops;
        if (!ops) {
                pr_err("pNFS: block device %s does not support reservations.",
                        bdev->bd_disk->disk_name);
                error = -EINVAL;
                goto out_blkdev_put;
        }

        return 0;

out_blkdev_put:
        fput(d->bdev_file);
        return error;
}

static int
bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d,
                struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
        struct pnfs_block_volume *v = &volumes[idx];
        int ret;

        ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
        if (ret)
                return ret;

        d->disk_offset = v->slice.start;
        d->len = v->slice.len;
        return 0;
}

static int
bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
                struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
        struct pnfs_block_volume *v = &volumes[idx];
        u64 len = 0;
        int ret, i;

        d->children = kcalloc(v->concat.volumes_count,
                        sizeof(struct pnfs_block_dev), gfp_mask);
        if (!d->children)
                return -ENOMEM;

        for (i = 0; i < v->concat.volumes_count; i++) {
                ret = bl_parse_deviceid(server, &d->children[i],
                                volumes, v->concat.volumes[i], gfp_mask);
                if (ret)
                        return ret;

                d->nr_children++;
                d->children[i].start += len;
                len += d->children[i].len;
        }

        d->len = len;
        d->map = bl_map_concat;
        return 0;
}

static int
bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
                struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
        struct pnfs_block_volume *v = &volumes[idx];
        u64 len = 0;
        int ret, i;

        d->children = kcalloc(v->stripe.volumes_count,
                        sizeof(struct pnfs_block_dev), gfp_mask);
        if (!d->children)
                return -ENOMEM;

        for (i = 0; i < v->stripe.volumes_count; i++) {
                ret = bl_parse_deviceid(server, &d->children[i],
                                volumes, v->stripe.volumes[i], gfp_mask);
                if (ret)
                        return ret;

                d->nr_children++;
                len += d->children[i].len;
        }

        d->len = len;
        d->chunk_size = v->stripe.chunk_size;
        d->map = bl_map_stripe;
        return 0;
}
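
/*
 * Build the pnfs_block_dev tree for volume @idx by dispatching on its type.
 * Slice, concat and stripe volumes refer to other entries in @volumes by
 * index, so this recurses until it reaches simple or SCSI volumes, which are
 * the ones backed by an actual block device.
 */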
static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
                struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
        d->type = volumes[idx].type;

        switch (d->type) {
        case PNFS_BLOCK_VOLUME_SIMPLE:
                return bl_parse_simple(server, d, volumes, idx, gfp_mask);
        case PNFS_BLOCK_VOLUME_SLICE:
                return bl_parse_slice(server, d, volumes, idx, gfp_mask);
        case PNFS_BLOCK_VOLUME_CONCAT:
                return bl_parse_concat(server, d, volumes, idx, gfp_mask);
        case PNFS_BLOCK_VOLUME_STRIPE:
                return bl_parse_stripe(server, d, volumes, idx, gfp_mask);
        case PNFS_BLOCK_VOLUME_SCSI:
                return bl_parse_scsi(server, d, volumes, idx, gfp_mask);
        default:
                dprintk("unsupported volume type: %d\n", d->type);
                return -EIO;
        }
}

struct nfs4_deviceid_node *
bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
                gfp_t gfp_mask)
{
        struct nfs4_deviceid_node *node = NULL;
        struct pnfs_block_volume *volumes;
        struct pnfs_block_dev *top;
        struct xdr_stream xdr;
        struct xdr_buf buf;
        struct page *scratch;
        int nr_volumes, ret, i;
        __be32 *p;

        scratch = alloc_page(gfp_mask);
        if (!scratch)
                goto out;

        xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen);
        xdr_set_scratch_page(&xdr, scratch);

        p = xdr_inline_decode(&xdr, sizeof(__be32));
        if (!p)
                goto out_free_scratch;
        nr_volumes = be32_to_cpup(p++);

        volumes = kcalloc(nr_volumes, sizeof(struct pnfs_block_volume),
                          gfp_mask);
        if (!volumes)
                goto out_free_scratch;

        for (i = 0; i < nr_volumes; i++) {
                ret = nfs4_block_decode_volume(&xdr, &volumes[i]);
                if (ret < 0)
                        goto out_free_volumes;
        }

        top = kzalloc(sizeof(*top), gfp_mask);
        if (!top)
                goto out_free_volumes;

        ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);

        node = &top->node;
        nfs4_init_deviceid_node(node, server, &pdev->dev_id);
        if (ret)
                nfs4_mark_deviceid_unavailable(node);

out_free_volumes:
        kfree(volumes);
out_free_scratch:
        __free_page(scratch);
out:
        return node;
}