// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014-2016 Christoph Hellwig.
 */
#include <linux/sunrpc/svc.h>
#include <linux/blkdev.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>
#include <linux/pr.h>

#include "blocklayout.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

static void
bl_free_device(struct pnfs_block_dev *dev)
{
	if (dev->nr_children) {
		int i;

		for (i = 0; i < dev->nr_children; i++)
			bl_free_device(&dev->children[i]);
		kfree(dev->children);
	} else {
		if (dev->pr_registered) {
			const struct pr_ops *ops =
				dev->bdev->bd_disk->fops->pr_ops;
			int error;

			error = ops->pr_register(dev->bdev, dev->pr_key, 0,
				false);
			if (error)
				pr_err("failed to unregister PR key.\n");
		}

		if (dev->bdev)
			blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE);
	}
}

void
bl_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	struct pnfs_block_dev *dev =
		container_of(d, struct pnfs_block_dev, node);

	bl_free_device(dev);
	kfree_rcu(dev, node.rcu);
}

static int
nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
{
	__be32 *p;
	int i;

	p = xdr_inline_decode(xdr, 4);
	if (!p)
		return -EIO;
	b->type = be32_to_cpup(p++);

	switch (b->type) {
	case PNFS_BLOCK_VOLUME_SIMPLE:
		p = xdr_inline_decode(xdr, 4);
		if (!p)
			return -EIO;
		b->simple.nr_sigs = be32_to_cpup(p++);
		if (!b->simple.nr_sigs || b->simple.nr_sigs > PNFS_BLOCK_MAX_UUIDS) {
			dprintk("Bad signature count: %d\n", b->simple.nr_sigs);
			return -EIO;
		}

		b->simple.len = 4 + 4;
		for (i = 0; i < b->simple.nr_sigs; i++) {
			p = xdr_inline_decode(xdr, 8 + 4);
			if (!p)
				return -EIO;
			p = xdr_decode_hyper(p, &b->simple.sigs[i].offset);
			b->simple.sigs[i].sig_len = be32_to_cpup(p++);
			if (b->simple.sigs[i].sig_len > PNFS_BLOCK_UUID_LEN) {
				pr_info("signature too long: %d\n",
					b->simple.sigs[i].sig_len);
				return -EIO;
			}

			p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len);
			if (!p)
				return -EIO;
			memcpy(&b->simple.sigs[i].sig, p,
				b->simple.sigs[i].sig_len);

			b->simple.len += 8 + 4 +
				(XDR_QUADLEN(b->simple.sigs[i].sig_len) << 2);
		}
		break;
	case PNFS_BLOCK_VOLUME_SLICE:
		p = xdr_inline_decode(xdr, 8 + 8 + 4);
		if (!p)
			return -EIO;
		p = xdr_decode_hyper(p, &b->slice.start);
		p = xdr_decode_hyper(p, &b->slice.len);
		b->slice.volume = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_CONCAT:
		p = xdr_inline_decode(xdr, 4);
		if (!p)
			return -EIO;

		b->concat.volumes_count = be32_to_cpup(p++);
		if (b->concat.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
			dprintk("Too many volumes: %d\n", b->concat.volumes_count);
			return -EIO;
		}

		p = xdr_inline_decode(xdr, b->concat.volumes_count * 4);
		if (!p)
			return -EIO;
		for (i = 0; i < b->concat.volumes_count; i++)
			b->concat.volumes[i] = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_STRIPE:
		p = xdr_inline_decode(xdr, 8 + 4);
		if (!p)
			return -EIO;

		p = xdr_decode_hyper(p, &b->stripe.chunk_size);
		b->stripe.volumes_count = be32_to_cpup(p++);
		if (b->stripe.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
			dprintk("Too many volumes: %d\n", b->stripe.volumes_count);
			return -EIO;
		}

		p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);
		if (!p)
			return -EIO;
		for (i = 0; i < b->stripe.volumes_count; i++)
			b->stripe.volumes[i] = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_SCSI:
		p = xdr_inline_decode(xdr, 4 + 4 + 4);
		if (!p)
			return -EIO;
		b->scsi.code_set = be32_to_cpup(p++);
		b->scsi.designator_type = be32_to_cpup(p++);
		b->scsi.designator_len = be32_to_cpup(p++);
		p = xdr_inline_decode(xdr, b->scsi.designator_len);
		if (!p)
			return -EIO;
		if (b->scsi.designator_len > 256)
			return -EIO;
		memcpy(&b->scsi.designator, p, b->scsi.designator_len);
		p = xdr_inline_decode(xdr, 8);
		if (!p)
			return -EIO;
		p = xdr_decode_hyper(p, &b->scsi.pr_key);
		break;
	default:
		dprintk("unknown volume type!\n");
		return -EIO;
	}

	return 0;
}
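
/*
 * The bl_map_* helpers translate a byte offset in the logical volume into an
 * offset on one of the underlying block devices.  bl_map_simple handles leaf
 * devices that map 1:1 onto a single struct block_device, while bl_map_concat
 * and bl_map_stripe select the right child device of a concatenation or
 * stripe set and recurse through its ->map callback.
 */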
static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	map->start = dev->start;
	map->len = dev->len;
	map->disk_offset = dev->disk_offset;
	map->bdev = dev->bdev;
	return true;
}

static bool bl_map_concat(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	int i;

	for (i = 0; i < dev->nr_children; i++) {
		struct pnfs_block_dev *child = &dev->children[i];

		if (child->start > offset ||
		    child->start + child->len <= offset)
			continue;

		child->map(child, offset - child->start, map);
		return true;
	}

	dprintk("%s: ran off loop!\n", __func__);
	return false;
}

static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	struct pnfs_block_dev *child;
	u64 chunk;
	u32 chunk_idx;
	u64 disk_offset;

	chunk = div_u64(offset, dev->chunk_size);
	div_u64_rem(chunk, dev->nr_children, &chunk_idx);

	if (chunk_idx >= dev->nr_children) {
		dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
			__func__, chunk_idx, offset, dev->chunk_size);
		/* error, should not happen */
		return false;
	}

	/* truncate offset to the beginning of the stripe */
	offset = chunk * dev->chunk_size;

	/* disk offset of the stripe */
	disk_offset = div_u64(offset, dev->nr_children);

	child = &dev->children[chunk_idx];
	child->map(child, disk_offset, map);

	map->start += offset;
	map->disk_offset += disk_offset;
	map->len = dev->chunk_size;
	return true;
}

static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);

static int
bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	struct block_device *bdev;
	dev_t dev;

	dev = bl_resolve_deviceid(server, v, gfp_mask);
	if (!dev)
		return -EIO;

	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
			MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
		return PTR_ERR(bdev);
	}
	d->bdev = bdev;

	d->len = bdev_nr_bytes(d->bdev);
	d->map = bl_map_simple;

	printk(KERN_INFO "pNFS: using block device %s\n",
		d->bdev->bd_disk->disk_name);
	return 0;
}
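
/*
 * SCSI layout volumes are identified by a device identification VPD page
 * (0x83) designator.  Only binary EUI-64 and NAA designators are accepted
 * here; these are what the /dev/disk/by-id/wwn-* and dm-uuid-mpath-* udev
 * links used by bl_open_path() are typically derived from.
 */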
static bool
bl_validate_designator(struct pnfs_block_volume *v)
{
	switch (v->scsi.designator_type) {
	case PS_DESIGNATOR_EUI64:
		if (v->scsi.code_set != PS_CODE_SET_BINARY)
			return false;

		if (v->scsi.designator_len != 8 &&
		    v->scsi.designator_len != 10 &&
		    v->scsi.designator_len != 16)
			return false;

		return true;
	case PS_DESIGNATOR_NAA:
		if (v->scsi.code_set != PS_CODE_SET_BINARY)
			return false;

		if (v->scsi.designator_len != 8 &&
		    v->scsi.designator_len != 16)
			return false;

		return true;
	case PS_DESIGNATOR_T10:
	case PS_DESIGNATOR_NAME:
		pr_err("pNFS: unsupported designator (code set %d, type %d, len %d).\n",
			v->scsi.code_set,
			v->scsi.designator_type,
			v->scsi.designator_len);
		return false;
	default:
		pr_err("pNFS: invalid designator (code set %d, type %d, len %d).\n",
			v->scsi.code_set,
			v->scsi.designator_type,
			v->scsi.designator_len);
		return false;
	}
}

static struct block_device *
bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
	struct block_device *bdev;
	const char *devname;

	devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
			prefix, v->scsi.designator_len, v->scsi.designator);
	if (!devname)
		return ERR_PTR(-ENOMEM);

	bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		pr_warn("pNFS: failed to open device %s (%ld)\n",
			devname, PTR_ERR(bdev));
	}

	kfree(devname);
	return bdev;
}
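
/*
 * Open the SCSI device named by the volume's designator and register the
 * server-provided persistent reservation key with it.  The RFC 8154 SCSI
 * layout relies on persistent reservations so that the server can fence off
 * a client that fails to respond to a layout recall.
 */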
static int
bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	struct block_device *bdev;
	const struct pr_ops *ops;
	int error;

	if (!bl_validate_designator(v))
		return -EINVAL;

	/*
	 * Try to open the RH/Fedora specific dm-mpath udev path first, as the
	 * wwn- links will only point to the first discovered SCSI device there.
	 * On other distributions like Debian, the default SCSI by-id path will
	 * point to the dm-multipath device if one exists.
	 */
	bdev = bl_open_path(v, "dm-uuid-mpath-0x");
	if (IS_ERR(bdev))
		bdev = bl_open_path(v, "wwn-0x");
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	d->bdev = bdev;

	d->len = bdev_nr_bytes(d->bdev);
	d->map = bl_map_simple;
	d->pr_key = v->scsi.pr_key;

	pr_info("pNFS: using block device %s (reservation key 0x%llx)\n",
		d->bdev->bd_disk->disk_name, d->pr_key);

	ops = d->bdev->bd_disk->fops->pr_ops;
	if (!ops) {
		pr_err("pNFS: block device %s does not support reservations.\n",
			d->bdev->bd_disk->disk_name);
		error = -EINVAL;
		goto out_blkdev_put;
	}

	error = ops->pr_register(d->bdev, 0, d->pr_key, true);
	if (error) {
		pr_err("pNFS: failed to register key for block device %s.\n",
			d->bdev->bd_disk->disk_name);
		goto out_blkdev_put;
	}

	d->pr_registered = true;
	return 0;

out_blkdev_put:
	blkdev_put(d->bdev, FMODE_READ | FMODE_WRITE);
	return error;
}

static int
bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	int ret;

	ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
	if (ret)
		return ret;

	d->disk_offset = v->slice.start;
	d->len = v->slice.len;
	return 0;
}

static int
bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	u64 len = 0;
	int ret, i;

	d->children = kcalloc(v->concat.volumes_count,
			sizeof(struct pnfs_block_dev), gfp_mask);
	if (!d->children)
		return -ENOMEM;

	for (i = 0; i < v->concat.volumes_count; i++) {
		ret = bl_parse_deviceid(server, &d->children[i],
				volumes, v->concat.volumes[i], gfp_mask);
		if (ret)
			return ret;

		d->nr_children++;
		d->children[i].start += len;
		len += d->children[i].len;
	}

	d->len = len;
	d->map = bl_map_concat;
	return 0;
}

static int
bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	u64 len = 0;
	int ret, i;

	d->children = kcalloc(v->stripe.volumes_count,
			sizeof(struct pnfs_block_dev), gfp_mask);
	if (!d->children)
		return -ENOMEM;

	for (i = 0; i < v->stripe.volumes_count; i++) {
		ret = bl_parse_deviceid(server, &d->children[i],
				volumes, v->stripe.volumes[i], gfp_mask);
		if (ret)
			return ret;

		d->nr_children++;
		len += d->children[i].len;
	}

	d->len = len;
	d->chunk_size = v->stripe.chunk_size;
	d->map = bl_map_stripe;
	return 0;
}
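
/*
 * The server describes a device as a flat array of volumes in which composite
 * types (slice, concat, stripe) reference earlier entries by index, with the
 * last entry describing the device as a whole.  bl_parse_deviceid dispatches
 * on the volume type and recurses through that array to build the
 * pnfs_block_dev tree rooted at the node passed in by bl_alloc_deviceid_node.
 */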
static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	switch (volumes[idx].type) {
	case PNFS_BLOCK_VOLUME_SIMPLE:
		return bl_parse_simple(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_SLICE:
		return bl_parse_slice(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_CONCAT:
		return bl_parse_concat(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_STRIPE:
		return bl_parse_stripe(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_SCSI:
		return bl_parse_scsi(server, d, volumes, idx, gfp_mask);
	default:
		dprintk("unsupported volume type: %d\n", volumes[idx].type);
		return -EIO;
	}
}

struct nfs4_deviceid_node *
bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
		gfp_t gfp_mask)
{
	struct nfs4_deviceid_node *node = NULL;
	struct pnfs_block_volume *volumes;
	struct pnfs_block_dev *top;
	struct xdr_stream xdr;
	struct xdr_buf buf;
	struct page *scratch;
	int nr_volumes, ret, i;
	__be32 *p;

	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_page(&xdr, scratch);

	p = xdr_inline_decode(&xdr, sizeof(__be32));
	if (!p)
		goto out_free_scratch;
	nr_volumes = be32_to_cpup(p++);

	volumes = kcalloc(nr_volumes, sizeof(struct pnfs_block_volume),
			  gfp_mask);
	if (!volumes)
		goto out_free_scratch;

	for (i = 0; i < nr_volumes; i++) {
		ret = nfs4_block_decode_volume(&xdr, &volumes[i]);
		if (ret < 0)
			goto out_free_volumes;
	}

	top = kzalloc(sizeof(*top), gfp_mask);
	if (!top)
		goto out_free_volumes;

	ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);

	node = &top->node;
	nfs4_init_deviceid_node(node, server, &pdev->dev_id);
	if (ret)
		nfs4_mark_deviceid_unavailable(node);

out_free_volumes:
	kfree(volumes);
out_free_scratch:
	__free_page(scratch);
out:
	return node;
}