xref: /linux/fs/nfs/blocklayout/dev.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014-2016 Christoph Hellwig.
 */
#include <linux/sunrpc/svc.h>
#include <linux/blkdev.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>
#include <linux/pr.h>

#include "blocklayout.h"
#include "../nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

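/*
 * SCSI layout volumes are fenced using persistent reservations.  The client
 * registers the reservation key handed out by the server (see the pNFS SCSI
 * layout, RFC 8154) with the underlying block device, and drops that
 * registration again when the device goes away.
 */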
static void bl_unregister_scsi(struct pnfs_block_dev *dev)
{
	struct block_device *bdev = file_bdev(dev->bdev_file);
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	int status;

	if (!test_and_clear_bit(PNFS_BDEV_REGISTERED, &dev->flags))
		return;

	status = ops->pr_register(bdev, dev->pr_key, 0, false);
	if (status)
		trace_bl_pr_key_unreg_err(bdev, dev->pr_key, status);
	else
		trace_bl_pr_key_unreg(bdev, dev->pr_key);
}

static bool bl_register_scsi(struct pnfs_block_dev *dev)
{
	struct block_device *bdev = file_bdev(dev->bdev_file);
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	int status;

	if (test_and_set_bit(PNFS_BDEV_REGISTERED, &dev->flags))
		return true;

	status = ops->pr_register(bdev, 0, dev->pr_key, true);
	if (status) {
		trace_bl_pr_key_reg_err(bdev, dev->pr_key, status);
		return false;
	}
	trace_bl_pr_key_reg(bdev, dev->pr_key);
	return true;
}

static void bl_unregister_dev(struct pnfs_block_dev *dev)
{
	u32 i;

	if (dev->nr_children) {
		for (i = 0; i < dev->nr_children; i++)
			bl_unregister_dev(&dev->children[i]);
		return;
	}

	if (dev->type == PNFS_BLOCK_VOLUME_SCSI)
		bl_unregister_scsi(dev);
}

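/*
 * Walk the parsed device tree and register the reservation key of every
 * SCSI leaf volume.  If any registration fails, the leaves registered so
 * far are unregistered again and the whole tree is reported as failed.
 */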
bool bl_register_dev(struct pnfs_block_dev *dev)
{
	u32 i;

	if (dev->nr_children) {
		for (i = 0; i < dev->nr_children; i++) {
			if (!bl_register_dev(&dev->children[i])) {
				while (i > 0)
					bl_unregister_dev(&dev->children[--i]);
				return false;
			}
		}
		return true;
	}

	if (dev->type == PNFS_BLOCK_VOLUME_SCSI)
		return bl_register_scsi(dev);
	return true;
}

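/*
 * Release everything attached to a pnfs_block_dev: unregister any
 * reservation keys, free child devices recursively, and drop the
 * reference to the opened block device file.
 */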
static void
bl_free_device(struct pnfs_block_dev *dev)
{
	bl_unregister_dev(dev);

	if (dev->nr_children) {
		int i;

		for (i = 0; i < dev->nr_children; i++)
			bl_free_device(&dev->children[i]);
		kfree(dev->children);
	} else {
		if (dev->bdev_file)
			fput(dev->bdev_file);
	}
}

void
bl_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	struct pnfs_block_dev *dev =
		container_of(d, struct pnfs_block_dev, node);

	bl_free_device(dev);
	kfree_rcu(dev, node.rcu);
}

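/*
 * Decode a single volume description from the XDR-encoded GETDEVICEINFO
 * reply.  The five volume types (simple, slice, concat, stripe, SCSI)
 * correspond to the pNFS block/volume and SCSI layouts (RFC 5663 and
 * RFC 8154); signature, designator and child counts are bounds-checked so
 * a bogus reply cannot overflow the fixed-size arrays in
 * struct pnfs_block_volume.
 */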
static int
nfs4_block_decode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
{
	__be32 *p;
	int i;

	p = xdr_inline_decode(xdr, 4);
	if (!p)
		return -EIO;
	b->type = be32_to_cpup(p++);

	switch (b->type) {
	case PNFS_BLOCK_VOLUME_SIMPLE:
		p = xdr_inline_decode(xdr, 4);
		if (!p)
			return -EIO;
		b->simple.nr_sigs = be32_to_cpup(p++);
		if (!b->simple.nr_sigs || b->simple.nr_sigs > PNFS_BLOCK_MAX_UUIDS) {
			dprintk("Bad signature count: %d\n", b->simple.nr_sigs);
			return -EIO;
		}

		b->simple.len = 4 + 4;
		for (i = 0; i < b->simple.nr_sigs; i++) {
			p = xdr_inline_decode(xdr, 8 + 4);
			if (!p)
				return -EIO;
			p = xdr_decode_hyper(p, &b->simple.sigs[i].offset);
			b->simple.sigs[i].sig_len = be32_to_cpup(p++);
			if (b->simple.sigs[i].sig_len > PNFS_BLOCK_UUID_LEN) {
				pr_info("signature too long: %d\n",
					b->simple.sigs[i].sig_len);
				return -EIO;
			}

			p = xdr_inline_decode(xdr, b->simple.sigs[i].sig_len);
			if (!p)
				return -EIO;
			memcpy(&b->simple.sigs[i].sig, p,
				b->simple.sigs[i].sig_len);

			b->simple.len += 8 + 4 +
				(XDR_QUADLEN(b->simple.sigs[i].sig_len) << 2);
		}
		break;
	case PNFS_BLOCK_VOLUME_SLICE:
		p = xdr_inline_decode(xdr, 8 + 8 + 4);
		if (!p)
			return -EIO;
		p = xdr_decode_hyper(p, &b->slice.start);
		p = xdr_decode_hyper(p, &b->slice.len);
		b->slice.volume = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_CONCAT:
		p = xdr_inline_decode(xdr, 4);
		if (!p)
			return -EIO;

		b->concat.volumes_count = be32_to_cpup(p++);
		if (b->concat.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
			dprintk("Too many volumes: %d\n", b->concat.volumes_count);
			return -EIO;
		}

		p = xdr_inline_decode(xdr, b->concat.volumes_count * 4);
		if (!p)
			return -EIO;
		for (i = 0; i < b->concat.volumes_count; i++)
			b->concat.volumes[i] = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_STRIPE:
		p = xdr_inline_decode(xdr, 8 + 4);
		if (!p)
			return -EIO;

		p = xdr_decode_hyper(p, &b->stripe.chunk_size);
		b->stripe.volumes_count = be32_to_cpup(p++);
		if (b->stripe.volumes_count > PNFS_BLOCK_MAX_DEVICES) {
			dprintk("Too many volumes: %d\n", b->stripe.volumes_count);
			return -EIO;
		}

		p = xdr_inline_decode(xdr, b->stripe.volumes_count * 4);
		if (!p)
			return -EIO;
		for (i = 0; i < b->stripe.volumes_count; i++)
			b->stripe.volumes[i] = be32_to_cpup(p++);
		break;
	case PNFS_BLOCK_VOLUME_SCSI:
		p = xdr_inline_decode(xdr, 4 + 4 + 4);
		if (!p)
			return -EIO;
		b->scsi.code_set = be32_to_cpup(p++);
		b->scsi.designator_type = be32_to_cpup(p++);
		b->scsi.designator_len = be32_to_cpup(p++);
		p = xdr_inline_decode(xdr, b->scsi.designator_len);
		if (!p)
			return -EIO;
		if (b->scsi.designator_len > 256)
			return -EIO;
		memcpy(&b->scsi.designator, p, b->scsi.designator_len);
		p = xdr_inline_decode(xdr, 8);
		if (!p)
			return -EIO;
		p = xdr_decode_hyper(p, &b->scsi.pr_key);
		break;
	default:
		dprintk("unknown volume type!\n");
		return -EIO;
	}

	return 0;
}

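/*
 * The ->map() callbacks below translate an offset in the logical volume
 * into an extent (start, len, disk_offset, bdev) on one of the underlying
 * block devices.
 */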
static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	map->start = dev->start;
	map->len = dev->len;
	map->disk_offset = dev->disk_offset;
	map->bdev = file_bdev(dev->bdev_file);
	return true;
}

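/*
 * A concat volume lays its children out back to back: find the child whose
 * [start, start + len) range covers the offset and let it do the mapping.
 */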
static bool bl_map_concat(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	int i;

	for (i = 0; i < dev->nr_children; i++) {
		struct pnfs_block_dev *child = &dev->children[i];

		if (child->start > offset ||
		    child->start + child->len <= offset)
			continue;

		child->map(child, offset - child->start, map);
		return true;
	}

	dprintk("%s: ran off loop!\n", __func__);
	return false;
}

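/*
 * A stripe volume interleaves fixed-size chunks across its children in
 * round-robin order.  For example, with a 64k chunk_size and four children,
 * offset 300k falls into chunk 4, which lives on child 0 (4 % 4) at disk
 * offset 64k (256k / 4); the returned extent covers one chunk.
 */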
static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
		struct pnfs_block_dev_map *map)
{
	struct pnfs_block_dev *child;
	u64 chunk;
	u32 chunk_idx;
	u64 disk_offset;

	chunk = div_u64(offset, dev->chunk_size);
	div_u64_rem(chunk, dev->nr_children, &chunk_idx);

	if (chunk_idx >= dev->nr_children) {
		dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
			__func__, chunk_idx, offset, dev->chunk_size);
		/* error, should not happen */
		return false;
	}

	/* truncate offset to the beginning of the stripe */
	offset = chunk * dev->chunk_size;

	/* disk offset of the stripe */
	disk_offset = div_u64(offset, dev->nr_children);

	child = &dev->children[chunk_idx];
	child->map(child, disk_offset, map);

	map->start += offset;
	map->disk_offset += disk_offset;
	map->len = dev->chunk_size;
	return true;
}

static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);


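/*
 * A simple volume is identified by the on-disk signatures decoded above.
 * bl_resolve_deviceid() resolves those signatures to a dev_t by upcalling
 * to userspace (typically the blkmapd daemon shipped with nfs-utils), and
 * the resulting device is opened for read/write access.
 */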
static int
bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	struct file *bdev_file;
	dev_t dev;

	dev = bl_resolve_deviceid(server, v, gfp_mask);
	if (!dev)
		return -EIO;

	bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
				       NULL, NULL);
	if (IS_ERR(bdev_file)) {
		printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
			MAJOR(dev), MINOR(dev), PTR_ERR(bdev_file));
		return PTR_ERR(bdev_file);
	}
	d->bdev_file = bdev_file;
	d->len = bdev_nr_bytes(file_bdev(bdev_file));
	d->map = bl_map_simple;

	printk(KERN_INFO "pNFS: using block device %s\n",
		file_bdev(bdev_file)->bd_disk->disk_name);
	return 0;
}

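/*
 * Only binary EUI-64 and NAA designators are supported; T10 and SCSI name
 * string designators are rejected, as is any unexpected designator length.
 */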
static bool
bl_validate_designator(struct pnfs_block_volume *v)
{
	switch (v->scsi.designator_type) {
	case PS_DESIGNATOR_EUI64:
		if (v->scsi.code_set != PS_CODE_SET_BINARY)
			return false;

		if (v->scsi.designator_len != 8 &&
		    v->scsi.designator_len != 10 &&
		    v->scsi.designator_len != 16)
			return false;

		return true;
	case PS_DESIGNATOR_NAA:
		if (v->scsi.code_set != PS_CODE_SET_BINARY)
			return false;

		if (v->scsi.designator_len != 8 &&
		    v->scsi.designator_len != 16)
			return false;

		return true;
	case PS_DESIGNATOR_T10:
	case PS_DESIGNATOR_NAME:
		pr_err("pNFS: unsupported designator "
			"(code set %d, type %d, len %d).\n",
			v->scsi.code_set,
			v->scsi.designator_type,
			v->scsi.designator_len);
		return false;
	default:
		pr_err("pNFS: invalid designator "
			"(code set %d, type %d, len %d).\n",
			v->scsi.code_set,
			v->scsi.designator_type,
			v->scsi.designator_len);
		return false;
	}
}

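/*
 * Open the device through its persistent /dev/disk/by-id udev link, built
 * from the given prefix and the hex-encoded SCSI/NVMe designator.
 */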
static struct file *
bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
	struct file *bdev_file;
	const char *devname;

	devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
			prefix, v->scsi.designator_len, v->scsi.designator);
	if (!devname)
		return ERR_PTR(-ENOMEM);

	bdev_file = bdev_file_open_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE,
					NULL, NULL);
	if (IS_ERR(bdev_file)) {
		dprintk("failed to open device %s (%ld)\n",
			devname, PTR_ERR(bdev_file));
	}

	kfree(devname);
	return bdev_file;
}

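/*
 * Parse a SCSI layout volume: locate the block device by its designator,
 * record the reservation key handed out by the server, and make sure the
 * device actually implements persistent reservations (pr_ops).
 */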
static int
bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	struct block_device *bdev;
	const struct pr_ops *ops;
	struct file *bdev_file;
	int error;

	if (!bl_validate_designator(v))
		return -EINVAL;

	/*
	 * Try to open the RH/Fedora specific dm-mpath udev path first, as the
	 * wwn- links will only point to the first discovered SCSI device there.
	 * On other distributions like Debian, the default SCSI by-id path will
	 * point to the dm-multipath device if one exists.
	 */
	bdev_file = bl_open_path(v, "dm-uuid-mpath-0x");
	if (IS_ERR(bdev_file))
		bdev_file = bl_open_path(v, "wwn-0x");
	if (IS_ERR(bdev_file))
		bdev_file = bl_open_path(v, "nvme-eui.");
	if (IS_ERR(bdev_file)) {
		pr_warn("pNFS: no device found for volume %*phN\n",
			v->scsi.designator_len, v->scsi.designator);
		return PTR_ERR(bdev_file);
	}
	d->bdev_file = bdev_file;
	bdev = file_bdev(bdev_file);

	d->len = bdev_nr_bytes(bdev);
	d->map = bl_map_simple;
	d->pr_key = v->scsi.pr_key;

	if (d->len == 0)
		return -ENODEV;

	ops = bdev->bd_disk->fops->pr_ops;
	if (!ops) {
		pr_err("pNFS: block device %s does not support reservations.\n",
				bdev->bd_disk->disk_name);
		error = -EINVAL;
		goto out_blkdev_put;
	}

	return 0;

out_blkdev_put:
	fput(d->bdev_file);
	return error;
}

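/*
 * A slice is a contiguous sub-range of another volume: parse the underlying
 * volume into the same pnfs_block_dev, then narrow it using the slice
 * offset and length.
 */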
static int
bl_parse_slice(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	int ret;

	ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
	if (ret)
		return ret;

	d->disk_offset = v->slice.start;
	d->len = v->slice.len;
	return 0;
}

static int
bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	u64 len = 0;
	int ret, i;

	d->children = kcalloc(v->concat.volumes_count,
			sizeof(struct pnfs_block_dev), gfp_mask);
	if (!d->children)
		return -ENOMEM;

	for (i = 0; i < v->concat.volumes_count; i++) {
		ret = bl_parse_deviceid(server, &d->children[i],
				volumes, v->concat.volumes[i], gfp_mask);
		if (ret)
			return ret;

		d->nr_children++;
		d->children[i].start += len;
		len += d->children[i].len;
	}

	d->len = len;
	d->map = bl_map_concat;
	return 0;
}

static int
bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	struct pnfs_block_volume *v = &volumes[idx];
	u64 len = 0;
	int ret, i;

	d->children = kcalloc(v->stripe.volumes_count,
			sizeof(struct pnfs_block_dev), gfp_mask);
	if (!d->children)
		return -ENOMEM;

	for (i = 0; i < v->stripe.volumes_count; i++) {
		ret = bl_parse_deviceid(server, &d->children[i],
				volumes, v->stripe.volumes[i], gfp_mask);
		if (ret)
			return ret;

		d->nr_children++;
		len += d->children[i].len;
	}

	d->len = len;
	d->chunk_size = v->stripe.chunk_size;
	d->map = bl_map_stripe;
	return 0;
}

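/*
 * Recursively build a pnfs_block_dev tree from the flat volume array,
 * dispatching on the volume type.  @idx is the volume to start from;
 * compound volumes reference their children by array index.
 */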
static int
bl_parse_deviceid(struct nfs_server *server, struct pnfs_block_dev *d,
		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
	d->type = volumes[idx].type;

	switch (d->type) {
	case PNFS_BLOCK_VOLUME_SIMPLE:
		return bl_parse_simple(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_SLICE:
		return bl_parse_slice(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_CONCAT:
		return bl_parse_concat(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_STRIPE:
		return bl_parse_stripe(server, d, volumes, idx, gfp_mask);
	case PNFS_BLOCK_VOLUME_SCSI:
		return bl_parse_scsi(server, d, volumes, idx, gfp_mask);
	default:
		dprintk("unsupported volume type: %d\n", d->type);
		return -EIO;
	}
}

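/*
 * Decode a GETDEVICEINFO reply into a volume array and parse it into a
 * device tree, starting from the last (top-level) volume.  If parsing
 * fails the device ID node is still returned, but marked unavailable so
 * that I/O using it falls back to going through the MDS.
 */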
struct nfs4_deviceid_node *
bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
		gfp_t gfp_mask)
{
	struct nfs4_deviceid_node *node = NULL;
	struct pnfs_block_volume *volumes;
	struct pnfs_block_dev *top;
	struct xdr_stream xdr;
	struct xdr_buf buf;
	struct page *scratch;
	int nr_volumes, ret, i;
	__be32 *p;

	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_page(&xdr, scratch);

	p = xdr_inline_decode(&xdr, sizeof(__be32));
	if (!p)
		goto out_free_scratch;
	nr_volumes = be32_to_cpup(p++);

	volumes = kcalloc(nr_volumes, sizeof(struct pnfs_block_volume),
			  gfp_mask);
	if (!volumes)
		goto out_free_scratch;

	for (i = 0; i < nr_volumes; i++) {
		ret = nfs4_block_decode_volume(&xdr, &volumes[i]);
		if (ret < 0)
			goto out_free_volumes;
	}

	top = kzalloc(sizeof(*top), gfp_mask);
	if (!top)
		goto out_free_volumes;

	ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);

	node = &top->node;
	nfs4_init_deviceid_node(node, server, &pdev->dev_id);
	if (ret)
		nfs4_mark_deviceid_unavailable(node);

out_free_volumes:
	kfree(volumes);
out_free_scratch:
	__free_page(scratch);
out:
	return node;
}