// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014-2016 Christoph Hellwig.
 */
#include <linux/exportfs.h>
#include <linux/iomap.h>
#include <linux/slab.h>
#include <linux/pr.h>

#include <linux/nfsd/debug.h>

#include "blocklayoutxdr.h"
#include "pnfs.h"
#include "filecache.h"
#include "vfs.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PNFS


/*
 * Get an extent from the file system that starts at offset or below
 * and may be shorter than the requested length.
 */
static __be32
nfsd4_block_map_extent(struct inode *inode, const struct svc_fh *fhp,
		u64 offset, u64 length, u32 iomode, u64 minlength,
		struct pnfs_block_extent *bex)
{
	struct super_block *sb = inode->i_sb;
	struct iomap iomap;
	u32 device_generation = 0;
	int error;

	error = sb->s_export_op->map_blocks(inode, offset, length, &iomap,
			iomode != IOMODE_READ, &device_generation);
	if (error) {
		if (error == -ENXIO)
			return nfserr_layoutunavailable;
		return nfserrno(error);
	}

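	/*
	 * Translate the iomap mapping type into a pNFS block extent state:
	 * mapped blocks are readable (and writable for RW layouts),
	 * unwritten extents are exposed as INVALID_DATA so the client must
	 * write before reading, and holes become NONE_DATA for read-only
	 * layouts.  Anything else cannot be expressed in the block layout.
	 */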
	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomode == IOMODE_READ)
			bex->es = PNFS_BLOCK_READ_DATA;
		else
			bex->es = PNFS_BLOCK_READWRITE_DATA;
		bex->soff = iomap.addr;
		break;
	case IOMAP_UNWRITTEN:
		if (iomode & IOMODE_RW) {
			/*
			 * Crack monkey special case from section 2.3.1.
			 */
			if (minlength == 0) {
				dprintk("pnfsd: no soup for you!\n");
				return nfserr_layoutunavailable;
			}

			bex->es = PNFS_BLOCK_INVALID_DATA;
			bex->soff = iomap.addr;
			break;
		}
		fallthrough;
	case IOMAP_HOLE:
		if (iomode == IOMODE_READ) {
			bex->es = PNFS_BLOCK_NONE_DATA;
			break;
		}
		fallthrough;
	case IOMAP_DELALLOC:
	default:
		WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type);
		return nfserr_layoutunavailable;
	}

	error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation);
	if (error)
		return nfserrno(error);

	bex->foff = iomap.offset;
	bex->len = iomap.length;
	return nfs_ok;
}

static __be32
nfsd4_block_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
		const struct svc_fh *fhp, struct nfsd4_layoutget *args)
{
	struct nfsd4_layout_seg *seg = &args->lg_seg;
	struct pnfs_block_layout *bl;
	struct pnfs_block_extent *first_bex, *last_bex;
	u64 offset = seg->offset, length = seg->length;
	u32 i, nr_extents_max, block_size = i_blocksize(inode);
	__be32 nfserr;

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	nfserr = nfserr_layoutunavailable;
	if (seg->offset & (block_size - 1)) {
		dprintk("pnfsd: I/O misaligned\n");
		goto out_error;
	}

	/*
	 * RFC 8881, section 3.3.17:
	 * The layout4 data type defines a layout for a file.
	 *
	 * RFC 8881, section 18.43.3:
	 * The loga_maxcount field specifies the maximum layout size
	 * (in bytes) that the client can handle. If the size of the
	 * layout structure exceeds the size specified by maxcount,
	 * the metadata server will return the NFS4ERR_TOOSMALL error.
	 */
	nfserr = nfserr_toosmall;
	if (args->lg_maxcount < PNFS_BLOCK_LAYOUT4_SIZE +
				PNFS_BLOCK_EXTENT_SIZE)
		goto out_error;

	/*
	 * Limit the maximum layout size to avoid allocating
	 * a large buffer on the server for each layout request.
	 */
	nr_extents_max = (min(args->lg_maxcount, PAGE_SIZE) -
			  PNFS_BLOCK_LAYOUT4_SIZE) / PNFS_BLOCK_EXTENT_SIZE;

	/*
	 * Some clients barf on non-zero block numbers for NONE or INVALID
	 * layouts, so make sure to zero the whole structure.
	 */
	nfserr = nfserrno(-ENOMEM);
	bl = kzalloc(struct_size(bl, extents, nr_extents_max), GFP_KERNEL);
	if (!bl)
		goto out_error;
	bl->nr_extents = nr_extents_max;
	args->lg_content = bl;

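	/*
	 * Walk the requested range, mapping one extent per iteration.  Each
	 * extent may start at or below the current offset and may be
	 * shorter than what was asked for, so keep going until the
	 * remaining length is covered or the extent array is full.
	 */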
	for (i = 0; i < bl->nr_extents; i++) {
		struct pnfs_block_extent *bex = bl->extents + i;
		u64 bex_length;

		nfserr = nfsd4_block_map_extent(inode, fhp, offset, length,
				seg->iomode, args->lg_minlength, bex);
		if (nfserr != nfs_ok)
			goto out_error;

		bex_length = bex->len - (offset - bex->foff);
		if (bex_length >= length) {
			bl->nr_extents = i + 1;
			break;
		}

		offset = bex->foff + bex->len;
		length -= bex_length;
	}

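	/*
	 * Round the returned layout segment out to the boundaries of the
	 * extents that were actually mapped, but only hand it out if it
	 * still covers at least lg_minlength bytes from the requested
	 * offset.
	 */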
	first_bex = bl->extents;
	last_bex = bl->extents + bl->nr_extents - 1;

	nfserr = nfserr_layoutunavailable;
	length = last_bex->foff + last_bex->len - seg->offset;
	if (length < args->lg_minlength) {
		dprintk("pnfsd: extent smaller than minlength\n");
		goto out_error;
	}

	seg->offset = first_bex->foff;
	seg->length = last_bex->foff - first_bex->foff + last_bex->len;
	return nfs_ok;

out_error:
	seg->length = 0;
	return nfserr;
}

static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
		struct iomap *iomaps, int nr_iomaps)
{
	struct timespec64 mtime = inode_get_mtime(inode);
	struct iattr iattr = { .ia_valid = 0 };
	int error;

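	/*
	 * Stamp atime/ctime/mtime from the LAYOUTCOMMIT arguments.  If the
	 * client asked for the current server time, or supplied an mtime
	 * older than what the inode already has, use the current time
	 * instead.
	 */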
	if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
	    timespec64_compare(&lcp->lc_mtime, &mtime) < 0)
		lcp->lc_mtime = current_time(inode);
	iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
	iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;

	if (lcp->lc_size_chg) {
		iattr.ia_valid |= ATTR_SIZE;
		iattr.ia_size = lcp->lc_newsize;
	}

	error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
			nr_iomaps, &iattr);
	kfree(iomaps);
	return nfserrno(error);
}

#ifdef CONFIG_NFSD_BLOCKLAYOUT
static int
nfsd4_block_get_device_info_simple(struct super_block *sb,
		struct nfsd4_getdeviceinfo *gdp)
{
	struct pnfs_block_deviceaddr *dev;
	struct pnfs_block_volume *b;

	dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	gdp->gd_device = dev;

	dev->nr_volumes = 1;
	b = &dev->volumes[0];

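	/*
	 * Describe the export as a single SIMPLE volume: the client
	 * identifies the right block device by matching the signature
	 * (here the filesystem UUID supplied by ->get_uuid) found at the
	 * returned offset on disk.
	 */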
	b->type = PNFS_BLOCK_VOLUME_SIMPLE;
	b->simple.sig_len = PNFS_BLOCK_UUID_LEN;
	return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len,
			&b->simple.offset);
}

static __be32
nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
		struct svc_rqst *rqstp,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	if (bdev_is_partition(sb->s_bdev))
		return nfserr_inval;
	return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp));
}

static __be32
nfsd4_block_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
		struct nfsd4_layoutcommit *lcp)
{
	struct iomap *iomaps;
	int nr_iomaps;
	__be32 nfserr;

	rqstp->rq_arg = lcp->lc_up_layout;
	svcxdr_init_decode(rqstp);

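	/*
	 * The layout update blob carried by LAYOUTCOMMIT is opaque to the
	 * generic code; decode it here into an iomap array that the file
	 * system can commit.
	 */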
	nfserr = nfsd4_block_decode_layoutupdate(&rqstp->rq_arg_stream,
			&iomaps, &nr_iomaps, i_blocksize(inode));
	if (nfserr != nfs_ok)
		return nfserr;

	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

const struct nfsd4_layout_ops bl_layout_ops = {
	/*
	 * Pretend that we send notification to the client. This is a blatant
	 * lie to force recent Linux clients to cache our device IDs.
	 * We rarely ever change the device ID, so the harm of leaking deviceids
	 * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
	 * in this regard, but I filed errata 4119 for this a while ago, and
	 * hopefully the Linux client will eventually start caching deviceids
	 * without this again.
	 */
	.notify_types		=
			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
	.proc_getdeviceinfo	= nfsd4_block_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_block_proc_layoutcommit,
};
#endif /* CONFIG_NFSD_BLOCKLAYOUT */

#ifdef CONFIG_NFSD_SCSILAYOUT
#define NFSD_MDS_PR_KEY		0x0100000000000000ULL

/*
 * We use the client ID as a unique key for the reservations.
 * This allows us to easily fence a client when recalls fail.
 */
static u64 nfsd4_scsi_pr_key(struct nfs4_client *clp)
{
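	/*
	 * Example: cl_boot 0x5f3759df and cl_id 0x00000001 give the
	 * reservation key 0x5f3759df00000001.
	 */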
	return ((u64)clp->cl_clientid.cl_boot << 32) | clp->cl_clientid.cl_id;
}

static const u8 designator_types[] = {
	PS_DESIGNATOR_EUI64,
	PS_DESIGNATOR_NAA,
};

static int
nfsd4_block_get_unique_id(struct gendisk *disk, struct pnfs_block_volume *b)
{
	int ret, i;

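	/*
	 * Try the designator types in order of preference and use the first
	 * one the disk reports, always encoded as a binary code set.
	 */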
	for (i = 0; i < ARRAY_SIZE(designator_types); i++) {
		u8 type = designator_types[i];

		ret = disk->fops->get_unique_id(disk, b->scsi.designator, type);
		if (ret > 0) {
			b->scsi.code_set = PS_CODE_SET_BINARY;
			b->scsi.designator_type = type;
			b->scsi.designator_len = ret;
			return 0;
		}
	}

	return -EINVAL;
}

static int
nfsd4_block_get_device_info_scsi(struct super_block *sb,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	struct pnfs_block_deviceaddr *dev;
	struct pnfs_block_volume *b;
	const struct pr_ops *ops;
	int ret;

	dev = kzalloc(struct_size(dev, volumes, 1), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	gdp->gd_device = dev;

	dev->nr_volumes = 1;
	b = &dev->volumes[0];

	b->type = PNFS_BLOCK_VOLUME_SCSI;
	b->scsi.pr_key = nfsd4_scsi_pr_key(clp);

	ret = nfsd4_block_get_unique_id(sb->s_bdev->bd_disk, b);
	if (ret < 0)
		goto out_free_dev;

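	/*
	 * Register the MDS's own key and take an exclusive-access,
	 * registrants-only reservation on the device.  Clients register
	 * their per-client keys before doing I/O, which lets the server
	 * fence a misbehaving client later by preempting its registration.
	 */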
	ret = -EINVAL;
	ops = sb->s_bdev->bd_disk->fops->pr_ops;
	if (!ops) {
		pr_err("pNFS: device %s does not support PRs.\n",
			sb->s_id);
		goto out_free_dev;
	}

	ret = ops->pr_register(sb->s_bdev, 0, NFSD_MDS_PR_KEY, true);
	if (ret) {
		pr_err("pNFS: failed to register key for device %s.\n",
			sb->s_id);
		goto out_free_dev;
	}

	ret = ops->pr_reserve(sb->s_bdev, NFSD_MDS_PR_KEY,
			PR_EXCLUSIVE_ACCESS_REG_ONLY, 0);
	if (ret) {
		pr_err("pNFS: failed to reserve device %s.\n",
			sb->s_id);
		goto out_free_dev;
	}

	return 0;

out_free_dev:
	kfree(dev);
	gdp->gd_device = NULL;
	return ret;
}

static __be32
nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
		struct svc_rqst *rqstp,
		struct nfs4_client *clp,
		struct nfsd4_getdeviceinfo *gdp)
{
	if (bdev_is_partition(sb->s_bdev))
		return nfserr_inval;
	return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
}

static __be32
nfsd4_scsi_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
		struct nfsd4_layoutcommit *lcp)
{
	struct iomap *iomaps;
	int nr_iomaps;
	__be32 nfserr;

	rqstp->rq_arg = lcp->lc_up_layout;
	svcxdr_init_decode(rqstp);

	nfserr = nfsd4_scsi_decode_layoutupdate(&rqstp->rq_arg_stream,
			&iomaps, &nr_iomaps, i_blocksize(inode));
	if (nfserr != nfs_ok)
		return nfserr;

	return nfsd4_block_commit_blocks(inode, lcp, iomaps, nr_iomaps);
}

static void
nfsd4_scsi_fence_client(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct block_device *bdev = file->nf_file->f_path.mnt->mnt_sb->s_bdev;
	int status;

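	/*
	 * Fence the client by preempting its registration with the MDS key:
	 * once its key is gone, the registrants-only reservation makes the
	 * device reject further I/O from that client.
	 */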
	status = bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
			nfsd4_scsi_pr_key(clp),
			PR_EXCLUSIVE_ACCESS_REG_ONLY, true);
	trace_nfsd_pnfs_fence(clp, bdev->bd_disk->disk_name, status);
}

const struct nfsd4_layout_ops scsi_layout_ops = {
	/*
	 * Pretend that we send notification to the client. This is a blatant
	 * lie to force recent Linux clients to cache our device IDs.
	 * We rarely ever change the device ID, so the harm of leaking deviceids
	 * for a while isn't too bad. Unfortunately RFC5661 is a complete mess
	 * in this regard, but I filed errata 4119 for this a while ago, and
	 * hopefully the Linux client will eventually start caching deviceids
	 * without this again.
	 */
	.notify_types		=
			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
	.proc_getdeviceinfo	= nfsd4_scsi_proc_getdeviceinfo,
	.encode_getdeviceinfo	= nfsd4_block_encode_getdeviceinfo,
	.proc_layoutget		= nfsd4_block_proc_layoutget,
	.encode_layoutget	= nfsd4_block_encode_layoutget,
	.proc_layoutcommit	= nfsd4_scsi_proc_layoutcommit,
	.fence_client		= nfsd4_scsi_fence_client,
};
#endif /* CONFIG_NFSD_SCSILAYOUT */