xref: /linux/drivers/nvdimm/claim.c (revision 447d2d272e4e0c7cd9dfc6aeeadad9d70b3fb1ef)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4  */
5 #include <linux/device.h>
6 #include <linux/sizes.h>
7 #include <linux/badblocks.h>
8 #include "nd-core.h"
9 #include "pmem.h"
10 #include "pfn.h"
11 #include "btt.h"
12 #include "nd.h"
13 
14 void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
15 {
16 	struct nd_namespace_common *ndns = *_ndns;
17 	struct nvdimm_bus *nvdimm_bus;
18 
19 	if (!ndns)
20 		return;
21 
22 	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
23 	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
24 	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
25 	ndns->claim = NULL;
26 	*_ndns = NULL;
27 	put_device(&ndns->dev);
28 }
29 
30 void nd_detach_ndns(struct device *dev,
31 		struct nd_namespace_common **_ndns)
32 {
33 	struct nd_namespace_common *ndns = *_ndns;
34 
35 	if (!ndns)
36 		return;
37 	get_device(&ndns->dev);
38 	nvdimm_bus_lock(&ndns->dev);
39 	__nd_detach_ndns(dev, _ndns);
40 	nvdimm_bus_unlock(&ndns->dev);
41 	put_device(&ndns->dev);
42 }
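
Usage sketch, not part of this file: a claim holder releases its namespace through the locked wrapper above. The nd_btt/ndns field names follow the pattern of the BTT device release path and are illustrative here.

/*
 * Illustrative only: how a claiming device (e.g. a BTT instance) drops
 * its namespace. nd_detach_ndns() takes the bus lock and drops the
 * ndns reference itself, so the caller just hands over its claim slot.
 */
static void example_btt_release(struct nd_btt *nd_btt)
{
	nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
}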
43 
44 bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
45 		struct nd_namespace_common **_ndns)
46 {
47 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
48 
49 	if (attach->claim)
50 		return false;
51 	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
52 	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
53 	attach->claim = dev;
54 	*_ndns = attach;
55 	get_device(&attach->dev);
56 	return true;
57 }
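
A minimal sketch of the attach side, assuming the caller owns the claim slot; in this file the only caller is nd_namespace_store() below, which already runs under the bus lock that the lockdep assertion checks for.

/*
 * Illustrative only: claim a namespace under the nvdimm bus lock.
 * claim_dev is the claiming device (btt/pfn/dax) and slot is its
 * nd_namespace_common pointer; both names are hypothetical.
 */
static int example_claim(struct device *claim_dev,
		struct nd_namespace_common *ndns,
		struct nd_namespace_common **slot)
{
	int rc = 0;

	nvdimm_bus_lock(&ndns->dev);
	if (!__nd_attach_ndns(claim_dev, ndns, slot))
		rc = -EBUSY;	/* already claimed by another device */
	nvdimm_bus_unlock(&ndns->dev);
	return rc;
}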
58 
59 static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
60 {
61 	struct nd_region *nd_region = to_nd_region(dev->parent);
62 	struct device *seed = NULL;
63 
64 	if (is_nd_btt(dev))
65 		seed = nd_region->btt_seed;
66 	else if (is_nd_pfn(dev))
67 		seed = nd_region->pfn_seed;
68 	else if (is_nd_dax(dev))
69 		seed = nd_region->dax_seed;
70 
71 	if (seed == dev || ndns || dev->driver)
72 		return false;
73 	return true;
74 }
75 
76 struct nd_pfn *to_nd_pfn_safe(struct device *dev)
77 {
78 	/*
79 	 * pfn device attributes are re-used by dax device instances, so we
80 	 * need to be careful to do the correct device-to-nd_pfn conversion.
81 	 */
82 	if (is_nd_pfn(dev))
83 		return to_nd_pfn(dev);
84 
85 	if (is_nd_dax(dev)) {
86 		struct nd_dax *nd_dax = to_nd_dax(dev);
87 
88 		return &nd_dax->nd_pfn;
89 	}
90 
91 	WARN_ON(1);
92 	return NULL;
93 }
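
The conversion above is safe because a dax device embeds a pfn device as its first (and only) member, so the same struct device underlies both. The assumed layout, per the definition in drivers/nvdimm/nd.h:

/* Assumed layout from drivers/nvdimm/nd.h, shown for reference */
struct nd_dax {
	struct nd_pfn nd_pfn;
};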
94 
95 static void nd_detach_and_reset(struct device *dev,
96 		struct nd_namespace_common **_ndns)
97 {
98 	/* detach the namespace and destroy / reset the device */
99 	__nd_detach_ndns(dev, _ndns);
100 	if (is_idle(dev, *_ndns)) {
101 		nd_device_unregister(dev, ND_ASYNC);
102 	} else if (is_nd_btt(dev)) {
103 		struct nd_btt *nd_btt = to_nd_btt(dev);
104 
105 		nd_btt->lbasize = 0;
106 		kfree(nd_btt->uuid);
107 		nd_btt->uuid = NULL;
108 	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
109 		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
110 
111 		kfree(nd_pfn->uuid);
112 		nd_pfn->uuid = NULL;
113 		nd_pfn->mode = PFN_MODE_NONE;
114 	}
115 }
116 
117 ssize_t nd_namespace_store(struct device *dev,
118 		struct nd_namespace_common **_ndns, const char *buf,
119 		size_t len)
120 {
121 	struct nd_namespace_common *ndns;
122 	struct device *found;
123 	char *name;
124 
125 	if (dev->driver) {
126 		dev_dbg(dev, "namespace already active\n");
127 		return -EBUSY;
128 	}
129 
130 	name = kstrndup(buf, len, GFP_KERNEL);
131 	if (!name)
132 		return -ENOMEM;
133 	strim(name);
134 
135 	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
136 		/* pass */;
137 	else {
138 		len = -EINVAL;
139 		goto out;
140 	}
141 
142 	ndns = *_ndns;
143 	if (strcmp(name, "") == 0) {
144 		nd_detach_and_reset(dev, _ndns);
145 		goto out;
146 	} else if (ndns) {
147 		dev_dbg(dev, "namespace already set to: %s\n",
148 				dev_name(&ndns->dev));
149 		len = -EBUSY;
150 		goto out;
151 	}
152 
153 	found = device_find_child_by_name(dev->parent, name);
154 	if (!found) {
155 		dev_dbg(dev, "'%s' not found under %s\n", name,
156 				dev_name(dev->parent));
157 		len = -ENODEV;
158 		goto out;
159 	}
160 
161 	ndns = to_ndns(found);
162 
163 	switch (ndns->claim_class) {
164 	case NVDIMM_CCLASS_NONE:
165 		break;
166 	case NVDIMM_CCLASS_BTT:
167 	case NVDIMM_CCLASS_BTT2:
168 		if (!is_nd_btt(dev)) {
169 			len = -EBUSY;
170 			goto out_attach;
171 		}
172 		break;
173 	case NVDIMM_CCLASS_PFN:
174 		if (!is_nd_pfn(dev)) {
175 			len = -EBUSY;
176 			goto out_attach;
177 		}
178 		break;
179 	case NVDIMM_CCLASS_DAX:
180 		if (!is_nd_dax(dev)) {
181 			len = -EBUSY;
182 			goto out_attach;
183 		}
184 		break;
185 	default:
186 		len = -EBUSY;
187 		goto out_attach;
189 	}
190 
191 	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
192 		dev_dbg(dev, "%s too small to host\n", name);
193 		len = -ENXIO;
194 		goto out_attach;
195 	}
196 
197 	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
198 	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
199 		dev_dbg(dev, "%s already claimed\n",
200 				dev_name(&ndns->dev));
201 		len = -EBUSY;
202 	}
203 
204  out_attach:
205 	put_device(&ndns->dev); /* from device_find_child */
206  out:
207 	kfree(name);
208 	return len;
209 }
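
Drivers expose this helper through a "namespace" sysfs attribute; below is a sketch modeled on the BTT wrapper, with the usual device-lock/bus-lock nesting. The field names assume struct nd_btt from nd.h.

/*
 * Illustrative sysfs store wrapper: take the device lock and the
 * nvdimm bus lock, then delegate to nd_namespace_store().
 */
static ssize_t example_namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_btt *nd_btt = to_nd_btt(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}

From userspace this amounts to writing a namespace device name such as "namespace0.0" to the claiming device's namespace attribute, or the empty string to detach and reset it.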
210 
211 /*
212  * nd_sb_checksum: compute checksum for a generic info block
213  *
214  * Returns a fletcher64 checksum of everything in the given info block
215  * except the last field (since that's where the checksum lives).
216  */
217 u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
218 {
219 	u64 sum;
220 	__le64 sum_save;
221 
222 	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
223 	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
224 	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);
225 
226 	sum_save = nd_gen_sb->checksum;
227 	nd_gen_sb->checksum = 0;
228 	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
229 	nd_gen_sb->checksum = sum_save;
230 	return sum;
231 }
232 EXPORT_SYMBOL(nd_sb_checksum);
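
Verification is the mirror image of generation: because nd_sb_checksum() zeroes and restores the checksum field internally, a validator can recompute over the block as-is and compare against the stored little-endian value. A hedged sketch of that pattern:

/* Illustrative info-block validation, as the btt/pfn validators do it */
static bool example_sb_valid(struct nd_gen_sb *sb)
{
	return le64_to_cpu(sb->checksum) == nd_sb_checksum(sb);
}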
233 
234 static int nsio_rw_bytes(struct nd_namespace_common *ndns,
235 		resource_size_t offset, void *buf, size_t size, int rw,
236 		unsigned long flags)
237 {
238 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
239 	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
240 	sector_t sector = offset >> 9;
241 	int rc = 0, ret = 0;
242 
243 	if (unlikely(!size))
244 		return 0;
245 
246 	if (unlikely(offset + size > nsio->size)) {
247 		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
248 		return -EFAULT;
249 	}
250 
251 	if (rw == READ) {
252 		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
253 			return -EIO;
254 		if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
255 			return -EIO;
256 		return 0;
257 	}
258 
259 	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
260 		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
261 				&& !(flags & NVDIMM_IO_ATOMIC)) {
262 			long cleared;
263 
264 			might_sleep();
265 			cleared = nvdimm_clear_poison(&ndns->dev,
266 					nsio->res.start + offset, size);
267 			if (cleared < size)
268 				rc = -EIO;
269 			if (cleared > 0 && cleared / 512) {
270 				cleared /= 512;
271 				badblocks_clear(&nsio->bb, sector, cleared);
272 			}
273 			arch_invalidate_pmem(nsio->addr + offset, size);
274 		} else
275 			rc = -EIO;
276 	}
277 
278 	memcpy_flushcache(nsio->addr + offset, buf, size);
279 	ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
280 	if (ret)
281 		rc = ret;
282 
283 	return rc;
284 }
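
Consumers never call nsio_rw_bytes() directly: devm_nsio_enable() below installs it as ndns->rw_bytes, and callers reach it through the nvdimm_read_bytes()/nvdimm_write_bytes() wrappers in include/linux/nd.h. A sketch of reading a 4K info block (the offset is illustrative):

/* Illustrative read of an info block through the rw_bytes op */
static int example_read_info_block(struct nd_namespace_common *ndns,
		struct nd_gen_sb *sb)
{
	return nvdimm_read_bytes(ndns, SZ_4K, sb, sizeof(*sb), 0);
}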
285 
286 int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
287 		resource_size_t size)
288 {
289 	struct nd_namespace_common *ndns = &nsio->common;
290 	struct range range = {
291 		.start = nsio->res.start,
292 		.end = nsio->res.end,
293 	};
294 
295 	nsio->size = size;
296 	if (!devm_request_mem_region(dev, range.start, size,
297 				dev_name(&ndns->dev))) {
298 		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
299 		return -EBUSY;
300 	}
301 
302 	ndns->rw_bytes = nsio_rw_bytes;
303 	if (devm_init_badblocks(dev, &nsio->bb))
304 		return -ENOMEM;
305 	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
306 			&range);
307 
308 	nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);
309 
310 	return PTR_ERR_OR_ZERO(nsio->addr);
311 }
312 
313 void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
314 {
315 	struct resource *res = &nsio->res;
316 
317 	devm_memunmap(dev, nsio->addr);
318 	devm_exit_badblocks(dev, &nsio->bb);
319 	devm_release_mem_region(dev, res->start, nsio->size);
320 }
321
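
Both nsio helpers are device-managed: a driver calls devm_nsio_enable() at probe time and relies on devres, or an explicit devm_nsio_disable(), for teardown. A probe-time sketch with the surrounding driver structure assumed, not shown:

/*
 * Illustrative probe-path caller: map the namespace and let devres
 * unwind the region, badblocks and mapping on failure or unbind.
 */
static int example_probe(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

	return devm_nsio_enable(dev, nsio, resource_size(&nsio->res));
}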