xref: /linux/drivers/nvdimm/claim.c (revision ba9dac987319d4f3969691dcf366ef19c9ed8281)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/badblocks.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"

void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;
	struct nvdimm_bus *nvdimm_bus;

	if (!ndns)
		return;

	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
	ndns->claim = NULL;
	*_ndns = NULL;
	put_device(&ndns->dev);
}

void nd_detach_ndns(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;

	if (!ndns)
		return;

	struct device *ndev __free(put_device) = get_device(&ndns->dev);
	guard(nvdimm_bus)(ndev);
	__nd_detach_ndns(dev, _ndns);
}

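/*
 * Illustrative sketch (not part of this file): a claim holder drops its
 * namespace in its device release path, e.g. the BTT device in btt_devs.c
 * does roughly the following; the function name here is hypothetical and
 * details are elided.
 */
static void example_claim_release(struct device *dev)
{
	struct nd_btt *nd_btt = to_nd_btt(dev);

	/* drop the claim and the namespace reference taken at attach time */
	nd_detach_ndns(&nd_btt->dev, &nd_btt->ndns);
	kfree(nd_btt->uuid);
	kfree(nd_btt);
}
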
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);

	if (attach->claim)
		return false;
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
	attach->claim = dev;
	*_ndns = attach;
	get_device(&attach->dev);
	return true;
}

static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct device *seed = NULL;

	if (is_nd_btt(dev))
		seed = nd_region->btt_seed;
	else if (is_nd_pfn(dev))
		seed = nd_region->pfn_seed;
	else if (is_nd_dax(dev))
		seed = nd_region->dax_seed;

	if (seed == dev || ndns || dev->driver)
		return false;
	return true;
}

struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
	/*
	 * pfn device attributes are re-used by dax device instances, so we
	 * need to be careful to perform the correct device-to-nd_pfn
	 * conversion.
	 */
	if (is_nd_pfn(dev))
		return to_nd_pfn(dev);

	if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		return &nd_dax->nd_pfn;
	}

	WARN_ON(1);
	return NULL;
}

static void nd_detach_and_reset(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	/* detach the namespace and destroy / reset the device */
	__nd_detach_ndns(dev, _ndns);
	if (is_idle(dev, *_ndns)) {
		nd_device_unregister(dev, ND_ASYNC);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_btt->lbasize = 0;
		kfree(nd_btt->uuid);
		nd_btt->uuid = NULL;
	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

		kfree(nd_pfn->uuid);
		nd_pfn->uuid = NULL;
		nd_pfn->mode = PFN_MODE_NONE;
	}
}

ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len)
{
	struct nd_namespace_common *ndns;
	struct device *found;
	char *name;

	if (dev->driver) {
		dev_dbg(dev, "namespace already active\n");
		return -EBUSY;
	}

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	strim(name);

	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
		/* pass */;
	else {
		len = -EINVAL;
		goto out;
	}

	ndns = *_ndns;
	if (strcmp(name, "") == 0) {
		nd_detach_and_reset(dev, _ndns);
		goto out;
	} else if (ndns) {
		dev_dbg(dev, "namespace already set to: %s\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
		goto out;
	}

	found = device_find_child_by_name(dev->parent, name);
	if (!found) {
		dev_dbg(dev, "'%s' not found under %s\n", name,
				dev_name(dev->parent));
		len = -ENODEV;
		goto out;
	}

	ndns = to_ndns(found);

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
		break;
	case NVDIMM_CCLASS_BTT:
	case NVDIMM_CCLASS_BTT2:
		if (!is_nd_btt(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_PFN:
		if (!is_nd_pfn(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_DAX:
		if (!is_nd_dax(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	default:
		len = -EBUSY;
		goto out_attach;
	}

	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
		dev_dbg(dev, "%s too small to host\n", name);
		len = -ENXIO;
		goto out_attach;
	}

	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
		dev_dbg(dev, "%s already claimed\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
	}

 out_attach:
	put_device(&ndns->dev); /* from device_find_child */
 out:
	kfree(name);
	return len;
}

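/*
 * Caller-side sketch (illustrative, not part of this file): claim devices
 * such as BTT/PFN/DAX expose a "namespace" sysfs attribute whose store
 * handler takes the device and nvdimm bus locks before delegating to
 * nd_namespace_store(), roughly as below; the attribute wiring is elided
 * and the function name is hypothetical.
 */
static ssize_t example_namespace_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_btt *nd_btt = to_nd_btt(dev);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
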
/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
	u64 sum;
	__le64 sum_save;

	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

	sum_save = nd_gen_sb->checksum;
	nd_gen_sb->checksum = 0;
	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
	nd_gen_sb->checksum = sum_save;
	return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);

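/*
 * Usage sketch (illustrative, not part of this file): nd_sb_checksum()
 * zeroes the checksum field internally and restores it before returning,
 * so an on-media info block can be validated by comparing the stored
 * value with a fresh computation; the helper name is hypothetical.
 */
static inline bool example_sb_checksum_ok(struct nd_gen_sb *nd_gen_sb)
{
	return nd_sb_checksum(nd_gen_sb) == le64_to_cpu(nd_gen_sb->checksum);
}
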
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw,
		unsigned long flags)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
	sector_t sector = offset >> 9;
	int rc = 0, ret = 0;

	if (unlikely(!size))
		return 0;

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
			return -EIO;
		if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
			return -EIO;
		return 0;
	}

	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
				&& !(flags & NVDIMM_IO_ATOMIC)) {
			long cleared;

			might_sleep();
			cleared = nvdimm_clear_poison(&ndns->dev,
					nsio->res.start + offset, size);
			if (cleared < size)
				rc = -EIO;
			if (cleared > 0 && cleared / 512) {
				cleared /= 512;
				badblocks_clear(&nsio->bb, sector, cleared);
			}
			arch_invalidate_pmem(nsio->addr + offset, size);
		} else
			rc = -EIO;
	}

	memcpy_flushcache(nsio->addr + offset, buf, size);
	ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
	if (ret)
		rc = ret;

	return rc;
}

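/*
 * Consumer sketch (illustrative, not part of this file): nsio_rw_bytes()
 * is not called directly; once devm_nsio_enable() below installs it as
 * ndns->rw_bytes, claim drivers reach it through the nvdimm_read_bytes() /
 * nvdimm_write_bytes() wrappers in <linux/nd.h>, e.g. to fetch an info
 * block. The offset, length, and function name below are placeholders.
 */
static int example_read_info_block(struct nd_namespace_common *ndns, void *buf)
{
	/* synchronous read of a 4K info block from a 4K offset */
	return nvdimm_read_bytes(ndns, SZ_4K, buf, SZ_4K, 0);
}
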
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
		resource_size_t size)
{
	struct nd_namespace_common *ndns = &nsio->common;
	struct range range = {
		.start = nsio->res.start,
		.end = nsio->res.end,
	};

	nsio->size = size;
	if (!devm_request_mem_region(dev, range.start, size,
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
		return -EBUSY;
	}

	ndns->rw_bytes = nsio_rw_bytes;
	if (devm_init_badblocks(dev, &nsio->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
			&range);

	nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);

	return PTR_ERR_OR_ZERO(nsio->addr);
}

void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;

	devm_memunmap(dev, nsio->addr);
	devm_exit_badblocks(dev, &nsio->bb);
	devm_release_mem_region(dev, res->start, nsio->size);
}