xref: /linux/drivers/nvdimm/claim.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/pmem.h>
#include "nd-core.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"

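/*
 * Drop the claim @dev holds on a namespace: clear ndns->claim, clear the
 * caller's pointer, and put the reference taken at attach time.  The
 * namespace device lock must be held by the caller.
 */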
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;

	lockdep_assert_held(&ndns->dev.mutex);
	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
	ndns->claim = NULL;
	*_ndns = NULL;
	put_device(&ndns->dev);
}

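/*
 * Locked wrapper around __nd_detach_ndns().  A temporary reference keeps
 * the namespace device alive while its lock is taken and dropped.
 */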
void nd_detach_ndns(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;

	if (!ndns)
		return;
	get_device(&ndns->dev);
	device_lock(&ndns->dev);
	__nd_detach_ndns(dev, _ndns);
	device_unlock(&ndns->dev);
	put_device(&ndns->dev);
}

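/*
 * Record @dev as the claimant of @attach: set attach->claim, point the
 * caller's *_ndns at the namespace, and take a reference on it.  Returns
 * false if the namespace is already claimed.  The namespace device lock
 * must be held by the caller.
 */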
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	if (attach->claim)
		return false;
	lockdep_assert_held(&attach->dev.mutex);
	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
	attach->claim = dev;
	*_ndns = attach;
	get_device(&attach->dev);
	return true;
}

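/*
 * Locked wrapper around __nd_attach_ndns(); returns true if the claim was
 * established.
 */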
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	bool claimed;

	device_lock(&attach->dev);
	claimed = __nd_attach_ndns(dev, attach, _ndns);
	device_unlock(&attach->dev);
	return claimed;
}

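/* device_find_child() callback: match a sibling device by name */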
static int namespace_match(struct device *dev, void *data)
{
	char *name = data;

	return strcmp(name, dev_name(dev)) == 0;
}

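/*
 * A btt/pfn/dax device is "idle", and may be unregistered, when it is not
 * the region's current seed device, has no namespace attached, and has no
 * driver bound to it.
 */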
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct device *seed = NULL;

	if (is_nd_btt(dev))
		seed = nd_region->btt_seed;
	else if (is_nd_pfn(dev))
		seed = nd_region->pfn_seed;
	else if (is_nd_dax(dev))
		seed = nd_region->dax_seed;

	if (seed == dev || ndns || dev->driver)
		return false;
	return true;
}

struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
	/*
	 * pfn device attributes are re-used by dax device instances, so we
	 * need to be careful with the device-to-nd_pfn conversion.
	 */
	if (is_nd_pfn(dev))
		return to_nd_pfn(dev);

	if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		return &nd_dax->nd_pfn;
	}

	WARN_ON(1);
	return NULL;
}

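/*
 * Detach the namespace from @dev and either unregister the device, if it
 * is idle, or reset its btt/pfn/dax configuration so it can be reused.
 */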
static void nd_detach_and_reset(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	/* detach the namespace and destroy / reset the device */
	nd_detach_ndns(dev, _ndns);
	if (is_idle(dev, *_ndns)) {
		nd_device_unregister(dev, ND_ASYNC);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_btt->lbasize = 0;
		kfree(nd_btt->uuid);
		nd_btt->uuid = NULL;
	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

		kfree(nd_pfn->uuid);
		nd_pfn->uuid = NULL;
		nd_pfn->mode = PFN_MODE_NONE;
	}
}

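/*
 * sysfs 'namespace' attribute store helper shared by btt, pfn, and dax
 * devices.  Writing the name of a sibling namespace (names start with
 * "namespace") claims it for @dev; writing an empty string detaches the
 * current namespace and resets the device.  The write is rejected while a
 * driver is bound, when the target namespace is already claimed, or when
 * it is too small (less than SZ_16M) to host a configuration.
 */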
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len)
{
	struct nd_namespace_common *ndns;
	struct device *found;
	char *name;

	if (dev->driver) {
		dev_dbg(dev, "%s: -EBUSY\n", __func__);
		return -EBUSY;
	}

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	strim(name);

	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
		/* pass */;
	else {
		len = -EINVAL;
		goto out;
	}

	ndns = *_ndns;
	if (strcmp(name, "") == 0) {
		nd_detach_and_reset(dev, _ndns);
		goto out;
	} else if (ndns) {
		dev_dbg(dev, "namespace already set to: %s\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
		goto out;
	}

	found = device_find_child(dev->parent, name, namespace_match);
	if (!found) {
		dev_dbg(dev, "'%s' not found under %s\n", name,
				dev_name(dev->parent));
		len = -ENODEV;
		goto out;
	}

	ndns = to_ndns(found);
	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
		dev_dbg(dev, "%s too small to host\n", name);
		len = -ENXIO;
		goto out_attach;
	}

	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
	if (!nd_attach_ndns(dev, ndns, _ndns)) {
		dev_dbg(dev, "%s already claimed\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
	}

 out_attach:
	put_device(&ndns->dev); /* from device_find_child */
 out:
	kfree(name);
	return len;
}

/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
	u64 sum;
	__le64 sum_save;

	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

	sum_save = nd_gen_sb->checksum;
	nd_gen_sb->checksum = 0;
	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
	nd_gen_sb->checksum = sum_save;
	return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);

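/*
 * Synchronous byte-granular read/write for a raw (io) namespace.  Reads
 * that intersect known poison fail with -EIO.  On write, poison clearing
 * is attempted for sector-aligned requests (unless the namespace is
 * claimed by btt, see the FIXME below) and -EIO is returned if the poison
 * could not be fully cleared; the data is written to pmem either way.
 */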
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
	sector_t sector = offset >> 9;
	int rc = 0;

	if (unlikely(!size))
		return 0;

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, nsio->addr + offset, size);
	}

	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
		/*
		 * FIXME: nsio_rw_bytes() may be called from atomic
		 * context in the btt case and nvdimm_clear_poison()
		 * takes a sleeping lock. Until the locking can be
		 * reworked this capability requires that the namespace
		 * is not claimed by btt.
		 */
		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
				&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
			long cleared;

			cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
			if (cleared < size)
				rc = -EIO;
			if (cleared > 0 && cleared / 512) {
				cleared /= 512;
				badblocks_clear(&nsio->bb, sector, cleared);
			}
			invalidate_pmem(nsio->addr + offset, size);
		} else
			rc = -EIO;
	}

	memcpy_to_pmem(nsio->addr + offset, buf, size);
	nvdimm_flush(to_nd_region(ndns->dev.parent));

	return rc;
}

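/*
 * Map the namespace's physical range for raw byte access: reserve the
 * region, install nsio_rw_bytes() as the rw_bytes backend, initialize and
 * populate the bad-block list, and memremap the range with
 * ARCH_MEMREMAP_PMEM semantics.  All resources are device-managed.
 */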
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;
	struct nd_namespace_common *ndns = &nsio->common;

	nsio->size = resource_size(res);
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	ndns->rw_bytes = nsio_rw_bytes;
	if (devm_init_badblocks(dev, &nsio->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
			&nsio->res);

	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
			ARCH_MEMREMAP_PMEM);

	return PTR_ERR_OR_ZERO(nsio->addr);
}
EXPORT_SYMBOL_GPL(devm_nsio_enable);

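/*
 * Undo devm_nsio_enable(): unmap the range, tear down the bad-block list,
 * and release the memory region reservation.
 */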
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;

	devm_memunmap(dev, nsio->addr);
	devm_exit_badblocks(dev, &nsio->bb);
	devm_release_mem_region(dev, res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(devm_nsio_disable);
309