/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2020 Oxide Computer Company
 */

#include <sys/avl.h>
#include <sys/ddi_ufm.h>
#include <sys/ddi_ufm_impl.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/stddef.h>
#include <sys/sunndi.h>
#include <sys/file.h>
#include <sys/sysmacros.h>

/*
 * The UFM subsystem tracks its internal state for each device driver that
 * participates in the DDI UFM subsystem on a per-instance basis via
 * ddi_ufm_handle_t structures (see ddi_ufm_impl.h).  This is known as the
 * UFM handle.  The UFM handle contains a pointer to the driver's UFM ops,
 * which the ufm(7D) pseudo driver uses to invoke the UFM entry points in
 * response to DDI UFM ioctls.  Additionally, the DDI UFM subsystem uses the
 * handle to maintain cached UFM image and slot data.
 *
 * In order to track and provide fast lookups of a driver instance's UFM
 * handle, the DDI UFM subsystem stores a pointer to the handle in a global
 * AVL tree.  UFM handles are added to the tree when a driver calls
 * ddi_ufm_init(9E).  A handle is not removed from the tree by
 * ddi_ufm_fini(9E); it is retained so that it can be found and reused if the
 * same driver instance later re-registers (see ddi_ufm_init() below).
 *
 * Some notes on the locking strategy and rules:
 *
 * All access to the tree is serialized via the mutex, ufm_lock.
 * Additionally, each UFM handle is protected by a per-handle mutex.
 *
 * Code must acquire ufm_lock in order to walk the tree.  Before reading or
 * modifying the state of any UFM handle, code must then acquire the
 * UFM handle lock.  Once the UFM handle lock has been acquired, ufm_lock
 * should be dropped.
 *
 * Only one UFM handle lock should be held at any time.
 * If a UFM handle lock is held, it must be released before attempting to
 * re-acquire ufm_lock.
 *
 * For example, the lock sequence for calling a UFM entry point and/or
 * reading/modifying UFM handle state would be as follows:
 * - acquire ufm_lock
 * - walk the tree to find the UFM handle
 * - acquire the UFM handle lock
 * - release ufm_lock
 * - call the entry point and/or access handle state
 *
 * ufm_find() below implements this lookup-and-lock sequence.
 *
 * Testing
 * -------
 * A set of automated tests for the DDI UFM subsystem exists at:
 * usr/src/test/os-tests/tests/ddi_ufm/
 *
 * These tests should be run whenever changes are made to the DDI UFM
 * subsystem or the ufm driver.
 */

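/*
 * Example (sketch only): a driver typically registers with the UFM subsystem
 * from its attach(9E) entry point and unregisters in detach(9E).  The ops
 * vector, soft state structure and callback names below are hypothetical.
 *
 *	static ddi_ufm_ops_t mydrv_ufm_ops = {
 *		.ddi_ufm_op_getcaps = mydrv_ufm_getcaps,
 *		.ddi_ufm_op_fill_image = mydrv_ufm_fill_image,
 *		.ddi_ufm_op_fill_slot = mydrv_ufm_fill_slot
 *	};
 *
 *	if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &mydrv_ufm_ops,
 *	    &sp->mydrv_ufmh, sp) == DDI_SUCCESS)
 *		ddi_ufm_update(sp->mydrv_ufmh);
 *
 *	...
 *
 *	ddi_ufm_fini(sp->mydrv_ufmh);	(from detach(9E))
 */
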
/*
 * Amount of data to read in one go (1 MiB).
 */
#define	UFM_READ_STRIDE	(1024 * 1024)

static avl_tree_t ufm_handles;
static kmutex_t ufm_lock;

static int ufm_handle_compare(const void *, const void *);

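/*
 * Free any cached UFM image and slot data, along with the cached nvlist
 * report, and reset the corresponding fields in the handle.  The caller must
 * hold the UFM handle lock.
 */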
static void
ufm_cache_invalidate(ddi_ufm_handle_t *ufmh)
{
	ASSERT(MUTEX_HELD(&ufmh->ufmh_lock));

	if (ufmh->ufmh_images == NULL)
		return;

	for (uint_t i = 0; i < ufmh->ufmh_nimages; i++) {
		struct ddi_ufm_image *img = &ufmh->ufmh_images[i];

		if (img->ufmi_slots == NULL)
			continue;

		for (uint_t s = 0; s < img->ufmi_nslots; s++) {
			struct ddi_ufm_slot *slot = &img->ufmi_slots[s];

			if (slot->ufms_version != NULL)
				strfree(slot->ufms_version);
			nvlist_free(slot->ufms_misc);
		}
		kmem_free(img->ufmi_slots,
		    (img->ufmi_nslots * sizeof (ddi_ufm_slot_t)));
		if (img->ufmi_desc != NULL)
			strfree(img->ufmi_desc);
		nvlist_free(img->ufmi_misc);
	}

	kmem_free(ufmh->ufmh_images,
	    (ufmh->ufmh_nimages * sizeof (ddi_ufm_image_t)));
	ufmh->ufmh_images = NULL;
	ufmh->ufmh_nimages = 0;
	ufmh->ufmh_caps = 0;
	nvlist_free(ufmh->ufmh_report);
	ufmh->ufmh_report = NULL;
}

static void
free_nvlist_array(nvlist_t **nvlarr, uint_t nelems)
{
	for (uint_t i = 0; i < nelems; i++) {
		if (nvlarr[i] != NULL)
			nvlist_free(nvlarr[i]);
	}
	kmem_free(nvlarr, nelems * sizeof (nvlist_t *));
}

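/*
 * Build the cached UFM report for this handle, if it does not already exist,
 * by invoking the driver's UFM entry points and converting the results into
 * an nvlist.  Returns 0 on success or an error value otherwise.  The caller
 * must hold the UFM handle lock.
 */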
int
ufm_cache_fill(ddi_ufm_handle_t *ufmh)
{
	int ret;
	uint_t nimgs;
	ddi_ufm_cap_t caps;
	nvlist_t **images = NULL, **slots = NULL;

	ASSERT(MUTEX_HELD(&ufmh->ufmh_lock));

	/*
	 * Check whether we already have a cached report and if so, return
	 * straight away.
	 */
	if (ufmh->ufmh_report != NULL)
		return (0);

	/*
	 * First check which UFM caps this driver supports.  If it doesn't
	 * support DDI_UFM_CAP_REPORT, then there's nothing to cache and we
	 * can just return.
	 */
	ret = ufmh->ufmh_ops->ddi_ufm_op_getcaps(ufmh, ufmh->ufmh_arg, &caps);
	if (ret != 0)
		return (ret);

	ufmh->ufmh_caps = caps;
	if ((ufmh->ufmh_caps & DDI_UFM_CAP_REPORT) == 0)
		return (ENOTSUP);

	/*
	 * Next, figure out how many UFM images the device has.  If a
	 * ddi_ufm_op_nimages entry point wasn't specified, then we assume
	 * that the device has a single image.
	 */
	if (ufmh->ufmh_ops->ddi_ufm_op_nimages != NULL) {
		ret = ufmh->ufmh_ops->ddi_ufm_op_nimages(ufmh, ufmh->ufmh_arg,
		    &nimgs);
		if (ret == 0 && nimgs > 0)
			ufmh->ufmh_nimages = nimgs;
		else
			goto cache_fail;
	} else {
		ufmh->ufmh_nimages = 1;
	}

	/*
	 * Now that we know how many images we're dealing with, allocate space
	 * for an appropriately-sized array of ddi_ufm_image_t structs and then
	 * iterate through them calling the ddi_ufm_op_fill_image entry point
	 * so that the driver can fill them in.
	 */
	ufmh->ufmh_images =
	    kmem_zalloc((sizeof (ddi_ufm_image_t) * ufmh->ufmh_nimages),
	    KM_NOSLEEP | KM_NORMALPRI);
	if (ufmh->ufmh_images == NULL)
		return (ENOMEM);

	for (uint_t i = 0; i < ufmh->ufmh_nimages; i++) {
		struct ddi_ufm_image *img = &ufmh->ufmh_images[i];

		ret = ufmh->ufmh_ops->ddi_ufm_op_fill_image(ufmh,
		    ufmh->ufmh_arg, i, img);

		if (ret != 0)
			goto cache_fail;

		if (img->ufmi_desc == NULL || img->ufmi_nslots == 0) {
			ret = EIO;
			goto cache_fail;
		}

		img->ufmi_slots =
		    kmem_zalloc((sizeof (ddi_ufm_slot_t) * img->ufmi_nslots),
		    KM_NOSLEEP | KM_NORMALPRI);
		if (img->ufmi_slots == NULL) {
			ret = ENOMEM;
			goto cache_fail;
		}

		for (uint_t s = 0; s < img->ufmi_nslots; s++) {
			struct ddi_ufm_slot *slot = &img->ufmi_slots[s];

			ret = ufmh->ufmh_ops->ddi_ufm_op_fill_slot(ufmh,
			    ufmh->ufmh_arg, i, s, slot);

			if (ret != 0)
				goto cache_fail;

			ASSERT(slot->ufms_attrs & DDI_UFM_ATTR_EMPTY ||
			    slot->ufms_version != NULL);
		}
	}
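	/*
	 * Convert the cached ddi_ufm_image_t and ddi_ufm_slot_t data into the
	 * nvlist-based report that is cached on the handle.
	 */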
	images = kmem_zalloc(sizeof (nvlist_t *) * ufmh->ufmh_nimages,
	    KM_SLEEP);
	for (uint_t i = 0; i < ufmh->ufmh_nimages; i++) {
		ddi_ufm_image_t *img = &ufmh->ufmh_images[i];

		images[i] = fnvlist_alloc();
		fnvlist_add_string(images[i], DDI_UFM_NV_IMAGE_DESC,
		    img->ufmi_desc);
		if (img->ufmi_misc != NULL) {
			fnvlist_add_nvlist(images[i], DDI_UFM_NV_IMAGE_MISC,
			    img->ufmi_misc);
		}

		slots = kmem_zalloc(sizeof (nvlist_t *) * img->ufmi_nslots,
		    KM_SLEEP);
		for (uint_t s = 0; s < img->ufmi_nslots; s++) {
			ddi_ufm_slot_t *slot = &img->ufmi_slots[s];

			slots[s] = fnvlist_alloc();
			fnvlist_add_uint32(slots[s], DDI_UFM_NV_SLOT_ATTR,
			    slot->ufms_attrs);
			if (slot->ufms_attrs & DDI_UFM_ATTR_EMPTY)
				continue;

			if (slot->ufms_imgsize != 0) {
				fnvlist_add_uint64(slots[s],
				    DDI_UFM_NV_SLOT_IMGSIZE,
				    slot->ufms_imgsize);
			}

			fnvlist_add_string(slots[s], DDI_UFM_NV_SLOT_VERSION,
			    slot->ufms_version);
			if (slot->ufms_misc != NULL) {
				fnvlist_add_nvlist(slots[s],
				    DDI_UFM_NV_SLOT_MISC, slot->ufms_misc);
			}
		}
		fnvlist_add_nvlist_array(images[i], DDI_UFM_NV_IMAGE_SLOTS,
		    slots, img->ufmi_nslots);
		free_nvlist_array(slots, img->ufmi_nslots);
	}
	ufmh->ufmh_report = fnvlist_alloc();
	fnvlist_add_nvlist_array(ufmh->ufmh_report, DDI_UFM_NV_IMAGES, images,
	    ufmh->ufmh_nimages);
	free_nvlist_array(images, ufmh->ufmh_nimages);

	return (0);

cache_fail:
	ufm_cache_invalidate(ufmh);
	return (ret);
}

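/*
 * Read len bytes of image data from the specified image/slot, starting at
 * offset off, and copy it out to uaddr in UFM_READ_STRIDE-sized chunks.  The
 * total number of bytes read is returned via nreadp.
 */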
int
ufm_read_img(ddi_ufm_handle_t *ufmh, uint_t img, uint_t slot, uint64_t len,
    uint64_t off, uintptr_t uaddr, uint64_t *nreadp, int copyflags)
{
	int ret = 0;
	ddi_ufm_cap_t caps;
	void *buf;
	uint64_t nread;

	ret = ufmh->ufmh_ops->ddi_ufm_op_getcaps(ufmh, ufmh->ufmh_arg, &caps);
	if (ret != 0) {
		return (ret);
	}

	if ((caps & DDI_UFM_CAP_READIMG) == 0 ||
	    ufmh->ufmh_ops->ddi_ufm_op_readimg == NULL) {
		return (ENOTSUP);
	}

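	/*
	 * If off + len wraps around, the sum will be smaller than both
	 * operands; reject such requests as an overflow.
	 */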
	if (off + len < MAX(off, len)) {
		return (EOVERFLOW);
	}

	buf = kmem_zalloc(UFM_READ_STRIDE, KM_SLEEP);
	nread = 0;
	while (len > 0) {
		uint64_t toread = MIN(len, UFM_READ_STRIDE);
		uint64_t iter;

		ret = ufmh->ufmh_ops->ddi_ufm_op_readimg(ufmh, ufmh->ufmh_arg,
		    img, slot, toread, off + nread, buf, &iter);
		if (ret != 0) {
			break;
		}

		if (ddi_copyout(buf, (void *)(uintptr_t)(uaddr + nread), iter,
		    copyflags & FKIOCTL) != 0) {
			ret = EFAULT;
			break;
		}

		nread += iter;
		len -= iter;
	}

	*nreadp = nread;
	kmem_free(buf, UFM_READ_STRIDE);
	return (ret);
}

/*
 * This gets called early in boot by setup_ddi().
 */
void
ufm_init(void)
{
	mutex_init(&ufm_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&ufm_handles, ufm_handle_compare,
	    sizeof (ddi_ufm_handle_t),
	    offsetof(ddi_ufm_handle_t, ufmh_link));
}

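/*
 * AVL comparison routine: UFM handles are ordered by their device path.
 */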
static int
ufm_handle_compare(const void *a1, const void *a2)
{
	const struct ddi_ufm_handle *hdl1, *hdl2;
	int cmp;

	hdl1 = (struct ddi_ufm_handle *)a1;
	hdl2 = (struct ddi_ufm_handle *)a2;

	cmp = strcmp(hdl1->ufmh_devpath, hdl2->ufmh_devpath);

	if (cmp > 0)
		return (1);
	else if (cmp < 0)
		return (-1);
	else
		return (0);
}

/*
 * This is used by the ufm driver to look up the UFM handle associated with a
 * particular devpath.
 *
 * On success, this function returns the requested UFM handle, with its lock
 * held.  The caller is responsible for dropping the lock when it is done
 * with the handle.
 */
struct ddi_ufm_handle *
ufm_find(const char *devpath)
{
	struct ddi_ufm_handle find = { 0 }, *ufmh;

	(void) strlcpy(find.ufmh_devpath, devpath, MAXPATHLEN);

	mutex_enter(&ufm_lock);
	ufmh = avl_find(&ufm_handles, &find, NULL);
	if (ufmh != NULL)
		mutex_enter(&ufmh->ufmh_lock);
	mutex_exit(&ufm_lock);

	return (ufmh);
}

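/*
 * Called by a driver to register with the UFM subsystem.  This validates the
 * version and ops vector, allocates (or reuses) the per-instance UFM handle
 * and, for a new registration, inserts the handle into the global AVL tree.
 */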
int
ddi_ufm_init(dev_info_t *dip, uint_t version, ddi_ufm_ops_t *ufmops,
    ddi_ufm_handle_t **ufmh, void *arg)
{
	ddi_ufm_handle_t *old_ufmh;
	char devpath[MAXPATHLEN];

	VERIFY(version != 0 && ufmops != NULL);
	VERIFY(ufmops->ddi_ufm_op_fill_image != NULL &&
	    ufmops->ddi_ufm_op_fill_slot != NULL &&
	    ufmops->ddi_ufm_op_getcaps != NULL);

	if (version < DDI_UFM_VERSION_ONE || version > DDI_UFM_CURRENT_VERSION)
		return (ENOTSUP);

	/*
	 * First we check if we already have a UFM handle for this device
	 * instance.  This can happen if the module got unloaded or the driver
	 * was suspended after previously registering with the UFM subsystem.
	 *
	 * If we find an old handle then we simply reset its state and hand it
	 * back to the driver.
	 *
	 * If we don't find an old handle then this is a new registration, so
	 * we allocate and initialize a new handle.
	 *
	 * In either case, we don't need to NULL-out the other fields (like
	 * ufmh_report) because they cannot be referenced until ufmh_state
	 * first transitions to DDI_UFM_STATE_READY.  The only way that can
	 * happen is for the driver to call ddi_ufm_update(), which will call
	 * ufm_cache_invalidate(), which in turn will take care of properly
	 * cleaning up and reinitializing the other fields in the handle.
	 */
	(void) ddi_pathname(dip, devpath);
	if ((old_ufmh = ufm_find(devpath)) != NULL) {
		*ufmh = old_ufmh;
	} else {
		*ufmh = kmem_zalloc(sizeof (ddi_ufm_handle_t), KM_SLEEP);
		(void) strlcpy((*ufmh)->ufmh_devpath, devpath, MAXPATHLEN);
		mutex_init(&(*ufmh)->ufmh_lock, NULL, MUTEX_DEFAULT, NULL);
	}
	(*ufmh)->ufmh_ops = ufmops;
	(*ufmh)->ufmh_arg = arg;
	(*ufmh)->ufmh_version = version;
	(*ufmh)->ufmh_state = DDI_UFM_STATE_INIT;

	/*
	 * If this is a new registration, add the UFM handle to the global AVL
	 * tree of handles.
	 *
	 * Otherwise, if it's an old registration then ufm_find() will have
	 * returned the old handle with the lock already held, so we need to
	 * release it before returning.
	 */
	if (old_ufmh == NULL) {
		mutex_enter(&ufm_lock);
		avl_add(&ufm_handles, *ufmh);
		mutex_exit(&ufm_lock);
	} else {
		mutex_exit(&old_ufmh->ufmh_lock);
	}

	/*
	 * Give a hint in the devinfo tree that this device supports UFM
	 * capabilities.
	 */
	(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip, "ddi-ufm-capable");

	return (DDI_SUCCESS);
}

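/*
 * Called by a driver to unregister from the UFM subsystem.  This marks the
 * handle as shutting down and frees any cached UFM data.  The handle itself
 * remains on the AVL tree so that it can be reused if the driver instance
 * later re-registers.
 */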
void
ddi_ufm_fini(ddi_ufm_handle_t *ufmh)
{
	VERIFY(ufmh != NULL);

	mutex_enter(&ufmh->ufmh_lock);
	ufmh->ufmh_state |= DDI_UFM_STATE_SHUTTING_DOWN;
	ufm_cache_invalidate(ufmh);
	mutex_exit(&ufmh->ufmh_lock);
}

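/*
 * Called by a driver after ddi_ufm_init() and whenever its firmware state may
 * have changed.  Any cached UFM data is discarded and the handle is marked
 * ready, so a fresh report will be generated the next time one is requested.
 */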
void
ddi_ufm_update(ddi_ufm_handle_t *ufmh)
{
	VERIFY(ufmh != NULL);

	mutex_enter(&ufmh->ufmh_lock);
	if (ufmh->ufmh_state & DDI_UFM_STATE_SHUTTING_DOWN) {
		mutex_exit(&ufmh->ufmh_lock);
		return;
	}
	ufm_cache_invalidate(ufmh);
	ufmh->ufmh_state |= DDI_UFM_STATE_READY;
	mutex_exit(&ufmh->ufmh_lock);
}

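/*
 * The functions below are setters that drivers use from their
 * ddi_ufm_op_fill_image and ddi_ufm_op_fill_slot entry points to describe
 * each UFM image and slot to the subsystem.
 */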
void
ddi_ufm_image_set_desc(ddi_ufm_image_t *uip, const char *desc)
{
	VERIFY(uip != NULL && desc != NULL);
	if (uip->ufmi_desc != NULL)
		strfree(uip->ufmi_desc);

	uip->ufmi_desc = ddi_strdup(desc, KM_SLEEP);
}

void
ddi_ufm_image_set_nslots(ddi_ufm_image_t *uip, uint_t nslots)
{
	VERIFY(uip != NULL);
	uip->ufmi_nslots = nslots;
}

void
ddi_ufm_image_set_misc(ddi_ufm_image_t *uip, nvlist_t *misc)
{
	VERIFY(uip != NULL && misc != NULL);
	nvlist_free(uip->ufmi_misc);
	uip->ufmi_misc = misc;
}

void
ddi_ufm_slot_set_version(ddi_ufm_slot_t *usp, const char *version)
{
	VERIFY(usp != NULL && version != NULL);
	if (usp->ufms_version != NULL)
		strfree(usp->ufms_version);

	usp->ufms_version = ddi_strdup(version, KM_SLEEP);
}

void
ddi_ufm_slot_set_attrs(ddi_ufm_slot_t *usp, ddi_ufm_attr_t attr)
{
	VERIFY(usp != NULL && attr <= DDI_UFM_ATTR_MAX);
	usp->ufms_attrs = attr;
}

void
ddi_ufm_slot_set_misc(ddi_ufm_slot_t *usp, nvlist_t *misc)
{
	VERIFY(usp != NULL && misc != NULL);
	nvlist_free(usp->ufms_misc);
	usp->ufms_misc = misc;
}

void
ddi_ufm_slot_set_imgsize(ddi_ufm_slot_t *usp, uint64_t size)
{
	VERIFY3P(usp, !=, NULL);
	usp->ufms_imgsize = size;
}