xref: /illumos-gate/usr/src/uts/common/os/ddi_ufm.c (revision 13b136d3061155363c62c9f6568d25b8b27da8f6)
1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2019 Joyent, Inc.
14  */
15 
16 #include <sys/avl.h>
17 #include <sys/ddi_ufm.h>
18 #include <sys/ddi_ufm_impl.h>
19 #include <sys/debug.h>
20 #include <sys/kmem.h>
21 #include <sys/sunddi.h>
22 #include <sys/stddef.h>
23 
24 /*
25  * The UFM subsystem tracks its internal state with respect to device
26  * drivers that participate in the DDI UFM subsystem on a per-instance basis
27  * via ddi_ufm_handle_t structures (see ddi_ufm_impl.h).  This is known as the
28  * UFM handle.  The UFM handle contains a pointer to the driver's UFM ops,
29  * which the ufm(7D) pseudo driver uses to invoke the UFM entry points in
30  * response to DDI UFM ioctls.  Additionally, the DDI UFM subsystem uses the
31  * handle to maintain cached UFM image and slot data.
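 *
 * As an illustrative sketch (the driver, callback and field names here are
 * invented for illustration), a hypothetical driver "mydrv" might register
 * from its attach(9E) entry point roughly as follows, supplying the getcaps,
 * fill_image and fill_slot entry points that ufm_cache_fill() below invokes:
 *
 *	static ddi_ufm_ops_t mydrv_ufm_ops = {
 *		.ddi_ufm_op_getcaps = mydrv_ufm_getcaps,
 *		.ddi_ufm_op_nimages = mydrv_ufm_nimages,
 *		.ddi_ufm_op_fill_image = mydrv_ufm_fill_image,
 *		.ddi_ufm_op_fill_slot = mydrv_ufm_fill_slot
 *	};
 *
 *	if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &mydrv_ufm_ops,
 *	    &mydrv->m_ufmh, mydrv) == DDI_SUCCESS)
 *		ddi_ufm_update(mydrv->m_ufmh);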
32  *
33  * In order to track and provide fast lookups of a driver instance's UFM
34  * handle, the DDI UFM subsystem stores a pointer to the handle in a global AVL
35  * tree.  UFM handles are added to the tree when a driver calls ddi_ufm_init(9E).
36  * They are not removed by ddi_ufm_fini(9E); a re-registering instance reuses its handle.
37  *
38  * Some notes on the locking strategy/rules.
39  *
40  * All access to the tree is serialized via the mutex, ufm_lock.
41  * Additionally, each UFM handle is protected by a per-handle mutex.
42  *
43  * Code must acquire ufm_lock in order to walk the tree.  Before reading or
44  * modifying the state of any UFM handle, code must then acquire the
45  * UFM handle lock.  Once the UFM handle lock has been acquired, ufm_lock
46  * should be dropped.
47  *
48  * Only one UFM handle lock should be held at any time.
49  * If a UFM handle lock is held, it must be released before attempting to
50  * re-acquire ufm_lock.
51  *
52  * For example, the lock sequence for calling a UFM entry point and/or
53  * reading/modifying UFM handle state would be as follows:
54  * - acquire ufm_lock
55  * - walk the tree to find the UFM handle
56  * - acquire UFM handle lock
57  * - release ufm_lock
58  * - call entry point and/or access handle state
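 *
 * ufm_find() below implements the lookup-and-lock portion of this sequence.
 * A minimal sketch of a caller (for example, the ufm(7D) pseudo driver
 * servicing a report ioctl) might look like:
 *
 *	ddi_ufm_handle_t *ufmh;
 *
 *	if ((ufmh = ufm_find(devpath)) != NULL) {
 *		(void) ufm_cache_fill(ufmh);
 *		mutex_exit(&ufmh->ufmh_lock);
 *	}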
59  *
60  * Testing
61  * -------
62  * A set of automated tests for the DDI UFM subsystem exists at:
63  * usr/src/test/os-tests/tests/ddi_ufm/
64  *
65  * These tests should be run whenever changes are made to the DDI UFM
66  * subsystem or the ufm driver.
67  */
68 static avl_tree_t ufm_handles;
69 static kmutex_t ufm_lock;
70 
71 static int ufm_handle_compare(const void *, const void *);
72 
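/*
 * Free any cached image and slot data associated with this handle, along
 * with the cached nvlist report, and reset the handle's cached state.
 * The caller must hold ufmh_lock.
 */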
73 static void
74 ufm_cache_invalidate(ddi_ufm_handle_t *ufmh)
75 {
76 	ASSERT(MUTEX_HELD(&ufmh->ufmh_lock));
77 
78 	if (ufmh->ufmh_images == NULL)
79 		return;
80 
81 	for (uint_t i = 0; i < ufmh->ufmh_nimages; i++) {
82 		struct ddi_ufm_image *img = &ufmh->ufmh_images[i];
83 
84 		if (img->ufmi_slots == NULL)
85 			continue;
86 
87 		for (uint_t s = 0; s < img->ufmi_nslots; s++) {
88 			struct ddi_ufm_slot *slot = &img->ufmi_slots[s];
89 
90 			if (slot->ufms_version != NULL)
91 				strfree(slot->ufms_version);
92 			nvlist_free(slot->ufms_misc);
93 		}
94 		kmem_free(img->ufmi_slots,
95 		    (img->ufmi_nslots * sizeof (ddi_ufm_slot_t)));
96 		if (img->ufmi_desc != NULL)
97 			strfree(img->ufmi_desc);
98 		nvlist_free(img->ufmi_misc);
99 	}
100 
101 	kmem_free(ufmh->ufmh_images,
102 	    (ufmh->ufmh_nimages * sizeof (ddi_ufm_image_t)));
103 	ufmh->ufmh_images = NULL;
104 	ufmh->ufmh_nimages = 0;
105 	ufmh->ufmh_caps = 0;
106 	nvlist_free(ufmh->ufmh_report);
107 	ufmh->ufmh_report = NULL;
108 }
109 
110 static void
111 free_nvlist_array(nvlist_t **nvlarr, uint_t nelems)
112 {
113 	for (uint_t i = 0; i < nelems; i++) {
114 		if (nvlarr[i] != NULL)
115 			nvlist_free(nvlarr[i]);
116 	}
117 	kmem_free(nvlarr, nelems * sizeof (nvlist_t *));
118 }
119 
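/*
 * Build and cache the UFM report for this handle by invoking the driver's
 * UFM entry points.  Returns 0 on success (including when a report is
 * already cached) and an errno value otherwise.  The caller must hold
 * ufmh_lock.
 */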
120 int
121 ufm_cache_fill(ddi_ufm_handle_t *ufmh)
122 {
123 	int ret;
124 	uint_t nimgs;
125 	ddi_ufm_cap_t caps;
126 	nvlist_t **images = NULL, **slots = NULL;
127 
128 	ASSERT(MUTEX_HELD(&ufmh->ufmh_lock));
129 
130 	/*
131 	 * Check whether we already have a cached report and if so, return
132 	 * straight away.
133 	 */
134 	if (ufmh->ufmh_report != NULL)
135 		return (0);
136 
137 	/*
138 	 * First check which UFM caps this driver supports.  If it doesn't
139 	 * support DDI_UFM_CAP_REPORT, then there's nothing to cache and we
140 	 * can just return.
141 	 */
142 	ret = ufmh->ufmh_ops->ddi_ufm_op_getcaps(ufmh, ufmh->ufmh_arg, &caps);
143 	if (ret != 0)
144 		return (ret);
145 
146 	ufmh->ufmh_caps = caps;
147 	if ((ufmh->ufmh_caps & DDI_UFM_CAP_REPORT) == 0)
148 		return (ENOTSUP);
149 
150 	/*
151 	 * Next, figure out how many UFM images the device has.  If a
152 	 * ddi_ufm_op_nimages entry point wasn't specified, then we assume
153 	 * that the device has a single image.
154 	 */
155 	if (ufmh->ufmh_ops->ddi_ufm_op_nimages != NULL) {
156 		ret = ufmh->ufmh_ops->ddi_ufm_op_nimages(ufmh, ufmh->ufmh_arg,
157 		    &nimgs);
158 		if (ret == 0 && nimgs > 0)
159 			ufmh->ufmh_nimages = nimgs;
160 		else
161 			goto cache_fail;
162 	} else {
163 		ufmh->ufmh_nimages = 1;
164 	}
165 
166 	/*
167 	 * Now that we know how many images we're dealing with, allocate space
168 	 * for an appropriately-sized array of ddi_ufm_image_t structs and then
169 	 * iterate through them calling the ddi_ufm_op_fill_image entry point
170 	 * so that the driver can fill them in.
171 	 */
172 	ufmh->ufmh_images =
173 	    kmem_zalloc((sizeof (ddi_ufm_image_t) * ufmh->ufmh_nimages),
174 	    KM_NOSLEEP | KM_NORMALPRI);
175 	if (ufmh->ufmh_images == NULL)
176 		return (ENOMEM);
177 
178 	for (uint_t i = 0; i < ufmh->ufmh_nimages; i++) {
179 		struct ddi_ufm_image *img = &ufmh->ufmh_images[i];
180 
181 		ret = ufmh->ufmh_ops->ddi_ufm_op_fill_image(ufmh,
182 		    ufmh->ufmh_arg, i, img);
183 
184 		if (ret != 0)
185 			goto cache_fail;
186 
187 		if (img->ufmi_desc == NULL || img->ufmi_nslots == 0) {
188 			ret = EIO;
189 			goto cache_fail;
190 		}
191 
192 		img->ufmi_slots =
193 		    kmem_zalloc((sizeof (ddi_ufm_slot_t) * img->ufmi_nslots),
194 		    KM_NOSLEEP | KM_NORMALPRI);
195 		if (img->ufmi_slots == NULL) {
196 			ret = ENOMEM;
197 			goto cache_fail;
198 		}
199 
200 		for (uint_t s = 0; s < img->ufmi_nslots; s++) {
201 			struct ddi_ufm_slot *slot = &img->ufmi_slots[s];
202 
203 			ret = ufmh->ufmh_ops->ddi_ufm_op_fill_slot(ufmh,
204 			    ufmh->ufmh_arg, i, s, slot);
205 
206 			if (ret != 0)
207 				goto cache_fail;
208 
209 			ASSERT((slot->ufms_attrs & DDI_UFM_ATTR_EMPTY) ||
210 			    slot->ufms_version != NULL);
211 		}
212 	}
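	/*
	 * Marshal the cached image and slot data into the nvlist report that
	 * is handed back to the ufm(7D) ioctl path.  The layout built below
	 * is:
	 *
	 *	DDI_UFM_NV_IMAGES: array of image nvlists, each containing
	 *		DDI_UFM_NV_IMAGE_DESC		string
	 *		DDI_UFM_NV_IMAGE_MISC		nvlist (optional)
	 *		DDI_UFM_NV_IMAGE_SLOTS: array of slot nvlists, each
	 *		containing
	 *			DDI_UFM_NV_SLOT_ATTR	uint32
	 *			DDI_UFM_NV_SLOT_VERSION	string (non-empty slots)
	 *			DDI_UFM_NV_SLOT_MISC	nvlist (optional)
	 */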
213 	images = kmem_zalloc(sizeof (nvlist_t *) * ufmh->ufmh_nimages,
214 	    KM_SLEEP);
215 	for (uint_t i = 0; i < ufmh->ufmh_nimages; i++) {
216 		ddi_ufm_image_t *img = &ufmh->ufmh_images[i];
217 
218 		images[i] = fnvlist_alloc();
219 		fnvlist_add_string(images[i], DDI_UFM_NV_IMAGE_DESC,
220 		    img->ufmi_desc);
221 		if (img->ufmi_misc != NULL) {
222 			fnvlist_add_nvlist(images[i], DDI_UFM_NV_IMAGE_MISC,
223 			    img->ufmi_misc);
224 		}
225 
226 		slots = kmem_zalloc(sizeof (nvlist_t *) * img->ufmi_nslots,
227 		    KM_SLEEP);
228 		for (uint_t s = 0; s < img->ufmi_nslots; s++) {
229 			ddi_ufm_slot_t *slot = &img->ufmi_slots[s];
230 
231 			slots[s] = fnvlist_alloc();
232 			fnvlist_add_uint32(slots[s], DDI_UFM_NV_SLOT_ATTR,
233 			    slot->ufms_attrs);
234 			if (slot->ufms_attrs & DDI_UFM_ATTR_EMPTY)
235 				continue;
236 
237 			fnvlist_add_string(slots[s], DDI_UFM_NV_SLOT_VERSION,
238 			    slot->ufms_version);
239 			if (slot->ufms_misc != NULL) {
240 				fnvlist_add_nvlist(slots[s],
241 				    DDI_UFM_NV_SLOT_MISC, slot->ufms_misc);
242 			}
243 		}
244 		fnvlist_add_nvlist_array(images[i], DDI_UFM_NV_IMAGE_SLOTS,
245 		    slots, img->ufmi_nslots);
246 		free_nvlist_array(slots, img->ufmi_nslots);
247 	}
248 	ufmh->ufmh_report = fnvlist_alloc();
249 	fnvlist_add_nvlist_array(ufmh->ufmh_report, DDI_UFM_NV_IMAGES, images,
250 	    ufmh->ufmh_nimages);
251 	free_nvlist_array(images, ufmh->ufmh_nimages);
252 
253 	return (0);
254 
255 cache_fail:
256 	ufm_cache_invalidate(ufmh);
257 	return (ret);
258 }
259 
260 /*
261  * This gets called early in boot by setup_ddi().
262  */
263 void
264 ufm_init(void)
265 {
266 	mutex_init(&ufm_lock, NULL, MUTEX_DEFAULT, NULL);
267 
268 	avl_create(&ufm_handles, ufm_handle_compare,
269 	    sizeof (ddi_ufm_handle_t),
270 	    offsetof(ddi_ufm_handle_t, ufmh_link));
271 }
272 
273 static int
274 ufm_handle_compare(const void *a1, const void *a2)
275 {
276 	const struct ddi_ufm_handle *hdl1, *hdl2;
277 	int cmp;
278 
279 	hdl1 = (struct ddi_ufm_handle *)a1;
280 	hdl2 = (struct ddi_ufm_handle *)a2;
281 
282 	cmp = strcmp(hdl1->ufmh_devpath, hdl2->ufmh_devpath);
283 
284 	if (cmp > 0)
285 		return (1);
286 	else if (cmp < 0)
287 		return (-1);
288 	else
289 		return (0);
290 }
291 
292 /*
293  * This is used by the ufm driver to look up the UFM handle associated with a
294  * particular devpath.
295  *
296  * On success, this function returns the requested UFM handle with its lock
297  * held.  The caller is responsible for dropping the lock when it is done with the
298  * handle.
299  */
300 struct ddi_ufm_handle *
301 ufm_find(const char *devpath)
302 {
303 	struct ddi_ufm_handle find = { 0 }, *ufmh;
304 
305 	(void) strlcpy(find.ufmh_devpath, devpath, MAXPATHLEN);
306 
307 	mutex_enter(&ufm_lock);
308 	ufmh = avl_find(&ufm_handles, &find, NULL);
309 	if (ufmh != NULL)
310 		mutex_enter(&ufmh->ufmh_lock);
311 	mutex_exit(&ufm_lock);
312 
313 	return (ufmh);
314 }
315 
316 int
317 ddi_ufm_init(dev_info_t *dip, uint_t version, ddi_ufm_ops_t *ufmops,
318     ddi_ufm_handle_t **ufmh, void *arg)
319 {
320 	ddi_ufm_handle_t *old_ufmh;
321 	char devpath[MAXPATHLEN];
322 
323 	VERIFY(version != 0 && ufmops != NULL);
324 	VERIFY(ufmops->ddi_ufm_op_fill_image != NULL &&
325 	    ufmops->ddi_ufm_op_fill_slot != NULL &&
326 	    ufmops->ddi_ufm_op_getcaps != NULL);
327 
328 	if (version < DDI_UFM_VERSION_ONE || version > DDI_UFM_CURRENT_VERSION)
329 		return (ENOTSUP);
330 
331 	/*
332 	 * First we check if we already have a UFM handle for this device
333 	 * instance.  This can happen if the module got unloaded or the driver
334 	 * was suspended after previously registering with the UFM subsystem.
335 	 *
336 	 * If we find an old handle then we simply reset its state and hand it
337 	 * back to the driver.
338 	 *
339 	 * If we don't find an old handle then this is a new registration, so
340 	 * we allocate and initialize a new handle.
341 	 *
342 	 * In either case, we don't need to NULL out the other fields (like
343 	 * ufmh_report), because they cannot be referenced until ufmh_state has
344 	 * first transitioned to DDI_UFM_STATE_READY.  The only way that can
345 	 * happen is for the driver to call ddi_ufm_update(), which will call
346 	 * ufm_cache_invalidate(), which in turn will take care of properly
347 	 * cleaning up and reinitializing the other fields in the handle.
348 	 */
349 	(void) ddi_pathname(dip, devpath);
350 	if ((old_ufmh = ufm_find(devpath)) != NULL) {
351 		*ufmh = old_ufmh;
352 	} else {
353 		*ufmh = kmem_zalloc(sizeof (ddi_ufm_handle_t), KM_SLEEP);
354 		(void) strlcpy((*ufmh)->ufmh_devpath, devpath, MAXPATHLEN);
355 		mutex_init(&(*ufmh)->ufmh_lock, NULL, MUTEX_DEFAULT, NULL);
356 	}
357 	(*ufmh)->ufmh_ops = ufmops;
358 	(*ufmh)->ufmh_arg = arg;
359 	(*ufmh)->ufmh_version = version;
360 	(*ufmh)->ufmh_state = DDI_UFM_STATE_INIT;
361 
362 	/*
363 	 * If this is a new registration, add the UFM handle to the global AVL
364 	 * tree of handles.
365 	 *
366 	 * Otherwise, if it's an old registration then ufm_find() will have
367 	 * returned the old handle with the lock already held, so we need to
368 	 * release it before returning.
369 	 */
370 	if (old_ufmh == NULL) {
371 		mutex_enter(&ufm_lock);
372 		avl_add(&ufm_handles, *ufmh);
373 		mutex_exit(&ufm_lock);
374 	} else {
375 		mutex_exit(&old_ufmh->ufmh_lock);
376 	}
377 
378 	return (DDI_SUCCESS);
379 }
380 
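/*
 * Called by a driver to tear down its UFM registration (for example from its
 * detach(9E) entry point).  This marks the handle as shutting down and frees
 * any cached image, slot and report data.  The handle itself remains in the
 * AVL tree so that a subsequent ddi_ufm_init(9E) by the same instance can
 * reuse it.
 */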
381 void
382 ddi_ufm_fini(ddi_ufm_handle_t *ufmh)
383 {
384 	VERIFY(ufmh != NULL);
385 
386 	mutex_enter(&ufmh->ufmh_lock);
387 	ufmh->ufmh_state |= DDI_UFM_STATE_SHUTTING_DOWN;
388 	ufm_cache_invalidate(ufmh);
389 	mutex_exit(&ufmh->ufmh_lock);
390 }
391 
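/*
 * Called by a driver after registering and whenever its firmware image or
 * slot information may have changed.  This discards any cached report data
 * and marks the handle as ready, so that the next report request rebuilds
 * the cache via ufm_cache_fill().
 */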
392 void
393 ddi_ufm_update(ddi_ufm_handle_t *ufmh)
394 {
395 	VERIFY(ufmh != NULL);
396 
397 	mutex_enter(&ufmh->ufmh_lock);
398 	if (ufmh->ufmh_state & DDI_UFM_STATE_SHUTTING_DOWN) {
399 		mutex_exit(&ufmh->ufmh_lock);
400 		return;
401 	}
402 	ufm_cache_invalidate(ufmh);
403 	ufmh->ufmh_state |= DDI_UFM_STATE_READY;
404 	mutex_exit(&ufmh->ufmh_lock);
405 }
406 
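/*
 * The setters below are used by a driver's fill_image and fill_slot entry
 * points to populate the ddi_ufm_image_t and ddi_ufm_slot_t structures that
 * ufm_cache_fill() passes to them.
 */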
407 void
408 ddi_ufm_image_set_desc(ddi_ufm_image_t *uip, const char *desc)
409 {
410 	VERIFY(uip != NULL && desc != NULL);
411 	if (uip->ufmi_desc != NULL)
412 		strfree(uip->ufmi_desc);
413 
414 	uip->ufmi_desc = ddi_strdup(desc, KM_SLEEP);
415 }
416 
417 void
418 ddi_ufm_image_set_nslots(ddi_ufm_image_t *uip, uint_t nslots)
419 {
420 	VERIFY(uip != NULL);
421 	uip->ufmi_nslots = nslots;
422 }
423 
424 void
425 ddi_ufm_image_set_misc(ddi_ufm_image_t *uip, nvlist_t *misc)
426 {
427 	VERIFY(uip != NULL && misc != NULL);
428 	nvlist_free(uip->ufmi_misc);
429 	uip->ufmi_misc = misc;
430 }
431 
432 void
433 ddi_ufm_slot_set_version(ddi_ufm_slot_t *usp, const char *version)
434 {
435 	VERIFY(usp != NULL && version != NULL);
436 	if (usp->ufms_version != NULL)
437 		strfree(usp->ufms_version);
438 
439 	usp->ufms_version = ddi_strdup(version, KM_SLEEP);
440 }
441 
442 void
443 ddi_ufm_slot_set_attrs(ddi_ufm_slot_t *usp, ddi_ufm_attr_t attr)
444 {
445 	VERIFY(usp != NULL && attr <= DDI_UFM_ATTR_MAX);
446 	usp->ufms_attrs = attr;
447 }
448 
449 void
450 ddi_ufm_slot_set_misc(ddi_ufm_slot_t *usp, nvlist_t *misc)
451 {
452 	VERIFY(usp != NULL && misc != NULL);
453 	nvlist_free(usp->ufms_misc);
454 	usp->ufms_misc = misc;
455 }
456