/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/group.h>
#include <sys/pg.h>
#include <sys/pghw.h>

/*
 * Processor Groups: Hardware sharing relationship layer
 *
 * This file implements an extension to Processor Groups to capture
 * hardware sharing relationships existing between logical CPUs. Examples of
 * hardware sharing relationships include shared caches on some CMT
 * processor architectures, or shared local memory controllers on
 * NUMA-based system architectures.
 *
 * The pghw_t structure represents the extended PG. The first member
 * of the structure is the generic pg_t with the pghw specific members
 * following. The generic pg_t *must* remain the first member of the
 * structure as the code uses casting of structure references to access
 * the generic pg_t structure elements.
 *
 * In addition to the generic CPU grouping, physical PGs have a hardware
 * sharing relationship enumerated "type", and an instance id. The enumerated
 * type is defined by the pghw_type_t enumeration, while the instance id
 * uniquely identifies the sharing instance from among others of the same
 * hardware sharing type.
 *
 * The physical PGs are organized into an overall hierarchy, and are tracked
 * in a number of different per CPU and per pghw_type_t groups.
 * As an example:
 *
 * -------------
 * | pg_hw     |
 * | (group_t) |
 * -------------
 *  ||                          ============================
 *  ||\\-----------------------//       \\                 \\
 *  ||  | hwset (PGHW_CHIP)   |        -------------      -------------
 *  ||  | (group_t)           |        | pghw_t    |      | pghw_t    |
 *  ||  -----------------------        | chip 0    |      | chip 1    |
 *  ||                                 -------------      -------------
 *  ||                                 \\  \\  \\  \\     \\  \\  \\  \\
 *  ||                                  cpu cpu cpu cpu    cpu cpu cpu cpu
 *  ||
 *  ||                          ============================
 *  ||\\-----------------------//       \\                 \\
 *  ||  | hwset (PGHW_IPIPE)  |        -------------      -------------
 *  ||  | (group_t)           |        | pghw_t    |      | pghw_t    |
 *  ||  -----------------------        | ipipe 0   |      | ipipe 1   |
 *  ||                                 -------------      -------------
 *  ||                                 \\  \\             \\  \\
 *  ||                                  cpu cpu            cpu cpu
 *  ...
 *
 *
 * The top level pg_hw is a group of "hwset" groups. Each hwset holds a group
 * of physical PGs of the same hardware sharing type. Within each hwset, the
 * PG's instance id uniquely identifies the grouping relationship among other
 * groupings of the same sharing type. The instance id for a grouping is
 * platform defined, and in some cases may be used by platform code as a handle
 * to search for a particular relationship instance.
 *
 * Each physical PG (by virtue of the embedded pg_t) contains a group of CPUs
 * that participate in the sharing relationship. Each CPU also has associated
 * with it a grouping tracking the PGs in which the CPU belongs. This can be
 * used to iterate over the various relationships in which the CPU participates
 * (the CPU's chip, cache, lgroup, etc.).
 *
 * The hwsets are created dynamically as new hardware sharing relationship types
 * are instantiated. They are never destroyed; once a given relationship type
 * appears in the system, it is quite likely that at least one instance of that
 * relationship will persist for as long as the system is running.
 */
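
/*
 * As a purely illustrative sketch (pghw_example_report() is a hypothetical
 * helper, not part of the pghw interface and not called from anywhere), the
 * per CPU grouping described above can be walked to report the sharing
 * relationship of a given type that a CPU participates in, along with its
 * instance id and the number of CPUs sharing it:
 */
static void
pghw_example_report(cpu_t *cp, pghw_type_t hw)
{
	group_iter_t	i;
	pghw_t		*pg;

	group_iter_init(&i);
	while ((pg = group_iterate(&cp->cpu_pg->pgs, &i)) != NULL) {
		if (pg->pghw_hw != hw)
			continue;
		cmn_err(CE_CONT, "cpu %d: hw type %d, instance %d, "
		    "%u sharing CPUs\n", cp->cpu_id, (int)pg->pghw_hw,
		    (int)pg->pghw_instance,
		    (uint_t)GROUP_SIZE(&((pg_t *)pg)->pg_cpus));
	}
}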

static group_t		*pg_hw;		/* top level pg hw group */

/*
 * Lookup table mapping hardware sharing relationships to hierarchy levels
 */
static int		pghw_level_table[PGHW_NUM_COMPONENTS];

/*
 * Physical PG kstats
 */
struct pghw_kstat {
	kstat_named_t	pg_id;
	kstat_named_t	pg_class;
	kstat_named_t	pg_ncpus;
	kstat_named_t	pg_instance_id;
	kstat_named_t	pg_hw;
} pghw_kstat = {
	{ "id",			KSTAT_DATA_UINT64 },
	{ "pg_class",		KSTAT_DATA_STRING },
	{ "ncpus",		KSTAT_DATA_UINT64 },
	{ "instance_id",	KSTAT_DATA_UINT64 },
	{ "hardware",		KSTAT_DATA_STRING },
};

kmutex_t		pghw_kstat_lock;

/*
 * hwset operations
 */
static group_t		*pghw_set_create(pghw_type_t);
static void		pghw_set_add(group_t *, pghw_t *);
static void		pghw_set_remove(group_t *, pghw_t *);

/*
 * Initialize the physical portion of a physical PG
 */
void
pghw_init(pghw_t *pg, cpu_t *cp, pghw_type_t hw)
{
	group_t		*hwset;

	if ((hwset = pghw_set_lookup(hw)) == NULL) {
		/*
		 * Haven't seen this hardware type yet
		 */
		hwset = pghw_set_create(hw);
	}

	pghw_set_add(hwset, pg);
	pg->pghw_hw = hw;
	pg->pghw_instance = pg_plat_hw_instance_id(cp, hw);
	pghw_kstat_create(pg);
}

/*
 * Tear down the physical portion of a physical PG
 */
void
pghw_fini(pghw_t *pg)
{
	group_t		*hwset;

	hwset = pghw_set_lookup(pg->pghw_hw);
	ASSERT(hwset != NULL);

	pghw_set_remove(hwset, pg);
	pg->pghw_instance = (id_t)PGHW_INSTANCE_ANON;
	pg->pghw_hw = (pghw_type_t)-1;

	if (pg->pghw_kstat != NULL)
		kstat_delete(pg->pghw_kstat);
}
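
/*
 * Illustrative sketch only (pghw_example_lifecycle() is hypothetical and not
 * called from anywhere): a hardware aware PG class is expected to pair the
 * two routines above, calling pghw_init() when it instantiates a PG for one
 * of a CPU's sharing relationships and pghw_fini() when that PG is torn
 * down. The pghw_t is assumed to have already been allocated and set up by
 * the PG class framework.
 */
static void
pghw_example_lifecycle(pghw_t *pg, cpu_t *cp, pghw_type_t hw)
{
	/* Attach the hw sharing type/instance and create the kstat */
	pghw_init(pg, cp, hw);

	/* ... the PG is used while the sharing relationship exists ... */

	/* Detach from the hwset and delete the kstat */
	pghw_fini(pg);
}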

/*
 * Find an existing physical PG in which to place
 * the given CPU for the specified hardware sharing
 * relationship
 */
pghw_t *
pghw_place_cpu(cpu_t *cp, pghw_type_t hw)
{
	group_t		*hwset;

	if ((hwset = pghw_set_lookup(hw)) == NULL) {
		return (NULL);
	}

	return ((pghw_t *)pg_cpu_find_pg(cp, hwset));
}

/*
 * Find the pg representing the hw sharing relationship in which
 * cp belongs
 */
pghw_t *
pghw_find_pg(cpu_t *cp, pghw_type_t hw)
{
	group_iter_t	i;
	pghw_t	*pg;

	group_iter_init(&i);
	while ((pg = group_iterate(&cp->cpu_pg->pgs, &i)) != NULL) {
		if (pg->pghw_hw == hw)
			return (pg);
	}
	return (NULL);
}

/*
 * Find the PG of the given hardware sharing relationship
 * type with the given instance id
 */
pghw_t *
pghw_find_by_instance(id_t id, pghw_type_t hw)
{
	group_iter_t	i;
	group_t		*set;
	pghw_t		*pg;

	set = pghw_set_lookup(hw);
	if (set == NULL)
		return (NULL);

	group_iter_init(&i);
	while ((pg = group_iterate(set, &i)) != NULL) {
		if (pg->pghw_instance == id)
			return (pg);
	}
	return (NULL);
}
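
/*
 * Illustrative sketch (pghw_example_cpus_on_chip() is a hypothetical helper):
 * as noted in the block comment at the top of this file, the platform defined
 * instance id can serve as a handle. Platform code that knows a chip's
 * instance id could recover the corresponding PG, and the number of CPUs
 * sharing that chip, as follows:
 */
static uint_t
pghw_example_cpus_on_chip(id_t chip_instance)
{
	pghw_t	*pg;

	pg = pghw_find_by_instance(chip_instance, PGHW_CHIP);
	if (pg == NULL)
		return (0);

	return (GROUP_SIZE(&((pg_t *)pg)->pg_cpus));
}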

/*
 * CPU physical ID cache creation / destruction
 * The cache's elements are initialized to the CPU's id
 */
void
pghw_physid_create(cpu_t *cp)
{
	int	i;

	cp->cpu_physid = kmem_alloc(sizeof (cpu_physid_t), KM_SLEEP);

	for (i = 0; i < (sizeof (cpu_physid_t) / sizeof (id_t)); i++) {
		((id_t *)cp->cpu_physid)[i] = cp->cpu_id;
	}
}

void
pghw_physid_destroy(cpu_t *cp)
{
	if (cp->cpu_physid != NULL) {
		kmem_free(cp->cpu_physid, sizeof (cpu_physid_t));
		cp->cpu_physid = NULL;
	}
}

/*
 * Return a sequential level identifier for the specified
 * hardware sharing relationship
 */
int
pghw_level(pghw_type_t hw)
{
	return (pg_plat_hw_level(hw));
}

/*
 * Create a new, empty hwset.
 * This routine may block, and must not be called from any
 * paused CPU context.
 */
static group_t	*
pghw_set_create(pghw_type_t hw)
{
	group_t	*g;
	int	ret;

	/*
	 * Create the top level PG hw group if it doesn't already exist.
	 * This is a "set" of hardware sets, ordered (and indexed) by the
	 * pghw_type_t enum.
	 */
	if (pg_hw == NULL) {
		pg_hw = kmem_alloc(sizeof (group_t), KM_SLEEP);
		group_create(pg_hw);
		group_expand(pg_hw, (uint_t)PGHW_NUM_COMPONENTS);
	}

	/*
	 * Create the new hwset and add it to the top level pg_hw group.
	 */
	g = kmem_alloc(sizeof (group_t), KM_SLEEP);
	group_create(g);

	ret = group_add_at(pg_hw, g, (uint_t)hw);
	ASSERT(ret == 0);

	/*
	 * Update the table that maps hardware sharing relationships
	 * to hierarchy levels
	 */
	ASSERT(pghw_level_table[hw] == 0);
	pghw_level_table[hw] = pg_plat_hw_level(hw);

	return (g);
}

/*
 * Find the hwset associated with the given hardware sharing type
 */
group_t *
pghw_set_lookup(pghw_type_t hw)
{
	group_t	*hwset;

	if (pg_hw == NULL)
		return (NULL);

	hwset = GROUP_ACCESS(pg_hw, (uint_t)hw);
	return (hwset);
}
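
/*
 * Illustrative sketch (pghw_example_ninstances() is a hypothetical helper):
 * because pg_hw is indexed by pghw_type_t and each hwset is itself a group
 * of physical PGs, the number of sharing instances of a given type present
 * in the system can be read directly off the hwset:
 */
static uint_t
pghw_example_ninstances(pghw_type_t hw)
{
	group_t	*hwset;

	if ((hwset = pghw_set_lookup(hw)) == NULL)
		return (0);

	return (GROUP_SIZE(hwset));
}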

/*
 * Add a PG to a hwset
 */
static void
pghw_set_add(group_t *hwset, pghw_t *pg)
{
	(void) group_add(hwset, pg, GRP_RESIZE);
}

/*
 * Remove a PG from a hwset
 */
static void
pghw_set_remove(group_t *hwset, pghw_t *pg)
{
	int result;

	result = group_remove(hwset, pg, GRP_RESIZE);
	ASSERT(result == 0);
}


/*
 * Return a string name given a pg_hw sharing type
 */
#define	PGHW_TYPE_NAME_MAX	8

static char *
pghw_type_string(pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return ("ipipe");
	case PGHW_CACHE:
		return ("cache");
	case PGHW_FPU:
		return ("fpu");
	case PGHW_MPIPE:
		return ("mpipe");
	case PGHW_CHIP:
		return ("chip");
	case PGHW_MEMORY:
		return ("memory");
	default:
		return ("unknown");
	}
}

/*
 * Create / Update routines for PG hw kstats
 *
 * These kstats are intended to provide informational / debugging
 * observability into the types and nature of the system's detected
 * hardware sharing relationships.
 */
void
pghw_kstat_create(pghw_t *pg)
{
	/*
	 * Create a physical pg kstat
	 */
	if ((pg->pghw_kstat = kstat_create("pg", ((pg_t *)pg)->pg_id,
	    "pg", "pg", KSTAT_TYPE_NAMED,
	    sizeof (pghw_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) != NULL) {
		pg->pghw_kstat->ks_data_size += PG_CLASS_NAME_MAX;
		pg->pghw_kstat->ks_data_size += PGHW_TYPE_NAME_MAX;
		pg->pghw_kstat->ks_lock = &pghw_kstat_lock;
		pg->pghw_kstat->ks_data = &pghw_kstat;
		pg->pghw_kstat->ks_update = pghw_kstat_update;
		pg->pghw_kstat->ks_private = pg;
		kstat_install(pg->pghw_kstat);
	}
}

int
pghw_kstat_update(kstat_t *ksp, int rw)
{
	struct pghw_kstat	*pgsp = &pghw_kstat;
	pghw_t			*pg = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	pgsp->pg_id.value.ui64 = ((pg_t *)pg)->pg_id;
	pgsp->pg_ncpus.value.ui64 = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
	pgsp->pg_instance_id.value.ui64 = (uint64_t)pg->pghw_instance;
	kstat_named_setstr(&pgsp->pg_class, ((pg_t *)pg)->pg_class->pgc_name);
	kstat_named_setstr(&pgsp->pg_hw, pghw_type_string(pg->pghw_hw));

	return (0);
}
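
/*
 * The kstats above are created with module "pg", class "pg" and the PG's id
 * as the kstat instance, so they can be examined from userland with
 * kstat(1M), for example:
 *
 *	# kstat -m pg
 *
 * which reports the id, pg_class, ncpus, instance_id and hardware fields
 * filled in by pghw_kstat_update().
 */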