/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/group.h>
#include <sys/pg.h>
#include <sys/pghw.h>
#include <sys/cpu_pm.h>

/*
 * Processor Groups: Hardware sharing relationship layer
 *
 * This file implements an extension to Processor Groups to capture
 * hardware sharing relationships existing between logical CPUs. Examples of
 * hardware sharing relationships include shared caches on some CMT
 * processor architectures, or shared local memory controllers on NUMA
 * based system architectures.
 *
 * The pghw_t structure represents the extended PG. The first member
 * of the structure is the generic pg_t with the pghw specific members
 * following. The generic pg_t *must* remain the first member of the
 * structure as the code uses casting of structure references to access
 * the generic pg_t structure elements.
 *
 * In addition to the generic CPU grouping, physical PGs have a hardware
 * sharing relationship enumerated "type", and an instance id. The enumerated
 * type is defined by the pghw_type_t enumeration, while the instance id
 * uniquely identifies the sharing instance from among others of the same
 * hardware sharing type.
 *
 * The physical PGs are organized into an overall hierarchy, and are tracked
 * in a number of different per-CPU and per-pghw_type_t groups.
 * As an example:
 *
 * -------------
 * | pg_hw     |
 * | (group_t) |
 * -------------
 *  ||                          ============================
 *  ||\\-----------------------//       \\                 \\
 *  ||  | hwset (PGHW_CHIP)   |        -------------      -------------
 *  ||  | (group_t)           |        | pghw_t    |      | pghw_t    |
 *  ||  -----------------------        | chip 0    |      | chip 1    |
 *  ||                                 -------------      -------------
 *  ||                                 \\  \\  \\  \\     \\  \\  \\  \\
 *  ||                                  cpu cpu cpu cpu    cpu cpu cpu cpu
 *  ||
 *  ||                          ============================
 *  ||\\-----------------------//       \\                 \\
 *  ||  | hwset (PGHW_IPIPE)  |        -------------      -------------
 *  ||  | (group_t)           |        | pghw_t    |      | pghw_t    |
 *  ||  -----------------------        | ipipe 0   |      | ipipe 1   |
 *  ||                                 -------------      -------------
 *  ||                                 \\  \\             \\  \\
 *  ||                                  cpu cpu            cpu cpu
 *  ...
 *
 *
 * The top level pg_hw is a group of "hwset" groups. Each hwset holds a group
 * of physical PGs of the same hardware sharing type. Within each hwset, the
 * PG's instance id uniquely identifies the grouping relationship among other
 * groupings of the same sharing type. The instance id for a grouping is
 * platform defined, and in some cases may be used by platform code as a handle
 * to search for a particular relationship instance.
 *
 * Each physical PG (by virtue of the embedded pg_t) contains a group of CPUs
 * that participate in the sharing relationship. Each CPU also has associated
 * with it a grouping tracking the PGs in which the CPU belongs. This can be
 * used to iterate over the various relationships in which the CPU participates
 * (the CPU's chip, cache, lgroup, etc.).
 *
 * The hwsets are created dynamically as new hardware sharing relationship types
 * are instantiated. They are never destroyed, as once a given relationship
 * type appears in the system, it is quite likely that at least one instance of
 * that relationship will always persist as long as the system is running.
 */
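
/*
 * As an illustration of the hierarchy above (a minimal sketch; assumes
 * the PG topology is not changing, e.g. cpu_lock is held, and uses
 * PGHW_CHIP purely as an example), the PGs of a given sharing type can
 * be visited by looking up that type's hwset and iterating over it:
 *
 *	group_t		*hwset;
 *	group_iter_t	i;
 *	pghw_t		*pg;
 *	uint_t		ncpus = 0;
 *
 *	if ((hwset = pghw_set_lookup(PGHW_CHIP)) != NULL) {
 *		group_iter_init(&i);
 *		while ((pg = group_iterate(hwset, &i)) != NULL)
 *			ncpus += GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
 *	}
 */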

static group_t		*pg_hw;		/* top level pg hw group */

/*
 * Physical PG kstats
 */
struct pghw_kstat {
	kstat_named_t	pg_id;
	kstat_named_t	pg_class;
	kstat_named_t	pg_ncpus;
	kstat_named_t	pg_instance_id;
	kstat_named_t	pg_hw;
	kstat_named_t	pg_policy;
} pghw_kstat = {
	{ "id",			KSTAT_DATA_UINT64 },
	{ "pg_class",		KSTAT_DATA_STRING },
	{ "ncpus",		KSTAT_DATA_UINT64 },
	{ "instance_id",	KSTAT_DATA_UINT64 },
	{ "hardware",		KSTAT_DATA_STRING },
	{ "policy",		KSTAT_DATA_STRING },
};

kmutex_t		pghw_kstat_lock;

/*
 * hwset operations
 */
static group_t		*pghw_set_create(pghw_type_t);
static void		pghw_set_add(group_t *, pghw_t *);
static void		pghw_set_remove(group_t *, pghw_t *);

/*
 * Initialize the physical portion of a hardware PG
 */
void
pghw_init(pghw_t *pg, cpu_t *cp, pghw_type_t hw)
{
	group_t		*hwset;

	if ((hwset = pghw_set_lookup(hw)) == NULL) {
		/*
		 * Haven't seen this hardware type yet
		 */
		hwset = pghw_set_create(hw);
	}

	pghw_set_add(hwset, pg);
	pg->pghw_hw = hw;
	pg->pghw_instance =
	    pg_plat_hw_instance_id(cp, hw);
	pghw_kstat_create(pg);

	/*
	 * Hardware sharing relationship specific initialization
	 */
	switch (pg->pghw_hw) {
	case PGHW_POW_ACTIVE:
		pg->pghw_handle =
		    (pghw_handle_t)cpupm_domain_init(cp, CPUPM_DTYPE_ACTIVE);
		break;
	case PGHW_POW_IDLE:
		pg->pghw_handle =
		    (pghw_handle_t)cpupm_domain_init(cp, CPUPM_DTYPE_IDLE);
		break;
	default:
		pg->pghw_handle = (pghw_handle_t)NULL;
	}
}

/*
 * Teardown the physical portion of a hardware PG
 */
void
pghw_fini(pghw_t *pg)
{
	group_t		*hwset;

	hwset = pghw_set_lookup(pg->pghw_hw);
	ASSERT(hwset != NULL);

	pghw_set_remove(hwset, pg);
	pg->pghw_instance = (id_t)PGHW_INSTANCE_ANON;
	pg->pghw_hw = (pghw_type_t)-1;

	if (pg->pghw_kstat)
		kstat_delete(pg->pghw_kstat);
}

/*
 * Find an existing physical PG in which to place
 * the given CPU for the specified hardware sharing
 * relationship
 */
pghw_t *
pghw_place_cpu(cpu_t *cp, pghw_type_t hw)
{
	group_t		*hwset;

	if ((hwset = pghw_set_lookup(hw)) == NULL) {
		return (NULL);
	}

	return ((pghw_t *)pg_cpu_find_pg(cp, hwset));
}
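
/*
 * A minimal sketch of how a hardware PG class might combine
 * pghw_place_cpu() and pghw_init() when a CPU joins a sharing
 * relationship (the class id "class_cid" is hypothetical, and a real
 * class implementation would also add the CPU to the PG and maintain
 * its own class specific state):
 *
 *	pghw_t	*pg;
 *
 *	if ((pg = pghw_place_cpu(cp, PGHW_CHIP)) == NULL) {
 *		pg = (pghw_t *)pg_create(class_cid);
 *		pghw_init(pg, cp, PGHW_CHIP);
 *	}
 */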

/*
 * Find the pg representing the hw sharing relationship in which
 * cp belongs
 */
pghw_t *
pghw_find_pg(cpu_t *cp, pghw_type_t hw)
{
	group_iter_t	i;
	pghw_t	*pg;

	group_iter_init(&i);
	while ((pg = group_iterate(&cp->cpu_pg->pgs, &i)) != NULL) {
		if (pg->pghw_hw == hw)
			return (pg);
	}
	return (NULL);
}

/*
 * Find the PG of the given hardware sharing relationship
 * type with the given instance id
 */
pghw_t *
pghw_find_by_instance(id_t id, pghw_type_t hw)
{
	group_iter_t	i;
	group_t		*set;
	pghw_t		*pg;

	set = pghw_set_lookup(hw);
	if (set == NULL)
		return (NULL);

	group_iter_init(&i);
	while ((pg = group_iterate(set, &i)) != NULL) {
		if (pg->pghw_instance == id)
			return (pg);
	}
	return (NULL);
}
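
/*
 * Example lookups (a sketch; "cp" stands for some cpu_t pointer, and the
 * instance id 0 is purely illustrative). Both routines return NULL when
 * no matching PG exists:
 *
 *	pghw_t	*cache_pg, *chip_pg;
 *
 *	cache_pg = pghw_find_pg(cp, PGHW_CACHE);
 *	chip_pg = pghw_find_by_instance(0, PGHW_CHIP);
 */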

/*
 * CPU physical ID cache creation / destruction
 * The cache's elements are initialized to the CPU's id
 */
void
pghw_physid_create(cpu_t *cp)
{
	int	i;

	cp->cpu_physid = kmem_alloc(sizeof (cpu_physid_t), KM_SLEEP);

	for (i = 0; i < (sizeof (cpu_physid_t) / sizeof (id_t)); i++) {
		((id_t *)cp->cpu_physid)[i] = cp->cpu_id;
	}
}

void
pghw_physid_destroy(cpu_t *cp)
{
	if (cp->cpu_physid) {
		kmem_free(cp->cpu_physid, sizeof (cpu_physid_t));
		cp->cpu_physid = NULL;
	}
}
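
/*
 * A sketch of the initialization invariant described above (hypothetical
 * caller; assumes cp->cpu_physid was not already allocated): immediately
 * after creation, every id_t element of the cache reads back as the
 * CPU's own id until platform code replaces it with a real physical id.
 *
 *	pghw_physid_create(cp);
 *	ASSERT(((id_t *)cp->cpu_physid)[0] == cp->cpu_id);
 */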

/*
 * Create a new, empty hwset.
 * This routine may block, and must not be called from any
 * paused CPU context.
 */
static group_t	*
pghw_set_create(pghw_type_t hw)
{
	group_t	*g;
	int	ret;

	/*
	 * Create the top level PG hw group if it doesn't already exist.
	 * This is a "set" of hardware sets, ordered (and indexed) by the
	 * pghw_type_t enum.
	 */
	if (pg_hw == NULL) {
		pg_hw = kmem_alloc(sizeof (group_t), KM_SLEEP);
		group_create(pg_hw);
		group_expand(pg_hw, (uint_t)PGHW_NUM_COMPONENTS);
	}

	/*
	 * Create the new hwset and add it to the top level pg_hw group.
	 */
	g = kmem_alloc(sizeof (group_t), KM_SLEEP);
	group_create(g);

	ret = group_add_at(pg_hw, g, (uint_t)hw);
	ASSERT(ret == 0);

	return (g);
}

/*
 * Find the hwset associated with the given hardware sharing type
 */
group_t *
pghw_set_lookup(pghw_type_t hw)
{
	group_t	*hwset;

	if (pg_hw == NULL)
		return (NULL);

	hwset = GROUP_ACCESS(pg_hw, (uint_t)hw);
	return (hwset);
}

/*
 * Add a PG to a hwset
 */
static void
pghw_set_add(group_t *hwset, pghw_t *pg)
{
	(void) group_add(hwset, pg, GRP_RESIZE);
}

/*
 * Remove a PG from a hwset
 */
static void
pghw_set_remove(group_t *hwset, pghw_t *pg)
{
	int result;

	result = group_remove(hwset, pg, GRP_RESIZE);
	ASSERT(result == 0);
}


/*
 * Return a string name given a pg_hw sharing type
 */
static char *
pghw_type_string(pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return ("Integer Pipeline");
	case PGHW_CACHE:
		return ("Cache");
	case PGHW_FPU:
		return ("Floating Point Unit");
	case PGHW_MPIPE:
		return ("Data Pipe to memory");
	case PGHW_CHIP:
		return ("Socket");
	case PGHW_MEMORY:
		return ("Memory");
	case PGHW_POW_ACTIVE:
		return ("CPU PM Active Power Domain");
	case PGHW_POW_IDLE:
		return ("CPU PM Idle Power Domain");
	default:
		return ("unknown");
	}
}

/*
 * Create / Update routines for PG hw kstats
 *
 * These kstats provide informational / debugging observability into
 * the types and nature of the system's detected hardware sharing
 * relationships.
 */
void
pghw_kstat_create(pghw_t *pg)
{
	/*
	 * Create a physical pg kstat
	 */
	if ((pg->pghw_kstat = kstat_create("pg", ((pg_t *)pg)->pg_id,
	    "pg", "pg", KSTAT_TYPE_NAMED,
	    sizeof (pghw_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) != NULL) {
		/* Class string, hw string, and policy string */
		pg->pghw_kstat->ks_data_size += PG_CLASS_NAME_MAX;
		pg->pghw_kstat->ks_data_size += PGHW_KSTAT_STR_LEN_MAX;
		pg->pghw_kstat->ks_data_size += PGHW_KSTAT_STR_LEN_MAX;
		pg->pghw_kstat->ks_lock = &pghw_kstat_lock;
		pg->pghw_kstat->ks_data = &pghw_kstat;
		pg->pghw_kstat->ks_update = pghw_kstat_update;
		pg->pghw_kstat->ks_private = pg;
		kstat_install(pg->pghw_kstat);
	}
}
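
/*
 * A minimal userland sketch of reading one of these kstats with
 * libkstat (illustrative only; error handling is omitted and "pg_id"
 * stands for the pg_id of the PG of interest, which is used as the
 * kstat instance number above):
 *
 *	#include <stdio.h>
 *	#include <kstat.h>
 *
 *	kstat_ctl_t	*kc = kstat_open();
 *	kstat_t		*ksp = kstat_lookup(kc, "pg", pg_id, "pg");
 *
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *		kstat_named_t *kn = kstat_data_lookup(ksp, "hardware");
 *		(void) printf("hardware: %s\n", KSTAT_NAMED_STR_PTR(kn));
 *	}
 *	(void) kstat_close(kc);
 */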

int
pghw_kstat_update(kstat_t *ksp, int rw)
{
	struct pghw_kstat	*pgsp = &pghw_kstat;
	pghw_t			*pg = ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	pgsp->pg_id.value.ui64 = ((pg_t *)pg)->pg_id;
	pgsp->pg_ncpus.value.ui64 = GROUP_SIZE(&((pg_t *)pg)->pg_cpus);
	pgsp->pg_instance_id.value.ui64 = (uint64_t)pg->pghw_instance;
	kstat_named_setstr(&pgsp->pg_class, ((pg_t *)pg)->pg_class->pgc_name);
	kstat_named_setstr(&pgsp->pg_hw, pghw_type_string(pg->pghw_hw));
	kstat_named_setstr(&pgsp->pg_policy, pg_policy_name((pg_t *)pg));
	return (0);
}