/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <sys/cmp.h>
#include <sys/cmt.h>
#include <sys/debug.h>
#include <sys/disp.h>
#include <sys/cheetahregs.h>

/*
 * Note: We assume that chipid == portid. This is not necessarily true.
 * We buried it down here in the implementation, and not in the
 * interfaces, so that we can change it later.
 */

/*
 * pre-alloc'ed because this is used early in boot (before the memory
 * allocator is available).
 */
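/* chips[chipid] is the set of cpuids (cores) that reside on chip chipid */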
static cpuset_t chips[MAX_CPU_CHIPID];

/*
 * Returns 1 if cpuid is CMP-capable, 0 otherwise.
 */
int
cmp_cpu_is_cmp(processorid_t cpuid)
{
	chipid_t chipid;

	/* N.B. We're assuming that the cpunode[].portid is still intact */
	chipid = cpunodes[cpuid].portid;
	return (!CPUSET_ISNULL(chips[chipid]));
}

/*
 * Indicate that this core (cpuid) resides on the chip indicated by chipid.
 * Called during boot and DR add.
 */
void
cmp_add_cpu(chipid_t chipid, processorid_t cpuid)
{
	CPUSET_ADD(chips[chipid], cpuid);
}

/*
 * Indicate that this core (cpuid) is being DR removed.
 */
void
cmp_delete_cpu(processorid_t cpuid)
{
	chipid_t chipid;

	/* N.B. We're assuming that the cpunode[].portid is still intact */
	chipid = cpunodes[cpuid].portid;
	CPUSET_DEL(chips[chipid], cpuid);
}

/*
 * Called when cpuid is being onlined or offlined.  If the offlined
 * processor is CMP-capable, then the current target of the CMP Error
 * Steering Register is set to either the lowest numbered on-line
 * sibling core, if one exists, or else to this core.
 */
/* ARGSUSED */
void
cmp_error_resteer(processorid_t cpuid)
{
#ifndef _CMP_NO_ERROR_STEERING
	cpuset_t mycores;
	cpu_t *cpu;
	chipid_t chipid;
	int i;

	if (!cmp_cpu_is_cmp(cpuid))
		return;

	ASSERT(MUTEX_HELD(&cpu_lock));
	chipid = cpunodes[cpuid].portid;
	mycores = chips[chipid];

	/* Look for an online sibling core */
	for (i = 0; i < NCPU; i++) {
		if (i == cpuid)
			continue;

		if (CPU_IN_SET(mycores, i) &&
		    (cpu = cpu_get(i)) != NULL && cpu_is_active(cpu)) {
			/* Found one, reset error steering */
			xc_one(i, (xcfunc_t *)set_cmp_error_steering, 0, 0);
			break;
		}
	}

	/* No online sibling cores, point to this core. */
	if (i == NCPU) {
		xc_one(cpuid, (xcfunc_t *)set_cmp_error_steering, 0, 0);
	}
#else
	/* Not all CMPs support error steering (e.g. Fujitsu's Olympus-C) */
	return;
#endif /* _CMP_NO_ERROR_STEERING */
}

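/*
 * Return the id of the chip on which cpuid resides.
 */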
chipid_t
cmp_cpu_to_chip(processorid_t cpuid)
{
	if (!cmp_cpu_is_cmp(cpuid)) {
		/* This CPU is not a CMP, so by definition chipid==cpuid */
		ASSERT(cpuid < MAX_CPU_CHIPID && CPUSET_ISNULL(chips[cpuid]));
		return (cpuid);
	}

	/* N.B. We're assuming that the cpunode[].portid is still intact */
	return (cpunodes[cpuid].portid);
}

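/*
 * Return 1 if the specified hardware sharing relationship exists for
 * cp's processor implementation, 0 otherwise.
 */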
/* ARGSUSED */
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	int impl;

	impl = cpunodes[cp->cpu_id].implementation;

	switch (hw) {
	case PGHW_IPIPE:
		if ((IS_OLYMPUS_C(impl)) || (IS_JUPITER(impl)))
			return (1);
		break;
	case PGHW_CHIP:
		if (IS_JAGUAR(impl) || IS_PANTHER(impl) ||
		    IS_OLYMPUS_C(impl) || IS_JUPITER(impl))
			return (1);
		break;
	case PGHW_CACHE:
		if (IS_PANTHER(impl) || IS_OLYMPUS_C(impl) || IS_JUPITER(impl))
			return (1);
		break;
	}
	return (0);
}

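/*
 * Return 1 if cpu_a and cpu_b share an instance of the specified hardware
 * component, 0 otherwise.
 */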
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	int impl;

	impl = cpunodes[cpu_a->cpu_id].implementation;

	switch (hw) {
	case PGHW_IPIPE:
	case PGHW_CHIP:
		return (pg_plat_hw_instance_id(cpu_a, hw) ==
		    pg_plat_hw_instance_id(cpu_b, hw));
	case PGHW_CACHE:
		if ((IS_PANTHER(impl) || IS_OLYMPUS_C(impl) ||
		    IS_JUPITER(impl)) && pg_plat_cpus_share(cpu_a,
		    cpu_b, PGHW_CHIP)) {
			return (1);
		} else {
			return (0);
		}
	}
	return (0);
}

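/*
 * Return a platform-wide id for the instance of the specified hardware
 * component to which cpu belongs, or -1 for an unknown component.
 */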
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	int impl;

	impl = cpunodes[cpu->cpu_id].implementation;

	switch (hw) {
	case PGHW_IPIPE:
		if (IS_OLYMPUS_C(impl) || IS_JUPITER(impl)) {
			/*
			 * Currently only Fujitsu Olympus-C (SPARC64-VI) and
			 * Jupiter (SPARC64-VII) processors support
			 * multi-stranded cores. Return the cpu_id with the
			 * strand bit masked out.
			 */
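			/* For example, strands 4 and 5 both map to id 4. */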
			return ((id_t)((uint_t)cpu->cpu_id & ~(0x1)));
		} else {
			return (cpu->cpu_id);
		}
	case PGHW_CHIP:
		return (cmp_cpu_to_chip(cpu->cpu_id));
	case PGHW_CACHE:
		if (IS_PANTHER(impl) ||
		    IS_OLYMPUS_C(impl) || IS_JUPITER(impl))
			return (pg_plat_hw_instance_id(cpu, PGHW_CHIP));
		else
			return (cpu->cpu_id);
	default:
		return (-1);
	}
}

/*
 * Rank the relative importance of optimizing for hw1 or hw2
 */
pghw_type_t
pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
{
	int i;
	int rank1 = 0;
	int rank2 = 0;

	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_CHIP,
		PGHW_CACHE,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw1)
			rank1 = i;
		if (hw_hier[i] == hw2)
			rank2 = i;
	}

	if (rank1 > rank2)
		return (hw1);
	else
		return (hw2);
}

/*
 * Override the default CMT dispatcher policy for the specified
 * hardware sharing relationship
 */
/* ARGSUSED */
pg_cmt_policy_t
pg_plat_cmt_policy(pghw_type_t hw)
{
	/* Accept the default policies */
	return (CMT_NO_POLICY);
}

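/*
 * Return the core id for the given CPU.
 */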
id_t
pg_plat_get_core_id(cpu_t *cp)
{
	return (pg_plat_hw_instance_id(cp, PGHW_IPIPE));
}

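/*
 * Platform hook to set the dispatcher's nosteal interval (see disp_getbest()).
 */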
void
cmp_set_nosteal_interval(void)
{
	/* Set the nosteal interval (used by disp_getbest()) to 100us */
	nosteal_nsec = 100000UL;
}
/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_CHIP)
		return (1);
	else
		return (0);
}
/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

/*
 * Return the number of counter events needed to measure hardware capacity
 * and utilization, and set up CPC requests for the specified CPU if a list
 * to add the requests to is given.
 */
int
/* LINTED E_FUNC_ARG_UNUSED */
cu_plat_cpc_init(cpu_t *cp, kcpc_request_list_t *reqs, int nreqs)
{
	/*
	 * Return error to tell common code to decide what counter events to
	 * program on this CPU for measuring hardware capacity and utilization
	 */
	return (-1);
}