/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/machsystm.h>
#include <sys/cmp.h>
#include <sys/cmt.h>

/*
 * Note: For now, assume a chip ID of 0 for all CPUs until additional
 * information is available via the machine description (MD) table.
 */

/*
 * Returns 1 if cpuid is CMP-capable, 0 otherwise.
 */
/*ARGSUSED*/
int
cmp_cpu_is_cmp(processorid_t cpuid)
{
	return (0);
}

/*
 * Indicate that this core (cpuid) resides on the chip indicated by chipid.
 * Called during boot and DR add.
 */
/*ARGSUSED*/
void
cmp_add_cpu(chipid_t chipid, processorid_t cpuid)
{
}

/*
 * Indicate that this core (cpuid) is being DR removed.
 */
/*ARGSUSED*/
void
cmp_delete_cpu(processorid_t cpuid)
{
}

/*
 * Called when cpuid is being onlined or offlined.  If the offlined
 * processor is CMP-capable, then the current target of the CMP Error
 * Steering Register is set either to the lowest-numbered on-line sibling
 * core, if one exists, or to this core.
 */
/*ARGSUSED*/
void
cmp_error_resteer(processorid_t cpuid)
{
}

/*
 * Return the chip ID for the given CPU, as recorded in its machcpu
 * structure.
 */
chipid_t
cmp_cpu_to_chip(processorid_t cpuid)
{
	return (cpu[cpuid]->cpu_m.cpu_chip);
}

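/*
 * Return 1 if the specified hardware sharing relationship describes a
 * resource that CPUs share on this platform, 0 otherwise.
 */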
/*ARGSUSED*/
int
pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (1);
	case PGHW_FPU:
		return (1);
	case PGHW_MPIPE:
		return (1);
	}
	return (0);
}

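/*
 * Return 1 if cpu_a and cpu_b share the specified hardware resource,
 * which requires that both participate in the sharing relationship and
 * carry the same instance ID for it.
 */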
int
pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
{
	if (pg_plat_hw_shared(cpu_a, hw) == 0 ||
	    pg_plat_hw_shared(cpu_b, hw) == 0)
		return (0);

	return (pg_plat_hw_instance_id(cpu_a, hw) ==
	    pg_plat_hw_instance_id(cpu_b, hw));
}

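/*
 * Return this CPU's instance ID for the specified hardware sharing
 * relationship, or -1 if the relationship is not recognized.
 */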
id_t
pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
{
	switch (hw) {
	case PGHW_IPIPE:
		return (cpu->cpu_m.cpu_ipipe);
	case PGHW_CHIP:
		return (cpu->cpu_m.cpu_chip);
	case PGHW_MPIPE:
		return (cpu->cpu_m.cpu_mpipe);
	case PGHW_FPU:
		return (cpu->cpu_m.cpu_fpu);
	default:
		return (-1);
	}
}

/*
 * Rank the relative importance of optimizing for hw1 or hw2.
 * Relationships that appear later in hw_hier rank higher, and the
 * higher-ranking relationship is returned.
 */
pghw_type_t
pg_plat_hw_rank(pghw_type_t hw1, pghw_type_t hw2)
{
	int i;
	int rank1 = 0;
	int rank2 = 0;

	static pghw_type_t hw_hier[] = {
		PGHW_IPIPE,
		PGHW_FPU,
		PGHW_MPIPE,
		PGHW_NUM_COMPONENTS
	};

	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
		if (hw_hier[i] == hw1)
			rank1 = i;
		if (hw_hier[i] == hw2)
			rank2 = i;
	}
	if (rank1 > rank2)
		return (hw1);
	else
		return (hw2);
}

/*
 * Override the default CMT dispatcher policy for the specified
 * hardware sharing relationship.
 */
/* ARGSUSED */
pg_cmt_policy_t
pg_plat_cmt_policy(pghw_type_t hw)
{
	/* Accept the default policies */
	return (CMT_NO_POLICY);
}

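/*
 * Return the core ID recorded for this CPU in its machcpu structure.
 */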
id_t
pg_plat_get_core_id(cpu_t *cpu)
{
	return (cpu->cpu_m.cpu_core);
}

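/*
 * Disable the dispatcher's nosteal optimization on this platform by
 * setting the nosteal interval to zero.
 */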
void
cmp_set_nosteal_interval(void)
{
	nosteal_nsec = 0;
}

/*
 * Return 1 if CMT load balancing policies should be
 * implemented across instances of the specified hardware
 * sharing relationship.
 */
int
pg_cmt_load_bal_hw(pghw_type_t hw)
{
	if (hw == PGHW_IPIPE ||
	    hw == PGHW_FPU ||
	    hw == PGHW_MPIPE)
		return (1);
	else
		return (0);
}

/*
 * Return 1 if thread affinity policies should be implemented
 * for instances of the specified hardware sharing relationship.
 */
int
pg_cmt_affinity_hw(pghw_type_t hw)
{
	if (hw == PGHW_CACHE)
		return (1);
	else
		return (0);
}

/*
 * Return the number of counter events requested to measure hardware
 * capacity and utilization, and set up CPC requests for the specified
 * CPU if a list to which the requests can be added is given.
 */
int
/* LINTED E_FUNC_ARG_UNUSED */
cu_plat_cpc_init(cpu_t *cp, kcpc_request_list_t *reqs, int nreqs)
{
	/*
	 * Return an error to tell the common code to decide what counter
	 * events to program on this CPU for measuring hardware capacity
	 * and utilization.
	 */
	return (-1);
}