/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be
 * the absolute bare-bones support and must be cognizant of both Intel and
 * AMD processors.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>

#include "gcpu.h"

/*
 * Set non-zero to prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

#define	GCPU_MAX_CHIPID		32
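/*
 * Per-chip shared state, indexed by chipid.  An entry is allocated by the
 * first core of a chip to pass through gcpu_init() and is retained
 * thereafter; see gcpu_fini().
 */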
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
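		/*
		 * If a sibling core installed its structure first, the
		 * compare-and-swap above returns that winner; discard our
		 * copy and adopt the existing one.
		 */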
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		}
	}

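	/*
	 * Count this cpu against the chip's active total and link the
	 * shared state into our per-cpu data.
	 */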
	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}

/*
 * Our cmi_fini entry point; undo the configuration performed by gcpu_init().
 */
void
gcpu_fini(cmi_hdl_t hdl)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
	struct gcpu_chipshared *sp;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return;

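	/*
	 * Tear down the per-cpu MCA state (including any bank logout areas
	 * allocated in gcpu_mca_init) before freeing our data structure.
	 */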
	gcpu_mca_fini(hdl);

	/*
	 * Drop this cpu from the chip's active count, but keep the shared
	 * data cached in gcpu_shared[] for reuse should a cpu on this chip
	 * be configured again.
	 */
	sp = gcpu_shared[chipid];
	ASSERT(sp != NULL);
	atomic_dec_32(&sp->gcpus_actv_cnt);

	if (gcpu != NULL)
		kmem_free(gcpu, sizeof (gcpu_data_t));

	/* Release reference count held in gcpu_init(). */
	cmi_hdl_rele(hdl);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

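	/*
	 * Give the model-specific support (cms) layer its post-startup
	 * hook, but only if our per-cpu data was successfully set up.
	 */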
	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized so we can begin polling now.
	 * Furthermore, our virq mechanism requires that everything
	 * be run on cpu 0 so we can assure that by starting from here.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started,
	 * so we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

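/*
 * GCPU_OP selects, at compile time, between the native and the xVM (Xen)
 * paravirtualized implementation of an operation; a NULL entry means the
 * operation is not provided in that environment.
 */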
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

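/*
 * Version of the cpu module interface (cmi) API that this module was
 * built against.
 */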
cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

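/*
 * Operations vector exported to the cmi layer.  NULL entries indicate
 * operations that are not supported in this (native or xVM) environment.
 */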
const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
};

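/*
 * Loadable module linkage: register this module with the system as a cpu
 * module.
 */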
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

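/*
 * Standard loadable module entry points.
 */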
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}