/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Generic x86 CPU Module
 *
 * This CPU module is used for generic x86 CPUs when Solaris has no other
 * CPU-specific support module available.  Code in this module should be
 * limited to the absolute bare-bones support and must be cognizant of
 * Intel, AMD, and other x86 vendors alike.
 */

#include <sys/types.h>
#include <sys/cpu_module_impl.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/pghw.h>

#include "gcpu.h"

/*
 * Prevent generic cpu support from loading.
 */
int gcpu_disable = 0;

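/*
 * Per-chip state shared by all cores on a chip; entries are allocated
 * lazily in gcpu_init and indexed by chipid.
 */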
#define	GCPU_MAX_CHIPID		32
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];

/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp == NULL) {
			mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER,
			    NULL);
			mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER,
			    NULL);
		} else {
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		}
	}
	gcpu->gcpu_shared = sp;

	return (0);
}

void
gcpu_post_startup(cmi_hdl_t hdl)
{
	gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);

	if (gcpu_disable)
		return;

	if (gcpu != NULL)
		cms_post_startup(hdl);
#ifdef __xpv
	/*
	 * All cpu handles are initialized by now, so we can begin polling.
	 * Furthermore, our virq mechanism requires that everything run on
	 * cpu 0, which we ensure by starting the poller from here.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

void
gcpu_post_mpstartup(cmi_hdl_t hdl)
{
	if (gcpu_disable)
		return;

	cms_post_mpstartup(hdl);

#ifndef __xpv
	/*
	 * All cpu handles are initialized only once all cpus are started,
	 * so we can begin polling post mp startup.
	 */
	gcpu_mca_poll_start(hdl);
#endif
}

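/*
 * GCPU_OP selects between the native and xVM (hypervisor) form of an
 * operation, depending on whether this module is compiled for the
 * i86xpv platform.
 */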
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

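/*
 * CMI entry points exported to the cpu module interface framework.  A NULL
 * entry indicates an operation this module does not provide, or one that
 * does not apply when running under the hypervisor.
 */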
cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	NULL,					/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
};

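/*
 * Standard kernel module linkage for a cpu module.
 */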
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

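/*
 * _init, _info and _fini are the standard loadable module entry points;
 * the real per-cpu initialization happens via the cmi ops vector above.
 */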
int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}