/*
 * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD: src/sys/dev/ipmi/ipmi.c,v 1.16 2011/11/07 15:43:11 ed Exp $ */

/*
 * Copyright 2012, Joyent, Inc.  All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 */

#include <sys/devops.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/uio.h>
#include <sys/stat.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/smbios.h>
#include <sys/smbios_impl.h>
#include <sys/ipmi.h>
#include "ipmivars.h"

/*
 * Request management.
 */

/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = kmem_zalloc(sizeof (struct ipmi_request) + requestlen + replylen,
	    KM_SLEEP);
	req->ir_sz = sizeof (struct ipmi_request) + requestlen + replylen;
	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;
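	/*
	 * The request and reply buffers live in the same allocation,
	 * laid out immediately after the ipmi_request structure itself.
	 */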
	if (requestlen) {
		req->ir_request = (uchar_t *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (uchar_t *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}

	cv_init(&req->ir_cv, NULL, CV_DEFAULT, NULL);
	req->ir_status = IRS_ALLOCATED;

	return (req);
}

/* Free a request no longer in use. */
void
ipmi_free_request(struct ipmi_request *req)
{
	if (req == NULL)
		return;

	cv_destroy(&req->ir_cv);

	kmem_free(req, req->ir_sz);
}

/* Store a processed request on the appropriate completion queue. */
/*ARGSUSED*/
void
ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
	struct ipmi_device *dev;

	IPMI_LOCK_ASSERT(sc);

	/*
	 * Anonymous requests (submitted from inside the driver) always
	 * have a waiter that we awaken.  If such a request has been
	 * canceled, the waiter has already given up on it, so simply
	 * free it here.
	 */
	if (req->ir_status == IRS_CANCELED) {
		ASSERT(req->ir_owner == NULL);
		ipmi_free_request(req);
		return;
	}
	req->ir_status = IRS_COMPLETED;
	cv_signal(&req->ir_cv);

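	/*
	 * Requests that have an owning device (i.e. were not submitted
	 * from inside the driver) also land on that device's completion
	 * queue, and any poll() waiters are notified.
	 */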
	if (req->ir_owner != NULL) {
		dev = req->ir_owner;
		TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
		pollwakeup(dev->ipmi_pollhead, POLLIN | POLLRDNORM);
	}
}

/*
 * Enqueue an internal driver request and wait until it is completed.
 */
static int
ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request **preq,
    int timo)
{
	int error;
	struct ipmi_request *req = *preq;

	ASSERT(req->ir_owner == NULL);

	IPMI_LOCK(sc);
	error = sc->ipmi_enqueue_request(sc, req);

	if (error != 0) {
		IPMI_UNLOCK(sc);
		return (error);
	}

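	/* A timeout of zero means block until the request completes. */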
	while (req->ir_status != IRS_COMPLETED && error >= 0)
		if (timo == 0)
			cv_wait(&req->ir_cv, &sc->ipmi_lock);
		else
			error = cv_timedwait(&req->ir_cv, &sc->ipmi_lock,
			    ddi_get_lbolt() + timo);

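	/*
	 * At this point the request has either completed, is still
	 * sitting on the pending queue (we timed out before the
	 * interface thread picked it up), or is being processed by the
	 * interface.  In the first case, return the request's own error.
	 * In the second, pull it off the queue and let the caller free
	 * it.  In the third, the interface code still owns the request;
	 * mark it canceled so that ipmi_complete_request() frees it, and
	 * clear the caller's pointer so it is not freed twice.
	 */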
	switch (req->ir_status) {
		case IRS_QUEUED:
			TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
			req->ir_status = IRS_CANCELED;
			error = EWOULDBLOCK;
			break;
		case IRS_PROCESSED:
			req->ir_status = IRS_CANCELED;
			error = EWOULDBLOCK;
			*preq = NULL;
			break;
		case IRS_COMPLETED:
			error = req->ir_error;
			break;
		default:
			panic("IPMI: Invalid request status");
			break;
	}
	IPMI_UNLOCK(sc);

	return (error);
}

/*
 * Helper routine for polled system interfaces that use
 * ipmi_polled_enqueue_request() to queue requests.  This routine
 * waits until there is a pending request and then returns the first
 * request.  If the driver is shutting down, it returns NULL.
 */
struct ipmi_request *
ipmi_dequeue_request(struct ipmi_softc *sc)
{
	struct ipmi_request *req;

	IPMI_LOCK_ASSERT(sc);

	while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests))
		cv_wait(&sc->ipmi_request_added, &sc->ipmi_lock);
	if (sc->ipmi_detaching)
		return (NULL);

	req = TAILQ_FIRST(&sc->ipmi_pending_requests);
	TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
	req->ir_status = IRS_PROCESSED;
	return (req);
}

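/* Queue a request and wake the interface's service thread. */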
int
ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
{

	IPMI_LOCK_ASSERT(sc);

	TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
	req->ir_status = IRS_QUEUED;
	cv_signal(&sc->ipmi_request_added);
	return (0);
}

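/* Tear down the request service taskq and interface-independent state. */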
void
ipmi_shutdown(struct ipmi_softc *sc)
{
	taskq_destroy(sc->ipmi_kthread);

	cv_destroy(&sc->ipmi_request_added);
	mutex_destroy(&sc->ipmi_lock);
}

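/*
 * Bring up the interface and probe the BMC: fetch its device ID, count
 * the available channels, and check for a watchdog timer.
 */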
boolean_t
ipmi_startup(struct ipmi_softc *sc)
{
	struct ipmi_request *req;
	int error, i;

	/* Initialize interface-independent state. */
	mutex_init(&sc->ipmi_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sc->ipmi_request_added, NULL, CV_DEFAULT, NULL);
	TAILQ_INIT(&sc->ipmi_pending_requests);

	/* Initialize interface-dependent state. */
	error = sc->ipmi_startup(sc);
	if (error) {
		cmn_err(CE_WARN, "Failed to initialize interface: %d", error);
		return (B_FALSE);
	}

	/* Send a GET_DEVICE_ID request. */
	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_DEVICE_ID, 0, 15);

	error = ipmi_submit_driver_request(sc, &req, MAX_TIMEOUT);
	if (error == EWOULDBLOCK) {
		cmn_err(CE_WARN, "Timed out waiting for GET_DEVICE_ID");
		ipmi_free_request(req);
		return (B_FALSE);
	} else if (error) {
		cmn_err(CE_WARN, "Failed GET_DEVICE_ID: %d", error);
		ipmi_free_request(req);
		return (B_FALSE);
	} else if (req->ir_compcode != 0) {
		cmn_err(CE_WARN,
		    "Bad completion code for GET_DEVICE_ID: %d",
		    req->ir_compcode);
		ipmi_free_request(req);
		return (B_FALSE);
	} else if (req->ir_replylen < 5) {
		cmn_err(CE_WARN, "Short reply for GET_DEVICE_ID: %d",
		    req->ir_replylen);
		ipmi_free_request(req);
		return (B_FALSE);
	}

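	/*
	 * GET_DEVICE_ID reply layout (per the IPMI spec): byte 0 holds
	 * the device ID, byte 1 the device revision, bytes 2 and 3 the
	 * firmware revision (major, then minor in BCD), and byte 4 the
	 * BCD-encoded IPMI version with the major digit in the low
	 * nibble.
	 */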
	cmn_err(CE_CONT, "!device rev. %d, firmware rev. %d.%d%d, "
	    "version %d.%d",
	    req->ir_reply[1] & 0x0f, req->ir_reply[2] & 0x7f,
	    req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
	    req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4);

	ipmi_free_request(req);

	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_CLEAR_FLAGS, 1, 0);

	if ((error = ipmi_submit_driver_request(sc, &req, 0)) != 0) {
		cmn_err(CE_WARN, "Failed to clear IPMI flags: %d", error);
		ipmi_free_request(req);
		return (B_FALSE);
	}

	/*
	 * Completion codes from the IPMI spec: 0xc0 means the BMC is
	 * busy, 0xc1 means the command is invalid.
	 */
	if (req->ir_compcode == 0xc0) {
		cmn_err(CE_NOTE, "!Clear flags is busy");
	}
	if (req->ir_compcode == 0xc1) {
		cmn_err(CE_NOTE, "!Clear flags illegal");
	}
	ipmi_free_request(req);

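	/*
	 * Count the available channels by probing channels 0 through 7
	 * with GET_CHANNEL_INFO; stop at the first channel that fails
	 * or returns a non-zero completion code.
	 */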
	for (i = 0; i < 8; i++) {
		req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
		    IPMI_GET_CHANNEL_INFO, 1, 0);
		req->ir_request[0] = (uchar_t)i;

		if (ipmi_submit_driver_request(sc, &req, 0) != 0) {
			ipmi_free_request(req);
			break;
		}

		if (req->ir_compcode != 0) {
			ipmi_free_request(req);
			break;
		}
		ipmi_free_request(req);
	}
	cmn_err(CE_CONT, "!number of channels %d", i);

	/* probe for watchdog */
	req = ipmi_alloc_driver_request(IPMI_ADDR(IPMI_APP_REQUEST, 0),
	    IPMI_GET_WDOG, 0, 0);

	if ((error = ipmi_submit_driver_request(sc, &req, 0)) != 0) {
		cmn_err(CE_WARN, "Failed to check IPMI watchdog: %d", error);
		ipmi_free_request(req);
		return (B_FALSE);
	}

	if (req->ir_compcode == 0x00) {
		cmn_err(CE_CONT, "!watchdog supported");

		/*
		 * Here is where we could register a watchdog event handler.
		 * See ipmi_wd_event() in the FreeBSD code.
		 */
	}
	ipmi_free_request(req);

	return (B_TRUE);
}
330