// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_ipmi.c - ACPI IPMI opregion
 *
 *  Copyright (C) 2010, 2013 Intel Corporation
 *    Author: Zhao Yakui <yakui.zhao@intel.com>
 *            Lv Zheng <lv.zheng@intel.com>
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/ipmi.h>
#include <linux/spinlock.h>

MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");

#define ACPI_IPMI_OK			0
#define ACPI_IPMI_TIMEOUT		0x10
#define ACPI_IPMI_UNKNOWN		0x07
/* the IPMI timeout is 5s (i.e. 5000 ms) */
#define IPMI_TIMEOUT			(5000)
#define ACPI_IPMI_MAX_MSG_LENGTH	64

struct acpi_ipmi_device {
	/* the device list attached to driver_data.ipmi_devices */
	struct list_head head;

	/* the IPMI request message list */
	struct list_head tx_msg_list;

	spinlock_t tx_msg_lock;
	acpi_handle handle;
	struct device *dev;
	struct ipmi_user *user_interface;
	int ipmi_ifnum; /* IPMI interface number */
	long curr_msgid;
	bool dead;
	struct kref kref;
};

struct ipmi_driver_data {
	struct list_head ipmi_devices;
	struct ipmi_smi_watcher bmc_events;
	const struct ipmi_user_hndl ipmi_hndlrs;
	struct mutex ipmi_lock;

	/*
	 * NOTE: IPMI System Interface Selection
	 * There is no system interface specified by the IPMI operation
	 * region access.  We try to select a system interface that has
	 * its ACPI handle set.  IPMI messages passed from the ACPI code
	 * are sent to this selected global IPMI system interface.
	 */
	struct acpi_ipmi_device *selected_smi;
};

struct acpi_ipmi_msg {
	struct list_head head;

	/*
	 * Generally speaking, the addr type should be SI_ADDR_TYPE and
	 * the addr channel should be BMC.
	 * It can also be of IPMB type, but that would require parsing it
	 * from the netfn/command buffer, which is complex enough that it
	 * is skipped here.
	 */
	struct ipmi_addr addr;
	long tx_msgid;

	/* it is used to track whether the IPMI message is finished */
	struct completion tx_complete;

	struct kernel_ipmi_msg tx_message;
	int msg_done;

	/* tx/rx data, copied from/to the ACPI object buffer */
	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
	u8 rx_len;

	struct acpi_ipmi_device *device;
	struct kref kref;
};

/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
	u8 status;
	u8 length;
	u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
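
/*
 * Illustrative sketch (editor's example, not part of the driver): a
 * successful response carrying three data bytes would be passed back to
 * AML through this buffer roughly as
 *
 *	struct acpi_ipmi_buffer reply = {
 *		.status = ACPI_IPMI_OK,		// 0x00
 *		.length = 3,
 *		.data	= { 0xd5, 0x00, 0x01 },	// made-up payload bytes
 *	};
 *
 * i.e. byte 0 carries the status, byte 1 the payload length, and the
 * remaining bytes the payload itself.
 */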

static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);

static struct ipmi_driver_data driver_data = {
	.ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
	.bmc_events = {
		.owner = THIS_MODULE,
		.new_smi = ipmi_register_bmc,
		.smi_gone = ipmi_bmc_gone,
	},
	.ipmi_hndlrs = {
		.ipmi_recv_hndl = ipmi_msg_handler,
	},
	.ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};

static struct acpi_ipmi_device *
ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
{
	struct acpi_ipmi_device *ipmi_device;
	int err;
	struct ipmi_user *user;

	ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
	if (!ipmi_device)
		return NULL;

	kref_init(&ipmi_device->kref);
	INIT_LIST_HEAD(&ipmi_device->head);
	INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
	spin_lock_init(&ipmi_device->tx_msg_lock);
	ipmi_device->handle = handle;
	ipmi_device->dev = get_device(dev);
	ipmi_device->ipmi_ifnum = iface;

	err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
			       ipmi_device, &user);
	if (err) {
		put_device(dev);
		kfree(ipmi_device);
		return NULL;
	}
	ipmi_device->user_interface = user;

	return ipmi_device;
}

static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
{
	ipmi_destroy_user(ipmi_device->user_interface);
	put_device(ipmi_device->dev);
	kfree(ipmi_device);
}

static void ipmi_dev_release_kref(struct kref *kref)
{
	struct acpi_ipmi_device *ipmi =
		container_of(kref, struct acpi_ipmi_device, kref);

	ipmi_dev_release(ipmi);
}

static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
{
	list_del(&ipmi_device->head);
	if (driver_data.selected_smi == ipmi_device)
		driver_data.selected_smi = NULL;

	/*
	 * Always set the dead flag after deleting from the list, or
	 * the list_for_each_entry() code must be changed accordingly.
	 */
	ipmi_device->dead = true;
}

static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
{
	struct acpi_ipmi_device *ipmi_device = NULL;

	mutex_lock(&driver_data.ipmi_lock);
	if (driver_data.selected_smi) {
		ipmi_device = driver_data.selected_smi;
		kref_get(&ipmi_device->kref);
	}
	mutex_unlock(&driver_data.ipmi_lock);

	return ipmi_device;
}

static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
{
	kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
}

static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
{
	struct acpi_ipmi_device *ipmi;
	struct acpi_ipmi_msg *ipmi_msg;

	ipmi = acpi_ipmi_dev_get();
	if (!ipmi)
		return NULL;

	ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
	if (!ipmi_msg) {
		acpi_ipmi_dev_put(ipmi);
		return NULL;
	}

	kref_init(&ipmi_msg->kref);
	init_completion(&ipmi_msg->tx_complete);
	INIT_LIST_HEAD(&ipmi_msg->head);
	ipmi_msg->device = ipmi;
	ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;

	return ipmi_msg;
}

static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
{
	acpi_ipmi_dev_put(tx_msg->device);
	kfree(tx_msg);
}

static void ipmi_msg_release_kref(struct kref *kref)
{
	struct acpi_ipmi_msg *tx_msg =
		container_of(kref, struct acpi_ipmi_msg, kref);

	ipmi_msg_release(tx_msg);
}

static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
{
	kref_get(&tx_msg->kref);

	return tx_msg;
}

static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
{
	kref_put(&tx_msg->kref, ipmi_msg_release_kref);
}

#define IPMI_OP_RGN_NETFN(offset)	((offset >> 8) & 0xff)
#define IPMI_OP_RGN_CMD(offset)		(offset & 0xff)
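
/*
 * Worked example (illustrative): an IPMI field declared at region offset
 * 0x3058 decodes as IPMI_OP_RGN_NETFN(0x3058) == 0x30 and
 * IPMI_OP_RGN_CMD(0x3058) == 0x58, i.e. bits 15:8 of the offset carry the
 * network function and bits 7:0 the command, per ACPI 4.0, sec 5.5.2.4.3.
 */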
static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
				    acpi_physical_address address,
				    acpi_integer *value)
{
	struct kernel_ipmi_msg *msg;
	struct acpi_ipmi_buffer *buffer;
	struct acpi_ipmi_device *device;
	unsigned long flags;

	msg = &tx_msg->tx_message;

	/*
	 * IPMI network function and command are encoded in the address
	 * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
	 */
	msg->netfn = IPMI_OP_RGN_NETFN(address);
	msg->cmd = IPMI_OP_RGN_CMD(address);
	msg->data = tx_msg->data;

	/*
	 * value is the parameter passed by the IPMI opregion space handler.
	 * It points to the IPMI request message buffer
	 */
	buffer = (struct acpi_ipmi_buffer *)value;

	/* copy the tx message data */
	if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
		dev_WARN_ONCE(tx_msg->device->dev, true,
			      "Unexpected request (msg len %d).\n",
			      buffer->length);
		return -EINVAL;
	}
	msg->data_len = buffer->length;
	memcpy(tx_msg->data, buffer->data, msg->data_len);

	/*
	 * The default addr type is SYSTEM_INTERFACE and the channel type is
	 * BMC.  If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
	 * the addr type should be changed to IPMB, which would require
	 * parsing the IPMI request message buffer to get the IPMB address.
	 * If that is ever needed, please fix this.
	 */
	tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	tx_msg->addr.channel = IPMI_BMC_CHANNEL;
	tx_msg->addr.data[0] = 0;

	/* Get the msgid */
	device = tx_msg->device;

	spin_lock_irqsave(&device->tx_msg_lock, flags);
	device->curr_msgid++;
	tx_msg->tx_msgid = device->curr_msgid;
	spin_unlock_irqrestore(&device->tx_msg_lock, flags);

	return 0;
}

static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
				      acpi_integer *value)
{
	struct acpi_ipmi_buffer *buffer;

	/*
	 * value is also used as an output parameter. It represents the
	 * response IPMI message returned by the IPMI command.
	 */
	buffer = (struct acpi_ipmi_buffer *)value;

	/*
	 * If msg_done is not ACPI_IPMI_OK, the IPMI command was not
	 * executed correctly, so only the status byte is reported.
	 */
	buffer->status = msg->msg_done;
	if (msg->msg_done != ACPI_IPMI_OK)
		return;

	/*
	 * If the IPMI response message was obtained correctly, the status
	 * code is ACPI_IPMI_OK.
	 */
	buffer->length = msg->rx_len;
	memcpy(buffer->data, msg->data, msg->rx_len);
}

static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
	struct acpi_ipmi_msg *tx_msg;
	unsigned long flags;

	/*
	 * NOTE: On-going ipmi_recv_msg
	 * ipmi_msg_handler() may still be invoked by ipmi_si after
	 * flushing.  But it is safe to do a fast flushing on module_exit()
	 * without waiting for all ipmi_recv_msg(s) to complete from
	 * ipmi_msg_handler() as it is ensured by ipmi_si that all
	 * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
	 */
	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
	while (!list_empty(&ipmi->tx_msg_list)) {
		tx_msg = list_first_entry(&ipmi->tx_msg_list,
					  struct acpi_ipmi_msg,
					  head);
		list_del(&tx_msg->head);
		spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

		/* wake up the thread sleeping on the Tx msg */
		complete(&tx_msg->tx_complete);
		acpi_ipmi_msg_put(tx_msg);
		spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
	}
	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
}

static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
			       struct acpi_ipmi_msg *msg)
{
	struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
	unsigned long flags;

	spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
	list_for_each_entry_safe(iter, temp, &ipmi->tx_msg_list, head) {
		if (msg == iter) {
			tx_msg = iter;
			list_del(&iter->head);
			break;
		}
	}
	spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

	if (tx_msg)
		acpi_ipmi_msg_put(tx_msg);
}

static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	struct acpi_ipmi_device *ipmi_device = user_msg_data;
	struct acpi_ipmi_msg *tx_msg = NULL, *iter, *temp;
	struct device *dev = ipmi_device->dev;
	unsigned long flags;

	if (msg->user != ipmi_device->user_interface) {
		dev_warn(dev,
			 "Unexpected response is returned. returned user %p, expected user %p\n",
			 msg->user, ipmi_device->user_interface);
		goto out_msg;
	}

	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_for_each_entry_safe(iter, temp, &ipmi_device->tx_msg_list, head) {
		if (msg->msgid == iter->tx_msgid) {
			tx_msg = iter;
			list_del(&iter->head);
			break;
		}
	}
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

	if (!tx_msg) {
		dev_warn(dev,
			 "Unexpected response (msg id %ld) is returned.\n",
			 msg->msgid);
		goto out_msg;
	}

	/* copy the response data to Rx_data buffer */
	if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
		dev_WARN_ONCE(dev, true,
			      "Unexpected response (msg len %d).\n",
			      msg->msg.data_len);
		goto out_comp;
	}

	/* response msg is an error msg */
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
	    msg->msg.data_len == 1) {
		if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
			dev_dbg_once(dev, "Unexpected response (timeout).\n");
			tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
		}
		goto out_comp;
	}

	tx_msg->rx_len = msg->msg.data_len;
	memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
	tx_msg->msg_done = ACPI_IPMI_OK;

out_comp:
	complete(&tx_msg->tx_complete);
	acpi_ipmi_msg_put(tx_msg);
out_msg:
	ipmi_free_recv_msg(msg);
}

static void ipmi_register_bmc(int iface, struct device *dev)
{
	struct acpi_ipmi_device *ipmi_device, *temp;
	int err;
	struct ipmi_smi_info smi_data;
	acpi_handle handle;

	err = ipmi_get_smi_info(iface, &smi_data);
	if (err)
		return;

	if (smi_data.addr_src != SI_ACPI)
		goto err_ref;
	handle = smi_data.addr_info.acpi_info.acpi_handle;
	if (!handle)
		goto err_ref;

	ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
	if (!ipmi_device) {
		dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
		goto err_ref;
	}

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
		/*
		 * if the corresponding ACPI handle is already added
		 * to the device list, don't add it again.
		 */
		if (temp->handle == handle)
			goto err_lock;
	}
	if (!driver_data.selected_smi)
		driver_data.selected_smi = ipmi_device;
	list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
	mutex_unlock(&driver_data.ipmi_lock);

	put_device(smi_data.dev);
	return;

err_lock:
	mutex_unlock(&driver_data.ipmi_lock);
	ipmi_dev_release(ipmi_device);
err_ref:
	put_device(smi_data.dev);
}

static void ipmi_bmc_gone(int iface)
{
	struct acpi_ipmi_device *ipmi_device = NULL, *iter, *temp;

	mutex_lock(&driver_data.ipmi_lock);
	list_for_each_entry_safe(iter, temp,
				 &driver_data.ipmi_devices, head) {
		if (iter->ipmi_ifnum == iface) {
			ipmi_device = iter;
			__ipmi_dev_kill(iter);
			break;
		}
	}
	if (!driver_data.selected_smi)
		driver_data.selected_smi = list_first_entry_or_null(
					&driver_data.ipmi_devices,
					struct acpi_ipmi_device, head);
	mutex_unlock(&driver_data.ipmi_lock);

	if (ipmi_device) {
		ipmi_flush_tx_msg(ipmi_device);
		acpi_ipmi_dev_put(ipmi_device);
	}
}

/*
 * This is the IPMI opregion space handler.
 * @function: indicates read or write access. Since the IPMI message
 *	      exchange is driven by commands, only write access is
 *	      meaningful.
 * @address: contains the netfn/command of the IPMI request message.
 * @bits   : not used.
 * @value  : an in/out parameter. It points to the IPMI message buffer.
 *	     Before the IPMI message is sent, it holds the actual request
 *	     IPMI message. After the IPMI transaction completes, it holds
 *	     the response IPMI message returned by the IPMI command.
 * @handler_context: IPMI device context.
 */
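/*
 * End-to-end sketch (editor's illustration, example values assumed): a
 * "Get Device ID" style request, netfn 0x06 / cmd 0x01 with no extra
 * data, corresponds to region offset 0x0601.  AML writes a request
 * buffer with length == 0; this handler queues the message, waits for
 * completion, and rewrites the same buffer so that status holds
 * ACPI_IPMI_OK (or ACPI_IPMI_TIMEOUT / ACPI_IPMI_UNKNOWN), length holds
 * the response size, and data[] holds the bytes received from the BMC.
 */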
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
			u32 bits, acpi_integer *value,
			void *handler_context, void *region_context)
{
	struct acpi_ipmi_msg *tx_msg;
	struct acpi_ipmi_device *ipmi_device;
	int err;
	acpi_status status;
	unsigned long flags;

	/*
	 * IPMI opregion message.
	 * An IPMI message is first written to the BMC, and system software
	 * then reads back the response, so plain read access to the IPMI
	 * opregion is meaningless.
	 */
	if ((function & ACPI_IO_MASK) == ACPI_READ)
		return AE_TYPE;

	tx_msg = ipmi_msg_alloc();
	if (!tx_msg)
		return AE_NOT_EXIST;
	ipmi_device = tx_msg->device;

	if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
		ipmi_msg_release(tx_msg);
		return AE_TYPE;
	}

	acpi_ipmi_msg_get(tx_msg);
	mutex_lock(&driver_data.ipmi_lock);
	/* Do not add a tx_msg that can not be flushed. */
	if (ipmi_device->dead) {
		mutex_unlock(&driver_data.ipmi_lock);
		ipmi_msg_release(tx_msg);
		return AE_NOT_EXIST;
	}
	spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
	list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
	spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
	mutex_unlock(&driver_data.ipmi_lock);

	err = ipmi_request_settime(ipmi_device->user_interface,
				   &tx_msg->addr,
				   tx_msg->tx_msgid,
				   &tx_msg->tx_message,
				   NULL, 0, 0, IPMI_TIMEOUT);
	if (err) {
		status = AE_ERROR;
		goto out_msg;
	}
	wait_for_completion(&tx_msg->tx_complete);

	acpi_format_ipmi_response(tx_msg, value);
	status = AE_OK;

out_msg:
	ipmi_cancel_tx_msg(ipmi_device, tx_msg);
	acpi_ipmi_msg_put(tx_msg);
	return status;
}

static int __init acpi_ipmi_init(void)
{
	int result;
	acpi_status status;

	if (acpi_disabled)
		return 0;

	status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
						    ACPI_ADR_SPACE_IPMI,
						    &acpi_ipmi_space_handler,
						    NULL, NULL);
	if (ACPI_FAILURE(status)) {
		pr_warn("Can't register IPMI opregion space handler\n");
		return -EINVAL;
	}

	result = ipmi_smi_watcher_register(&driver_data.bmc_events);
	if (result) {
		acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
						  ACPI_ADR_SPACE_IPMI,
						  &acpi_ipmi_space_handler);
		pr_err("Can't register IPMI system interface watcher\n");
	}

	return result;
}

static void __exit acpi_ipmi_exit(void)
{
	struct acpi_ipmi_device *ipmi_device;

	if (acpi_disabled)
		return;

	ipmi_smi_watcher_unregister(&driver_data.bmc_events);

	/*
	 * When one smi_watcher is unregistered, it is only deleted
	 * from the smi_watcher list. But the smi_gone callback function
	 * is not called. So explicitly uninstall the ACPI IPMI opregion
	 * handler and free the remaining devices here.
	 */
	mutex_lock(&driver_data.ipmi_lock);
	while (!list_empty(&driver_data.ipmi_devices)) {
		ipmi_device = list_first_entry(&driver_data.ipmi_devices,
					       struct acpi_ipmi_device,
					       head);
		__ipmi_dev_kill(ipmi_device);
		mutex_unlock(&driver_data.ipmi_lock);

		ipmi_flush_tx_msg(ipmi_device);
		acpi_ipmi_dev_put(ipmi_device);

		mutex_lock(&driver_data.ipmi_lock);
	}
	mutex_unlock(&driver_data.ipmi_lock);
	acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
					  ACPI_ADR_SPACE_IPMI,
					  &acpi_ipmi_space_handler);
}

module_init(acpi_ipmi_init);
module_exit(acpi_ipmi_exit);