Lines Matching +full:ipmb +full:-dev

1 // SPDX-License-Identifier: GPL-2.0+
65 /* FIXME - add watchdog stuff. */
68 /* Some BT-specific defines we need here. */
73 /* 'invalid' to allow a firmware-specified interface to be disabled */
147 * Per-OEM handler, called from handle_flags(). Returns 1
148 * when handle_flags() needs to be re-run or 0 indicating it
203 * memory. Once that situation clears up, it will re-enable
253 atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
255 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
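The two fragments above are the bodies of the driver's smi_inc_stat()/smi_get_stat() macros: per-interface counters kept in an atomic array indexed by token-pasting the stat name onto an enum prefix. A minimal userspace sketch of the same pattern, with hypothetical names standing in for the driver's enum and structure:

```c
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's stat enum and struct smi_info. */
enum demo_stat_indexes { DEMO_STAT_short_timeouts, DEMO_STAT_hosed_count, DEMO_NUM_STATS };

struct demo_smi {
	atomic_uint stats[DEMO_NUM_STATS];
};

/* '##' pastes the bare stat name onto the enum prefix at the call site. */
#define demo_inc_stat(smi, stat) \
	atomic_fetch_add(&(smi)->stats[DEMO_STAT_ ## stat], 1)
#define demo_get_stat(smi, stat) \
	((unsigned int)atomic_load(&(smi)->stats[DEMO_STAT_ ## stat]))

int main(void)
{
	struct demo_smi smi = { 0 };

	demo_inc_stat(&smi, hosed_count);
	printf("hosed_count = %u\n", demo_get_stat(&smi, hosed_count));
	return 0;
}
```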
276 dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n", in debug_timestamp()
293 ipmi_smi_msg_received(smi_info->intf, msg); in deliver_recv_msg()
298 struct ipmi_smi_msg *msg = smi_info->curr_msg; in return_hosed_msg()
305 msg->rsp[0] = msg->data[0] | 4; in return_hosed_msg()
306 msg->rsp[1] = msg->data[1]; in return_hosed_msg()
307 msg->rsp[2] = cCode; in return_hosed_msg()
308 msg->rsp_size = 3; in return_hosed_msg()
310 smi_info->curr_msg = NULL; in return_hosed_msg()
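return_hosed_msg() above fabricates a local error response when no real one can be obtained: the request's NetFn byte gets its response bit set, the command byte is echoed, and a single completion code follows. A small userspace sketch of that three-byte layout (the completion-code value and all names here are illustrative):

```c
#include <stdio.h>

#define DEMO_ERR_UNSPECIFIED 0xff	/* assumed generic completion code */

/* Build a locally generated error response for a request in the SI message format. */
static void demo_hosed_response(const unsigned char *req, unsigned char *rsp)
{
	rsp[0] = req[0] | 4;		/* NetFn<<2|LUN: setting bit 2 turns a request NetFn into its response NetFn */
	rsp[1] = req[1];		/* echo the command byte */
	rsp[2] = DEMO_ERR_UNSPECIFIED;	/* completion code */
}

int main(void)
{
	unsigned char req[2] = { 0x06 << 2, 0x01 };	/* App request NetFn, Get Device ID */
	unsigned char rsp[3];

	demo_hosed_response(req, rsp);
	printf("rsp: %02x %02x %02x\n", rsp[0], rsp[1], rsp[2]);
	return 0;
}
```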
318 if (!smi_info->waiting_msg) { in start_next_msg()
319 smi_info->curr_msg = NULL; in start_next_msg()
324 smi_info->curr_msg = smi_info->waiting_msg; in start_next_msg()
325 smi_info->waiting_msg = NULL; in start_next_msg()
333 err = smi_info->handlers->start_transaction( in start_next_msg()
334 smi_info->si_sm, in start_next_msg()
335 smi_info->curr_msg->data, in start_next_msg()
336 smi_info->curr_msg->data_size); in start_next_msg()
348 if (!smi_info->timer_can_start) in smi_mod_timer()
350 smi_info->last_timeout_jiffies = jiffies; in smi_mod_timer()
351 mod_timer(&smi_info->si_timer, new_val); in smi_mod_timer()
352 smi_info->timer_running = true; in smi_mod_timer()
363 if (smi_info->thread) in start_new_msg()
364 wake_up_process(smi_info->thread); in start_new_msg()
366 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); in start_new_msg()
377 smi_info->si_state = SI_CHECKING_ENABLES; in start_check_enables()
384 /* Make sure the watchdog pre-timeout flag is not set at startup. */ in start_clear_flags()
390 smi_info->si_state = SI_CLEARING_FLAGS; in start_clear_flags()
395 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); in start_getting_msg_queue()
396 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; in start_getting_msg_queue()
397 smi_info->curr_msg->data_size = 2; in start_getting_msg_queue()
399 start_new_msg(smi_info, smi_info->curr_msg->data, in start_getting_msg_queue()
400 smi_info->curr_msg->data_size); in start_getting_msg_queue()
401 smi_info->si_state = SI_GETTING_MESSAGES; in start_getting_msg_queue()
406 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); in start_getting_events()
407 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; in start_getting_events()
408 smi_info->curr_msg->data_size = 2; in start_getting_events()
410 start_new_msg(smi_info, smi_info->curr_msg->data, in start_getting_events()
411 smi_info->curr_msg->data_size); in start_getting_events()
412 smi_info->si_state = SI_GETTING_EVENTS; in start_getting_events()
419 * memory, we will re-enable the interrupt.
426 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { in disable_si_irq()
427 smi_info->interrupt_disabled = true; in disable_si_irq()
436 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { in enable_si_irq()
437 smi_info->interrupt_disabled = false; in enable_si_irq()
457 smi_info->si_state = SI_NORMAL; in alloc_msg_handle_irq()
468 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { in handle_flags()
469 /* Watchdog pre-timeout */ in handle_flags()
473 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; in handle_flags()
474 ipmi_smi_watchdog_pretimeout(smi_info->intf); in handle_flags()
475 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { in handle_flags()
477 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in handle_flags()
478 if (!smi_info->curr_msg) in handle_flags()
482 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { in handle_flags()
484 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in handle_flags()
485 if (!smi_info->curr_msg) in handle_flags()
489 } else if (smi_info->msg_flags & OEM_DATA_AVAIL && in handle_flags()
490 smi_info->oem_data_avail_handler) { in handle_flags()
491 if (smi_info->oem_data_avail_handler(smi_info)) in handle_flags()
494 smi_info->si_state = SI_NORMAL; in handle_flags()
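handle_flags() above dispatches on the BMC's message-flags byte in a fixed priority order, and an OEM hook may rewrite the flags and ask for another pass (the "returns 1 when handle_flags() needs to be re-run" contract noted near the top of the listing). A userspace sketch of that dispatch; the bit values follow the IPMI Get Message Flags layout but are assumptions here, and the OEM hook is hypothetical:

```c
#include <stdio.h>

#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM_DATA_AVAIL		0x20	/* one of the OEM-reserved bits */

/* Hypothetical per-OEM hook: rewrite the flags, return 1 to request another pass. */
static int demo_oem_handler(unsigned char *flags)
{
	*flags = (*flags & ~OEM_DATA_AVAIL) | RECEIVE_MSG_AVAIL;
	return 1;
}

static void demo_handle_flags(unsigned char flags)
{
retry:
	if (flags & WDT_PRE_TIMEOUT_INT) {
		flags &= ~WDT_PRE_TIMEOUT_INT;
		printf("watchdog pre-timeout\n");
	} else if (flags & RECEIVE_MSG_AVAIL) {
		printf("start fetching a queued message\n");
	} else if (flags & EVENT_MSG_BUFFER_FULL) {
		printf("start fetching a buffered event\n");
	} else if (flags & OEM_DATA_AVAIL) {
		if (demo_oem_handler(&flags))
			goto retry;	/* flags were rewritten; re-evaluate from the top */
	}
}

int main(void)
{
	demo_handle_flags(OEM_DATA_AVAIL);
	return 0;
}
```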
508 if (smi_info->supports_event_msg_buff) in current_global_enables()
511 if (((smi_info->io.irq && !smi_info->interrupt_disabled) || in current_global_enables()
512 smi_info->cannot_disable_irq) && in current_global_enables()
513 !smi_info->irq_enable_broken) in current_global_enables()
516 if (smi_info->supports_event_msg_buff && in current_global_enables()
517 smi_info->io.irq && !smi_info->interrupt_disabled && in current_global_enables()
518 !smi_info->irq_enable_broken) in current_global_enables()
528 u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); in check_bt_irq()
536 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, in check_bt_irq()
539 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); in check_bt_irq()
547 switch (smi_info->si_state) { in handle_transaction_done()
549 if (!smi_info->curr_msg) in handle_transaction_done()
552 smi_info->curr_msg->rsp_size in handle_transaction_done()
553 = smi_info->handlers->get_result( in handle_transaction_done()
554 smi_info->si_sm, in handle_transaction_done()
555 smi_info->curr_msg->rsp, in handle_transaction_done()
563 msg = smi_info->curr_msg; in handle_transaction_done()
564 smi_info->curr_msg = NULL; in handle_transaction_done()
574 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
577 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
583 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
585 smi_info->msg_flags = msg[3]; in handle_transaction_done()
596 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); in handle_transaction_done()
599 dev_warn_ratelimited(smi_info->io.dev, in handle_transaction_done()
602 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
608 smi_info->curr_msg->rsp_size in handle_transaction_done()
609 = smi_info->handlers->get_result( in handle_transaction_done()
610 smi_info->si_sm, in handle_transaction_done()
611 smi_info->curr_msg->rsp, in handle_transaction_done()
619 msg = smi_info->curr_msg; in handle_transaction_done()
620 smi_info->curr_msg = NULL; in handle_transaction_done()
621 if (msg->rsp[2] != 0) { in handle_transaction_done()
623 msg->done(msg); in handle_transaction_done()
626 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; in handle_transaction_done()
646 smi_info->curr_msg->rsp_size in handle_transaction_done()
647 = smi_info->handlers->get_result( in handle_transaction_done()
648 smi_info->si_sm, in handle_transaction_done()
649 smi_info->curr_msg->rsp, in handle_transaction_done()
657 msg = smi_info->curr_msg; in handle_transaction_done()
658 smi_info->curr_msg = NULL; in handle_transaction_done()
659 if (msg->rsp[2] != 0) { in handle_transaction_done()
661 msg->done(msg); in handle_transaction_done()
664 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; in handle_transaction_done()
689 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
691 dev_warn_ratelimited(smi_info->io.dev, in handle_transaction_done()
695 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
699 if (smi_info->io.si_info->type == SI_BT) in handle_transaction_done()
707 smi_info->handlers->start_transaction( in handle_transaction_done()
708 smi_info->si_sm, msg, 3); in handle_transaction_done()
709 smi_info->si_state = SI_SETTING_ENABLES; in handle_transaction_done()
710 } else if (smi_info->supports_event_msg_buff) { in handle_transaction_done()
711 smi_info->curr_msg = ipmi_alloc_smi_msg(); in handle_transaction_done()
712 if (!smi_info->curr_msg) { in handle_transaction_done()
713 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
718 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
727 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
729 dev_warn_ratelimited(smi_info->io.dev, in handle_transaction_done()
733 if (smi_info->supports_event_msg_buff) { in handle_transaction_done()
734 smi_info->curr_msg = ipmi_alloc_smi_msg(); in handle_transaction_done()
735 if (!smi_info->curr_msg) { in handle_transaction_done()
736 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
741 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
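handle_transaction_done() above is one arm of the driver's state machine: each completed transaction is decoded according to the state that issued it, short or failed responses drop back to SI_NORMAL, and a successful "get flags" hands off to the flag handling. A deliberately tiny userspace miniature of that switch-on-state bookkeeping (states, lengths and transitions are simplified stand-ins, not the driver's exact ones):

```c
#include <stdio.h>

enum demo_state { ST_NORMAL, ST_GETTING_FLAGS, ST_HANDLING_FLAGS };

/* Decide the next state from the response to the transaction this state issued. */
static enum demo_state demo_transaction_done(enum demo_state st,
					     const unsigned char *rsp, int len)
{
	switch (st) {
	case ST_GETTING_FLAGS:
		if (len < 4 || rsp[2] != 0)
			return ST_NORMAL;			/* short or failed response: back to idle */
		return rsp[3] ? ST_HANDLING_FLAGS : ST_NORMAL;	/* flags pending: keep working */
	default:
		return ST_NORMAL;
	}
}

int main(void)
{
	unsigned char rsp[4] = { 0x1c, 0x31, 0x00, 0x02 };	/* ok response, one flag set */

	printf("next state = %d\n", demo_transaction_done(ST_GETTING_FLAGS, rsp, 4));
	return 0;
}
```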
767 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); in smi_event_handler()
770 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); in smi_event_handler()
784 smi_info->si_state = SI_NORMAL; in smi_event_handler()
785 if (smi_info->curr_msg != NULL) { in smi_event_handler()
800 if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { in smi_event_handler()
803 if (smi_info->si_state != SI_NORMAL) { in smi_event_handler()
808 smi_info->got_attn = true; in smi_event_handler()
810 smi_info->got_attn = false; in smi_event_handler()
824 smi_info->si_state = SI_GETTING_FLAGS; in smi_event_handler()
839 && (atomic_read(&smi_info->req_events))) { in smi_event_handler()
844 atomic_set(&smi_info->req_events, 0); in smi_event_handler()
852 if (smi_info->supports_event_msg_buff || smi_info->io.irq) { in smi_event_handler()
855 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in smi_event_handler()
856 if (!smi_info->curr_msg) in smi_event_handler()
864 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { in smi_event_handler()
866 if (timer_delete(&smi_info->si_timer)) in smi_event_handler()
867 smi_info->timer_running = false; in smi_event_handler()
876 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { in check_start_timer_thread()
879 if (smi_info->thread) in check_start_timer_thread()
880 wake_up_process(smi_info->thread); in check_start_timer_thread()
893 * Currently, this function is called only in run-to-completion in flush_messages()
894 * mode. This means we are single-threaded, no need for locks. in flush_messages()
911 if (smi_info->run_to_completion) { in sender()
916 smi_info->waiting_msg = msg; in sender()
920 spin_lock_irqsave(&smi_info->si_lock, flags); in sender()
928 BUG_ON(smi_info->waiting_msg); in sender()
929 smi_info->waiting_msg = msg; in sender()
931 spin_unlock_irqrestore(&smi_info->si_lock, flags); in sender()
938 smi_info->run_to_completion = i_run_to_completion; in set_run_to_completion()
944 * Use -1 as a special constant to tell that we are spinning in kipmid
947 #define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
954 if (smi_info->si_num < num_max_busy_us) in ipmi_thread_busy_wait()
955 max_busy_us = kipmid_max_busy_us[smi_info->si_num]; in ipmi_thread_busy_wait()
971 * A busy-waiting loop for speeding up IPMI operation.
977 * Documentation/driver-api/ipmi.rst for details.
990 spin_lock_irqsave(&(smi_info->si_lock), flags); in ipmi_thread()
1000 if (smi_result != SI_SM_IDLE && !smi_info->timer_running) in ipmi_thread()
1003 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in ipmi_thread()
1015 if (smi_info->in_maintenance_mode) in ipmi_thread()
1020 if (atomic_read(&smi_info->need_watch)) { in ipmi_thread()
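ipmi_thread_busy_wait() and the kipmid loop above spin on the state machine for at most kipmid_max_busy_us microseconds per operation before yielding, with IPMI_TIME_NOT_BUSY marking "no budget in progress". A rough userspace analogue of that bounded busy-poll, with a fake poll step standing in for the state machine:

```c
#include <sched.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static bool demo_poll_step(void)		/* stand-in for one state-machine event() call */
{
	static int calls;

	return ++calls > 1000;			/* pretend the operation finishes eventually */
}

int main(void)
{
	const int64_t budget_ns = 100 * 1000;	/* e.g. kipmid_max_busy_us = 100 */
	int64_t deadline = now_ns() + budget_ns;
	bool done = false;

	while (!done && now_ns() < deadline)
		done = demo_poll_step();	/* spin only while within the budget */

	if (!done)
		sched_yield();			/* budget spent: give up the CPU (kipmid would sleep) */

	printf("done = %d\n", done);
	return 0;
}
```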
1039 bool run_to_completion = smi_info->run_to_completion; in poll()
1047 spin_lock_irqsave(&smi_info->si_lock, flags); in poll()
1050 spin_unlock_irqrestore(&smi_info->si_lock, flags); in poll()
1057 if (!smi_info->has_event_buffer) in request_events()
1060 atomic_set(&smi_info->req_events, 1); in request_events()
1071 atomic_set(&smi_info->need_watch, enable); in set_need_watch()
1072 spin_lock_irqsave(&smi_info->si_lock, flags); in set_need_watch()
1074 spin_unlock_irqrestore(&smi_info->si_lock, flags); in set_need_watch()
1087 spin_lock_irqsave(&(smi_info->si_lock), flags); in smi_timeout()
1091 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) in smi_timeout()
1095 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { in smi_timeout()
1118 smi_info->timer_running = false; in smi_timeout()
1119 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in smi_timeout()
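smi_timeout() above measures how long the timer slept by subtracting jiffies values as signed longs and scaling the delta into the unit the timeout logic uses. A kernel-style sketch of that wrap-safe delta using the stock jiffies_to_msecs() helper (the function and threshold here are illustrative, not the driver's):

```c
#include <linux/jiffies.h>
#include <linux/types.h>

/* Has at least timeout_ms elapsed since 'last' (a jiffies timestamp)? */
static bool demo_timed_out(unsigned long last, unsigned int timeout_ms)
{
	long delta = (long)jiffies - (long)last;	/* signed subtraction stays correct across wrap */

	return jiffies_to_msecs(delta) >= timeout_ms;
}
```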
1127 if (smi_info->io.si_info->type == SI_BT) in ipmi_si_irq_handler()
1129 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, in ipmi_si_irq_handler()
1133 spin_lock_irqsave(&(smi_info->si_lock), flags); in ipmi_si_irq_handler()
1140 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in ipmi_si_irq_handler()
1150 new_smi->intf = intf; in smi_start_processing()
1153 timer_setup(&new_smi->si_timer, smi_timeout, 0); in smi_start_processing()
1154 new_smi->timer_can_start = true; in smi_start_processing()
1158 if (new_smi->io.irq_setup) { in smi_start_processing()
1159 new_smi->io.irq_handler_data = new_smi; in smi_start_processing()
1160 new_smi->io.irq_setup(&new_smi->io); in smi_start_processing()
1166 if (new_smi->si_num < num_force_kipmid) in smi_start_processing()
1167 enable = force_kipmid[new_smi->si_num]; in smi_start_processing()
1172 else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq) in smi_start_processing()
1176 new_smi->thread = kthread_run(ipmi_thread, new_smi, in smi_start_processing()
1177 "kipmi%d", new_smi->si_num); in smi_start_processing()
1178 if (IS_ERR(new_smi->thread)) { in smi_start_processing()
1179 dev_notice(new_smi->io.dev, in smi_start_processing()
1181 PTR_ERR(new_smi->thread)); in smi_start_processing()
1182 new_smi->thread = NULL; in smi_start_processing()
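smi_start_processing() above optionally spawns the kipmid polling thread with kthread_run() and simply carries on without one if that fails, leaving the timer to drive the state machine. A kernel-style sketch of that pattern (function and structure names are placeholders, not this driver's):

```c
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static int demo_poll_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* a real thread would run the SI state machine here */
		msleep_interruptible(1);
	}
	return 0;
}

static struct task_struct *demo_start_poller(void *priv, int idx)
{
	struct task_struct *t = kthread_run(demo_poll_thread, priv, "kdemo%d", idx);

	if (IS_ERR(t))
		return NULL;	/* no kthread: the caller keeps polling from its timer */
	return t;
}
```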
1193 data->addr_src = smi->io.addr_source; in get_smi_info()
1194 data->dev = smi->io.dev; in get_smi_info()
1195 data->addr_info = smi->io.addr_info; in get_smi_info()
1196 get_device(smi->io.dev); in get_smi_info()
1206 atomic_set(&smi_info->req_events, 0); in set_maintenance_mode()
1207 smi_info->in_maintenance_mode = enable; in set_maintenance_mode()
1233 …"Force the kipmi daemon to be enabled (1) or disabled(0). Normally the IPMI driver auto-detects t…
1239 …"Max time (in microseconds) to busy-wait for IPMI data before sleeping. 0 (default) means to wait …
1243 if (io->si_info->type == SI_BT) in ipmi_irq_finish_setup()
1245 io->outputb(io, IPMI_BT_INTMASK_REG, in ipmi_irq_finish_setup()
1251 if (io->si_info->type == SI_BT) in ipmi_irq_start_cleanup()
1253 io->outputb(io, IPMI_BT_INTMASK_REG, 0); in ipmi_irq_start_cleanup()
1259 free_irq(io->irq, io->irq_handler_data); in std_irq_cleanup()
1266 if (!io->irq) in ipmi_std_irq_setup()
1269 rv = request_irq(io->irq, in ipmi_std_irq_setup()
1273 io->irq_handler_data); in ipmi_std_irq_setup()
1275 dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n", in ipmi_std_irq_setup()
1276 SI_DEVICE_NAME, io->irq); in ipmi_std_irq_setup()
1277 io->irq = 0; in ipmi_std_irq_setup()
1279 io->irq_cleanup = std_irq_cleanup; in ipmi_std_irq_setup()
1281 dev_info(io->dev, "Using irq %d\n", io->irq); in ipmi_std_irq_setup()
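ipmi_std_irq_setup() above tries to claim the interrupt and, on failure, zeroes io->irq so the rest of the driver falls back to polling. A kernel-style sketch of the same fallback (struct demo_io and its fields are stand-ins for the driver's io structure):

```c
#include <linux/interrupt.h>
#include <linux/device.h>

struct demo_io {
	int irq;
	struct device *dev;
	void *irq_handler_data;
};

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	/* a real handler would kick the state machine under the SI lock */
	return IRQ_HANDLED;
}

static void demo_irq_setup(struct demo_io *io)
{
	if (!io->irq)
		return;				/* nothing assigned: stay polled */

	if (request_irq(io->irq, demo_irq_handler, IRQF_SHARED,
			"demo_ipmi", io->irq_handler_data)) {
		dev_warn(io->dev, "unable to claim interrupt %d, running polled\n",
			 io->irq);
		io->irq = 0;			/* forget the IRQ; the timer path takes over */
	} else {
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}
}
```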
1291 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); in wait_for_msg_done()
1296 smi_result = smi_info->handlers->event( in wait_for_msg_done()
1297 smi_info->si_sm, jiffies_to_usecs(1)); in wait_for_msg_done()
1299 smi_result = smi_info->handlers->event( in wait_for_msg_done()
1300 smi_info->si_sm, 0); in wait_for_msg_done()
1309 return -ENODEV; in wait_for_msg_done()
1324 return -ENOMEM; in try_get_dev_id()
1334 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in try_get_dev_id()
1340 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_get_dev_id()
1345 resp + 2, resp_len - 2, &smi_info->device_id); in try_get_dev_id()
1352 dev_warn_ratelimited(smi_info->io.dev, in try_get_dev_id()
1373 return -ENOMEM; in get_global_enables()
1377 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in get_global_enables()
1381 dev_warn(smi_info->io.dev, in get_global_enables()
1387 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in get_global_enables()
1394 dev_warn(smi_info->io.dev, in get_global_enables()
1397 rv = -EINVAL; in get_global_enables()
1420 return -ENOMEM; in set_global_enables()
1425 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); in set_global_enables()
1429 dev_warn(smi_info->io.dev, in set_global_enables()
1435 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in set_global_enables()
1441 dev_warn(smi_info->io.dev, in set_global_enables()
1444 rv = -EINVAL; in set_global_enables()
1477 dev_err(smi_info->io.dev, in check_clr_rcv_irq()
1487 dev_warn(smi_info->io.dev, in check_clr_rcv_irq()
1489 smi_info->cannot_disable_irq = true; in check_clr_rcv_irq()
1503 if (!smi_info->io.irq) in check_set_rcv_irq()
1513 dev_err(smi_info->io.dev, in check_set_rcv_irq()
1523 dev_warn(smi_info->io.dev, in check_set_rcv_irq()
1525 smi_info->cannot_disable_irq = true; in check_set_rcv_irq()
1526 smi_info->irq_enable_broken = true; in check_set_rcv_irq()
1539 return -ENOMEM; in try_enable_event_buffer()
1543 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in try_enable_event_buffer()
1551 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_enable_event_buffer()
1559 rv = -EINVAL; in try_enable_event_buffer()
1565 smi_info->supports_event_msg_buff = true; in try_enable_event_buffer()
1572 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); in try_enable_event_buffer()
1580 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_enable_event_buffer()
1587 rv = -EINVAL; in try_enable_event_buffer()
1596 rv = -ENOENT; in try_enable_event_buffer()
1598 smi_info->supports_event_msg_buff = true; in try_enable_event_buffer()
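try_enable_event_buffer() above reads the BMC's global enables, ORs in the event-message-buffer bit if it is clear, and writes the result back; a BMC that refuses the write is recorded as lacking an event buffer (-ENOENT). A userspace sketch of that read-modify-write, with the bit position and the two BMC helpers assumed purely for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

#define DEMO_EVT_MSG_BUFF 0x04			/* assumed "event message buffer" enable bit */

static unsigned char fake_bmc_enables;		/* stand-in for the BMC's stored enables byte */

static unsigned char bmc_get_enables(void) { return fake_bmc_enables; }
static bool bmc_set_enables(unsigned char v) { fake_bmc_enables = v; return true; }

int main(void)
{
	unsigned char enables = bmc_get_enables();
	bool have_event_buffer = true;

	if (!(enables & DEMO_EVT_MSG_BUFF)) {
		if (!bmc_set_enables(enables | DEMO_EVT_MSG_BUFF))
			have_event_buffer = false;	/* write refused: no event buffer */
	}

	printf("event buffer %s\n", have_event_buffer ? "enabled" : "unsupported");
	return 0;
}
```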
1606 static ssize_t name##_show(struct device *dev, \
1610 struct smi_info *smi_info = dev_get_drvdata(dev); \
1616 static ssize_t type_show(struct device *dev, in type_show() argument
1620 struct smi_info *smi_info = dev_get_drvdata(dev); in type_show()
1622 return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]); in type_show()
1626 static ssize_t interrupts_enabled_show(struct device *dev, in interrupts_enabled_show() argument
1630 struct smi_info *smi_info = dev_get_drvdata(dev); in interrupts_enabled_show()
1631 int enabled = smi_info->io.irq && !smi_info->interrupt_disabled; in interrupts_enabled_show()
1649 static ssize_t params_show(struct device *dev, in params_show() argument
1653 struct smi_info *smi_info = dev_get_drvdata(dev); in params_show()
1656 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", in params_show()
1657 si_to_str[smi_info->io.si_info->type], in params_show()
1658 addr_space_to_str[smi_info->io.addr_space], in params_show()
1659 smi_info->io.addr_data, in params_show()
1660 smi_info->io.regspacing, in params_show()
1661 smi_info->io.regsize, in params_show()
1662 smi_info->io.regshift, in params_show()
1663 smi_info->io.irq, in params_show()
1664 smi_info->io.slave_addr); in params_show()
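The type, interrupts_enabled and params attributes above all follow the same sysfs pattern: recover the per-interface data with dev_get_drvdata() and format it with sysfs_emit(). A kernel-style sketch of one such read-only attribute (struct demo_info and the attribute name are hypothetical):

```c
#include <linux/device.h>
#include <linux/sysfs.h>

struct demo_info {
	int irq;
};

static ssize_t irq_number_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct demo_info *info = dev_get_drvdata(dev);	/* attached at probe via dev_set_drvdata() */

	return sysfs_emit(buf, "%d\n", info->irq);
}
static DEVICE_ATTR_RO(irq_number);
```

The resulting dev_attr_irq_number would then be listed in an attribute group and registered with device_add_group(), much as try_smi_init() below does with ipmi_si_dev_attr_group.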
1692 * @info - smi_info structure with msg_flags set
1695 * Returns 1 indicating need to re-run handle_flags().
1699 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | in oem_data_avail_to_receive_msg_avail()
1706 * @info - smi_info.device_id must be populated
1710 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
1735 struct ipmi_device_id *id = &smi_info->device_id; in setup_dell_poweredge_oem_data_handler()
1736 if (id->manufacturer_id == DELL_IANA_MFR_ID) { in setup_dell_poweredge_oem_data_handler()
1737 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && in setup_dell_poweredge_oem_data_handler()
1738 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && in setup_dell_poweredge_oem_data_handler()
1739 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { in setup_dell_poweredge_oem_data_handler()
1740 smi_info->oem_data_avail_handler = in setup_dell_poweredge_oem_data_handler()
1745 smi_info->oem_data_avail_handler = in setup_dell_poweredge_oem_data_handler()
1754 struct ipmi_smi_msg *msg = smi_info->curr_msg; in return_hosed_msg_badsize()
1757 msg->rsp[0] = msg->data[0] | 4; in return_hosed_msg_badsize()
1758 msg->rsp[1] = msg->data[1]; in return_hosed_msg_badsize()
1759 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; in return_hosed_msg_badsize()
1760 msg->rsp_size = 3; in return_hosed_msg_badsize()
1761 smi_info->curr_msg = NULL; in return_hosed_msg_badsize()
1767 * @info - smi_info.device_id must be populated
1773 * callers to try again with a different-sized buffer, which succeeds.
1783 unsigned char *data = smi_info->curr_msg->data; in dell_poweredge_bt_xaction_handler()
1784 unsigned int size = smi_info->curr_msg->data_size; in dell_poweredge_bt_xaction_handler()
1801 * @info - smi_info.device_id must be filled in already
1809 struct ipmi_device_id *id = &smi_info->device_id; in setup_dell_poweredge_bt_xaction_handler()
1810 if (id->manufacturer_id == DELL_IANA_MFR_ID && in setup_dell_poweredge_bt_xaction_handler()
1811 smi_info->io.si_info->type == SI_BT) in setup_dell_poweredge_bt_xaction_handler()
1817 * @info - smi_info.device_id must be filled in already
1841 if (smi_info->thread != NULL) { in stop_timer_and_thread()
1842 kthread_stop(smi_info->thread); in stop_timer_and_thread()
1843 smi_info->thread = NULL; in stop_timer_and_thread()
1846 smi_info->timer_can_start = false; in stop_timer_and_thread()
1847 timer_delete_sync(&smi_info->si_timer); in stop_timer_and_thread()
1855 if (e->io.addr_space != info->io.addr_space) in find_dup_si()
1857 if (e->io.addr_data == info->io.addr_data) { in find_dup_si()
1863 if (info->io.slave_addr && !e->io.slave_addr) in find_dup_si()
1864 e->io.slave_addr = info->io.slave_addr; in find_dup_si()
1878 * If the user gave us a hard-coded device at the same in ipmi_si_add_smi()
1882 if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD && in ipmi_si_add_smi()
1883 ipmi_si_hardcode_match(io->addr_space, io->addr_data)) { in ipmi_si_add_smi()
1884 dev_info(io->dev, in ipmi_si_add_smi()
1885 "Hard-coded device at this address already exists"); in ipmi_si_add_smi()
1886 return -ENODEV; in ipmi_si_add_smi()
1889 if (!io->io_setup) { in ipmi_si_add_smi()
1891 io->addr_space == IPMI_IO_ADDR_SPACE) { in ipmi_si_add_smi()
1892 io->io_setup = ipmi_si_port_setup; in ipmi_si_add_smi()
1893 } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) { in ipmi_si_add_smi()
1894 io->io_setup = ipmi_si_mem_setup; in ipmi_si_add_smi()
1896 return -EINVAL; in ipmi_si_add_smi()
1902 return -ENOMEM; in ipmi_si_add_smi()
1903 spin_lock_init(&new_smi->si_lock); in ipmi_si_add_smi()
1905 new_smi->io = *io; in ipmi_si_add_smi()
1910 if (new_smi->io.addr_source == SI_ACPI && in ipmi_si_add_smi()
1911 dup->io.addr_source == SI_SMBIOS) { in ipmi_si_add_smi()
1913 dev_info(dup->io.dev, in ipmi_si_add_smi()
1914 "Removing SMBIOS-specified %s state machine in favor of ACPI\n", in ipmi_si_add_smi()
1915 si_to_str[new_smi->io.si_info->type]); in ipmi_si_add_smi()
1918 dev_info(new_smi->io.dev, in ipmi_si_add_smi()
1919 "%s-specified %s state machine: duplicate\n", in ipmi_si_add_smi()
1920 ipmi_addr_src_to_str(new_smi->io.addr_source), in ipmi_si_add_smi()
1921 si_to_str[new_smi->io.si_info->type]); in ipmi_si_add_smi()
1922 rv = -EBUSY; in ipmi_si_add_smi()
1928 pr_info("Adding %s-specified %s state machine\n", in ipmi_si_add_smi()
1929 ipmi_addr_src_to_str(new_smi->io.addr_source), in ipmi_si_add_smi()
1930 si_to_str[new_smi->io.si_info->type]); in ipmi_si_add_smi()
1932 list_add_tail(&new_smi->link, &smi_infos); in ipmi_si_add_smi()
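ipmi_si_add_smi() above walks the global smi_infos list for an interface that decodes the same address before adding the new one, preferring an ACPI-described entry over an SMBIOS-described duplicate. A kernel-style sketch of the duplicate lookup itself (types and fields are stand-ins):

```c
#include <linux/list.h>

struct demo_smi {
	struct list_head link;
	int addr_space;
	unsigned long addr_data;
};

static LIST_HEAD(demo_smis);

/* Return an already-registered interface at the same address, if any. */
static struct demo_smi *demo_find_dup(struct demo_smi *new_smi)
{
	struct demo_smi *e;

	list_for_each_entry(e, &demo_smis, link) {
		if (e->addr_space == new_smi->addr_space &&
		    e->addr_data == new_smi->addr_data)
			return e;
	}
	return NULL;
}
```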
1951 pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n", in try_smi_init()
1952 ipmi_addr_src_to_str(new_smi->io.addr_source), in try_smi_init()
1953 si_to_str[new_smi->io.si_info->type], in try_smi_init()
1954 addr_space_to_str[new_smi->io.addr_space], in try_smi_init()
1955 new_smi->io.addr_data, in try_smi_init()
1956 new_smi->io.slave_addr, new_smi->io.irq); in try_smi_init()
1958 switch (new_smi->io.si_info->type) { in try_smi_init()
1960 new_smi->handlers = &kcs_smi_handlers; in try_smi_init()
1964 new_smi->handlers = &smic_smi_handlers; in try_smi_init()
1968 new_smi->handlers = &bt_smi_handlers; in try_smi_init()
1973 rv = -EIO; in try_smi_init()
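The switch above picks the state-machine handler table (KCS, SMIC or BT) from the interface type, and every later hardware access goes through those function pointers. A small userspace sketch of that ops-table selection (the structure and its members are illustrative):

```c
#include <stdio.h>

enum demo_si_type { DEMO_KCS, DEMO_SMIC, DEMO_BT };

struct demo_handlers {
	const char *name;
	int (*start_transaction)(void *sm, const unsigned char *data, unsigned int size);
};

static int demo_start(void *sm, const unsigned char *data, unsigned int size) { return 0; }

static const struct demo_handlers demo_kcs  = { "kcs",  demo_start };
static const struct demo_handlers demo_smic = { "smic", demo_start };
static const struct demo_handlers demo_bt   = { "bt",   demo_start };

static const struct demo_handlers *demo_pick(enum demo_si_type type)
{
	switch (type) {
	case DEMO_KCS:  return &demo_kcs;
	case DEMO_SMIC: return &demo_smic;
	case DEMO_BT:   return &demo_bt;
	}
	return NULL;	/* unknown type: the driver fails with -EIO here */
}

int main(void)
{
	printf("selected: %s\n", demo_pick(DEMO_BT)->name);
	return 0;
}
```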
1977 new_smi->si_num = smi_num; in try_smi_init()
1980 if (!new_smi->io.dev) { in try_smi_init()
1982 rv = -EIO; in try_smi_init()
1987 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); in try_smi_init()
1988 if (!new_smi->si_sm) { in try_smi_init()
1989 rv = -ENOMEM; in try_smi_init()
1992 new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm, in try_smi_init()
1993 &new_smi->io); in try_smi_init()
1996 rv = new_smi->io.io_setup(&new_smi->io); in try_smi_init()
1998 dev_err(new_smi->io.dev, "Could not set up I/O space\n"); in try_smi_init()
2002 /* Do low-level detection first. */ in try_smi_init()
2003 if (new_smi->handlers->detect(new_smi->si_sm)) { in try_smi_init()
2004 if (new_smi->io.addr_source) in try_smi_init()
2005 dev_err(new_smi->io.dev, in try_smi_init()
2007 rv = -ENODEV; in try_smi_init()
2017 if (new_smi->io.addr_source) in try_smi_init()
2018 dev_err(new_smi->io.dev, in try_smi_init()
2027 new_smi->waiting_msg = NULL; in try_smi_init()
2028 new_smi->curr_msg = NULL; in try_smi_init()
2029 atomic_set(&new_smi->req_events, 0); in try_smi_init()
2030 new_smi->run_to_completion = false; in try_smi_init()
2032 atomic_set(&new_smi->stats[i], 0); in try_smi_init()
2034 new_smi->interrupt_disabled = true; in try_smi_init()
2035 atomic_set(&new_smi->need_watch, 0); in try_smi_init()
2039 new_smi->has_event_buffer = true; in try_smi_init()
2048 * IRQ is defined to be set when non-zero. req_events will in try_smi_init()
2051 if (new_smi->io.irq) { in try_smi_init()
2052 new_smi->interrupt_disabled = false; in try_smi_init()
2053 atomic_set(&new_smi->req_events, 1); in try_smi_init()
2056 dev_set_drvdata(new_smi->io.dev, new_smi); in try_smi_init()
2057 rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group); in try_smi_init()
2059 dev_err(new_smi->io.dev, in try_smi_init()
2064 new_smi->dev_group_added = true; in try_smi_init()
2068 new_smi->io.dev, in try_smi_init()
2069 new_smi->io.slave_addr); in try_smi_init()
2071 dev_err(new_smi->io.dev, in try_smi_init()
2080 dev_info(new_smi->io.dev, "IPMI %s interface initialized\n", in try_smi_init()
2081 si_to_str[new_smi->io.si_info->type]); in try_smi_init()
2083 WARN_ON(new_smi->io.dev->init_name != NULL); in try_smi_init()
2086 if (rv && new_smi->io.io_cleanup) { in try_smi_init()
2087 new_smi->io.io_cleanup(&new_smi->io); in try_smi_init()
2088 new_smi->io.io_cleanup = NULL; in try_smi_init()
2091 if (rv && new_smi->si_sm) { in try_smi_init()
2092 kfree(new_smi->si_sm); in try_smi_init()
2093 new_smi->si_sm = NULL; in try_smi_init()
2104 return (e1->io.addr_space == e2->io.addr_space && in ipmi_smi_info_same()
2105 e1->io.addr_data == e2->io.addr_data); in ipmi_smi_info_same()
2136 if (!e->io.irq) in init_ipmi_si()
2146 if (e2->io.irq && ipmi_smi_info_same(e, e2)) { in init_ipmi_si()
2161 if (e->io.irq) in init_ipmi_si()
2170 if (!e2->io.irq) in init_ipmi_si()
2197 return -ENODEV; in init_ipmi_si()
2210 while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { in wait_msg_processed()
2212 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) in wait_msg_processed()
2223 if (smi_info->dev_group_added) { in shutdown_smi()
2224 device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group); in shutdown_smi()
2225 smi_info->dev_group_added = false; in shutdown_smi()
2227 if (smi_info->io.dev) in shutdown_smi()
2228 dev_set_drvdata(smi_info->io.dev, NULL); in shutdown_smi()
2234 smi_info->interrupt_disabled = true; in shutdown_smi()
2235 if (smi_info->io.irq_cleanup) { in shutdown_smi()
2236 smi_info->io.irq_cleanup(&smi_info->io); in shutdown_smi()
2237 smi_info->io.irq_cleanup = NULL; in shutdown_smi()
2255 if (smi_info->handlers) in shutdown_smi()
2260 if (smi_info->handlers) in shutdown_smi()
2261 smi_info->handlers->cleanup(smi_info->si_sm); in shutdown_smi()
2263 if (smi_info->io.io_cleanup) { in shutdown_smi()
2264 smi_info->io.io_cleanup(&smi_info->io); in shutdown_smi()
2265 smi_info->io.io_cleanup = NULL; in shutdown_smi()
2268 kfree(smi_info->si_sm); in shutdown_smi()
2269 smi_info->si_sm = NULL; in shutdown_smi()
2271 smi_info->intf = NULL; in shutdown_smi()
2276 * smi_info->intf check.
2283 list_del(&smi_info->link); in cleanup_one_si()
2284 ipmi_unregister_smi(smi_info->intf); in cleanup_one_si()
2288 void ipmi_si_remove_by_dev(struct device *dev) in ipmi_si_remove_by_dev() argument
2294 if (e->io.dev == dev) { in ipmi_si_remove_by_dev()
2307 struct device *dev = NULL; in ipmi_si_remove_by_data() local
2311 if (e->io.addr_space != addr_space) in ipmi_si_remove_by_data()
2313 if (e->io.si_info->type != si_type) in ipmi_si_remove_by_data()
2315 if (e->io.addr_data == addr) { in ipmi_si_remove_by_data()
2316 dev = get_device(e->io.dev); in ipmi_si_remove_by_data()
2322 return dev; in ipmi_si_remove_by_data()
2348 MODULE_ALIAS("platform:dmi-ipmi-si");