// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2025 Intel Corporation
 */
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "pcie/iwl-context-info.h"
#include "pcie/iwl-context-info-v2.h"
#include "internal.h"
#include "fw/dbg.h"

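/* How long to wait for the firmware to acknowledge a reset handshake (HZ / 5 == 200ms) */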
#define FW_RESET_TIMEOUT (HZ / 5)

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
{
        int ret = 0;

        IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

        /*
         * Use "set_bit" below rather than "write", to preserve any hardware
         * bits already set by default after reset.
         */

        /*
         * Disable L0s without affecting L1;
         * don't wait for ICH L0s (ICH bug W/A)
         */
        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

        /* Set FH wait threshold to maximum (HW error during stress W/A) */
        iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

        /*
         * Enable HAP INTA (interrupt from management bus) to
         * wake device's PCI Express link L1a -> L0s
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_HAP_WAKE);

        iwl_pcie_apm_config(trans);

        ret = iwl_finish_nic_init(trans);
        if (ret)
                return ret;

        set_bit(STATUS_DEVICE_ENABLED, &trans->status);

        return 0;
}

static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
        IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

        if (op_mode_leave) {
                if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                        iwl_pcie_gen2_apm_init(trans);

                /* inform ME that we are leaving */
                iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                            CSR_RESET_LINK_PWR_MGMT_DISABLED);
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_WAKE_ME |
                            CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN);
                mdelay(1);
                iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                              CSR_RESET_LINK_PWR_MGMT_DISABLED);
                mdelay(5);
        }

        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

        /* Stop device's DMA activity */
        iwl_pcie_apm_stop_master(trans);

        iwl_trans_pcie_sw_reset(trans, false);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
                iwl_clear_bit(trans, CSR_GP_CNTRL,
                              CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
        else
                iwl_clear_bit(trans, CSR_GP_CNTRL,
                              CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;

        trans_pcie->fw_reset_state = FW_RESET_REQUESTED;

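        /*
         * Request the reset handshake from the firmware; the doorbell
         * mechanism depends on the device family: pre-AX210 uses the NMI
         * register, AX210 the ISR6 doorbell, later families the doorbell
         * vector CSR.
         */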
        if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
                                    UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
        else if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
                iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
                                    UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
        else
                iwl_write32(trans, CSR_DOORBELL_VECTOR,
                            UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);

        /* wait 200ms */
        ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
                                 trans_pcie->fw_reset_state != FW_RESET_REQUESTED,
                                 FW_RESET_TIMEOUT);
        if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) {
                bool reset_done;
                u32 inta_hw;

                if (trans_pcie->msix_enabled) {
                        inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
                        reset_done =
                                inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE;
                } else {
                        inta_hw = iwl_read32(trans, CSR_INT);
                        reset_done = inta_hw & CSR_INT_BIT_RESET_DONE;
                }

                IWL_ERR(trans,
                        "timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n",
                        inta_hw, reset_done);

                if (!reset_done) {
                        struct iwl_fw_error_dump_mode mode = {
                                .type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
                                .context = IWL_ERR_CONTEXT_FROM_OPMODE,
                        };
                        iwl_op_mode_nic_error(trans->op_mode,
                                              IWL_ERR_TYPE_RESET_HS_TIMEOUT);
                        iwl_op_mode_dump_error(trans->op_mode, &mode);
                }
        }

        trans_pcie->fw_reset_state = FW_RESET_IDLE;
}

static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->mutex);

        if (trans_pcie->is_down)
                return;

        if (trans->state >= IWL_TRANS_FW_STARTED &&
            trans->conf.fw_reset_handshake) {
                /*
                 * The reset handshake can dump the firmware on timeout, but
                 * that dump should assume the firmware is already dead.
                 */
                trans->state = IWL_TRANS_NO_FW;
                iwl_trans_pcie_fw_reset_handshake(trans);
        }

        trans_pcie->is_down = true;

        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);

        /* device going down, Stop using ICT table */
        iwl_pcie_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
                IWL_DEBUG_INFO(trans,
                               "DEVICE_ENABLED bit was set and is now cleared\n");
                iwl_pcie_synchronize_irqs(trans);
                iwl_pcie_rx_napi_sync(trans);
                iwl_txq_gen2_tx_free(trans);
                iwl_pcie_rx_stop(trans);
        }

        iwl_pcie_ctxt_info_free_paging(trans);
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
                iwl_pcie_ctxt_info_v2_free(trans, false);
        else
                iwl_pcie_ctxt_info_free(trans);

        /* Stop the device, and put it in low power state */
        iwl_pcie_gen2_apm_stop(trans, false);

        /* re-take ownership to prevent other users from stealing the device */
        iwl_trans_pcie_sw_reset(trans, true);

        /*
         * Upon stop, the IVAR table gets erased, so msi-x won't
         * work. This causes a bug in RF-KILL flows, since the interrupt
         * that enables radio won't fire on the correct irq, and the
         * driver won't be able to handle the interrupt.
         * Configure the IVAR table again after reset.
         */
        iwl_pcie_conf_msix_hw(trans_pcie);

        /*
         * Upon stop, the APM issues an interrupt if HW RF kill is set.
         * This is a bug in certain versions of the hardware.
         * Certain devices also keep sending the HW RF kill interrupt all
         * the time unless it is ACKed, even when the interrupt should be
         * masked. Re-ACK all the interrupts here.
         */
        iwl_disable_interrupts(trans);

        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwl_enable_rfkill_int(trans);
}

void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool was_in_rfkill;

        iwl_op_mode_time_point(trans->op_mode,
                               IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
                               NULL);

        mutex_lock(&trans_pcie->mutex);
        trans_pcie->opmode_down = true;
        was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
        _iwl_trans_pcie_gen2_stop_device(trans);
        iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
        mutex_unlock(&trans_pcie->mutex);
}

static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
                               trans->mac_cfg->base->min_txq_size);
        int ret;

        /* TODO: most of the logic can be removed in A0 - but not in Z0 */
        spin_lock_bh(&trans_pcie->irq_lock);
        ret = iwl_pcie_gen2_apm_init(trans);
        spin_unlock_bh(&trans_pcie->irq_lock);
        if (ret)
                return ret;

        iwl_op_mode_nic_config(trans->op_mode);

        /* Allocate the RX queue, or reset if it is already allocated */
        if (iwl_pcie_gen2_rx_init(trans))
                return -ENOMEM;

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size))
                return -ENOMEM;

        /* enable shadow regs in HW */
        iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
        IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");

        return 0;
}

static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        char *buf = trans_pcie->rf_name;
        size_t buflen = sizeof(trans_pcie->rf_name);
        size_t pos;
        u32 version;

        if (buf[0])
                return;

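        /* Map the RF module ID to a human-readable name */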
        switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
                pos = scnprintf(buf, buflen, "JF");
                break;
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
                pos = scnprintf(buf, buflen, "GF");
                break;
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
                pos = scnprintf(buf, buflen, "GF4");
                break;
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
                pos = scnprintf(buf, buflen, "HR");
                break;
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
                pos = scnprintf(buf, buflen, "HR1");
                break;
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
                pos = scnprintf(buf, buflen, "HRCDB");
                break;
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):
                pos = scnprintf(buf, buflen, "FM");
                break;
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):
                if (SILICON_Z_STEP ==
                    CSR_HW_RFID_STEP(trans->info.hw_rf_id))
                        pos = scnprintf(buf, buflen, "WHTC");
                else
                        pos = scnprintf(buf, buflen, "WH");
                break;
        default:
                return;
        }

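        /* For HR-family RFs, append the version reported in CNVI_MBOX_C */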
        switch (CSR_HW_RFID_TYPE(trans->info.hw_rf_id)) {
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
        case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
                version = iwl_read_prph(trans, CNVI_MBOX_C);
                switch (version) {
                case 0x20000:
                        pos += scnprintf(buf + pos, buflen - pos, " B3");
                        break;
                case 0x120000:
                        pos += scnprintf(buf + pos, buflen - pos, " B5");
                        break;
                default:
                        pos += scnprintf(buf + pos, buflen - pos,
                                         " (0x%x)", version);
                        break;
                }
                break;
        default:
                break;
        }

        pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
                         trans->info.hw_rf_id);

        IWL_INFO(trans, "Detected RF %s\n", buf);

        /*
         * also add a \n for debugfs - need to do it after printing
         * since our IWL_INFO machinery wants to see a static \n at
         * the end of the string
         */
        pos += scnprintf(buf + pos, buflen - pos, "\n");
}

void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_pcie_reset_ict(trans);

        /* make sure all queues are not stopped/used */
        memset(trans_pcie->txqs.queue_stopped, 0,
               sizeof(trans_pcie->txqs.queue_stopped));
        memset(trans_pcie->txqs.queue_used, 0,
               sizeof(trans_pcie->txqs.queue_used));

        /* now that we got alive we can free the fw image & the context info;
         * the paging memory cannot be freed since the FW will still use it
         */
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
                iwl_pcie_ctxt_info_v2_free(trans, true);
        else
                iwl_pcie_ctxt_info_free(trans);

        /*
         * Re-enable all the interrupts, including the RF-Kill one, now that
         * the firmware is alive.
         */
        iwl_enable_interrupts(trans);
        mutex_lock(&trans_pcie->mutex);
        iwl_pcie_check_hw_rf_kill(trans);

        iwl_pcie_get_rf_name(trans);
        mutex_unlock(&trans_pcie->mutex);

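        /* On BZ and later, record whether the PMU STEP flow forces URM */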
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
                trans->step_urm = !!(iwl_read_umac_prph(trans,
                                                        CNVI_PMU_STEP_FLOW) &
                                     CNVI_PMU_STEP_FLOW_FORCE_URM);
}

static bool iwl_pcie_set_ltr(struct iwl_trans *trans)
{
        u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
                      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
                                      CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
                      u32_encode_bits(250,
                                      CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
                      CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
                      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
                                      CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
                      u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

        /*
         * To workaround hardware latency issues during the boot process,
         * initialize the LTR to ~250 usec (see ltr_val above).
         * The firmware initializes this again later (to a smaller value).
         */
        if ((trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
             trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
            !trans->mac_cfg->integrated) {
                iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
                return true;
        }

        if (trans->mac_cfg->integrated &&
            trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
                iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
                iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
                return true;
        }

        if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
                /* First clear the interrupt, just in case */
                iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD,
                            MSIX_HW_INT_CAUSES_REG_IML);
                /* In this case, unfortunately the same ROM bug exists in the
                 * device (not setting LTR correctly), but we don't have control
                 * over the settings from the host due to some hardware security
                 * features. The only workaround we've been able to come up with
                 * so far is to try to keep the CPU and device busy by polling
                 * it and the IML (image loader) completed interrupt.
                 */
                return false;
        }

        /* nothing needs to be done on other devices */
        return true;
}

static void iwl_pcie_spin_for_iml(struct iwl_trans *trans)
{
/* in practice, this seems to complete in around 20-30ms at most, wait 100 */
#define IML_WAIT_TIMEOUT	(HZ / 10)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long end_time = jiffies + IML_WAIT_TIMEOUT;
        u32 value, loops = 0;
        bool irq = false;

        if (WARN_ON(!trans_pcie->iml))
                return;

        value = iwl_read32(trans, CSR_LTR_LAST_MSG);
        IWL_DEBUG_INFO(trans, "Polling for IML load - CSR_LTR_LAST_MSG=0x%x\n",
                       value);

        while (time_before(jiffies, end_time)) {
                if (iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD) &
                    MSIX_HW_INT_CAUSES_REG_IML) {
                        irq = true;
                        break;
                }
                /* Keep the CPU and device busy. */
                value = iwl_read32(trans, CSR_LTR_LAST_MSG);
                loops++;
        }

        IWL_DEBUG_INFO(trans,
                       "Polled for IML load: irq=%d, loops=%d, CSR_LTR_LAST_MSG=0x%x\n",
                       irq, loops, value);

        /* We don't fail here even if we timed out - maybe we get lucky and the
         * interrupt comes in later (and we get alive from firmware) and then
         * we're all happy - but if not we'll fail on the alive timeout or get
         * some other error out.
         */
}

int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
                                 const struct iwl_fw *fw,
                                 const struct fw_img *img,
                                 bool run_in_rfkill)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, keep_ram_busy;
        bool top_reset_done = false;
        int ret;

        mutex_lock(&trans_pcie->mutex);
again:
        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
                ret = -EIO;
                goto out;
        }

        iwl_enable_rfkill_int(trans);

        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        /*
         * We enabled the RF-Kill interrupt and the handler may very
         * well be running. Disable the interrupts to make sure no other
         * interrupt can be fired.
         */
        iwl_disable_interrupts(trans);

        /* Make sure it finished running */
        iwl_pcie_synchronize_irqs(trans);

        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill) {
                ret = -ERFKILL;
                goto out;
        }

        /* Someone called stop_device, don't try to start_fw */
        if (trans_pcie->is_down) {
                IWL_WARN(trans,
                         "Can't start_fw since the HW hasn't been started\n");
                ret = -EIO;
                goto out;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        ret = iwl_pcie_gen2_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
                goto out;
        }

        if (WARN_ON(trans->do_top_reset &&
                    trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)) {
                ret = -EINVAL;
                goto out;
        }

        /* we need to wait later - set state */
        if (trans->do_top_reset)
                trans_pcie->fw_reset_state = FW_RESET_TOP_REQUESTED;

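        /*
         * Hand the firmware image to the device: AX210 and later use the v2
         * context info (re-allocation is skipped after a TOP reset), older
         * devices the original context info format.
         */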
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                if (!top_reset_done) {
                        ret = iwl_pcie_ctxt_info_v2_alloc(trans, fw, img);
                        if (ret)
                                goto out;
                }

                iwl_pcie_ctxt_info_v2_kick(trans);
        } else {
                ret = iwl_pcie_ctxt_info_init(trans, img);
                if (ret)
                        goto out;
        }

        keep_ram_busy = !iwl_pcie_set_ltr(trans);

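        /* Kick the device CPU: BZ+ via CSR_GP_CNTRL ROM_START, older families via UREG_CPU_INIT_RUN */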
        if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
                IWL_DEBUG_POWER(trans, "function scratch register value is 0x%08x\n",
                                iwl_read32(trans, CSR_FUNC_SCRATCH));
                iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
                iwl_set_bit(trans, CSR_GP_CNTRL,
                            CSR_GP_CNTRL_REG_FLAG_ROM_START);
        } else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
        } else {
                iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
        }

        if (keep_ram_busy)
                iwl_pcie_spin_for_iml(trans);

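        /*
         * If a TOP reset was requested, wait for the firmware to complete it,
         * then bring the HW back up and run the firmware load again (see the
         * "again" label above).
         */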
        if (trans->do_top_reset) {
                trans->do_top_reset = 0;

#define FW_TOP_RESET_TIMEOUT	(HZ / 4)
                ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
                                         trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED,
                                         FW_TOP_RESET_TIMEOUT);

                if (trans_pcie->fw_reset_state != FW_RESET_OK) {
                        if (trans_pcie->fw_reset_state != FW_RESET_TOP_REQUESTED)
                                IWL_ERR(trans,
                                        "TOP reset interrupted by error (state %d)!\n",
                                        trans_pcie->fw_reset_state);
                        else
                                IWL_ERR(trans, "TOP reset timed out!\n");
                        iwl_op_mode_nic_error(trans->op_mode,
                                              IWL_ERR_TYPE_TOP_RESET_FAILED);
                        iwl_trans_schedule_reset(trans,
                                                 IWL_ERR_TYPE_TOP_RESET_FAILED);
                        ret = -EIO;
                        goto out;
                }

                msleep(10);
                IWL_INFO(trans, "TOP reset successful, reinit now\n");
                /* now load the firmware again properly */
                ret = _iwl_trans_pcie_start_hw(trans);
                if (ret) {
                        IWL_ERR(trans, "failed to start HW after TOP reset\n");
                        goto out;
                }
                trans_pcie->prph_scratch->ctrl_cfg.control.control_flags &=
                        ~cpu_to_le32(IWL_PRPH_SCRATCH_TOP_RESET);
                top_reset_done = true;
                goto again;
        }

        /* re-check RF-Kill state since we may have missed the interrupt */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill)
                ret = -ERFKILL;

out:
        mutex_unlock(&trans_pcie->mutex);
        return ret;
}

void iwl_trans_pcie_gen2_op_mode_leave(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        mutex_lock(&trans_pcie->mutex);

        /* disable interrupts - don't enable HW RF kill interrupt */
        iwl_disable_interrupts(trans);

        iwl_pcie_gen2_apm_stop(trans, true);

        iwl_disable_interrupts(trans);

        iwl_pcie_disable_ict(trans);

        mutex_unlock(&trans_pcie->mutex);

        iwl_pcie_synchronize_irqs(trans);
}