/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021, 2024-2025 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"

struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
struct iwl_rf_cfg;

/**
 * DOC: Operational mode - what is it?
 *
 * The operational mode (a.k.a. op_mode) is the layer that implements
 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
 * the transport API to access the HW. The op_mode doesn't need to know how
 * the underlying HW works, since the transport layer takes care of that.
 *
 * There can be several op_modes: e.g. different fw APIs will require
 * different op_modes. This is why the op_mode is virtualized.
 */

/**
 * DOC: Life cycle of the Operational mode
 *
 * The operational mode has a very simple life cycle.
 *
 * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
 *    capabilities advertised by the fw file (in TLV format).
 * 2) The driver layer starts the op_mode (ops->start).
 * 3) The op_mode registers with mac80211.
 * 4) The op_mode is governed by mac80211.
 * 5) The driver layer stops the op_mode.
 */

/**
 * enum iwl_fw_error_type - FW error types/sources
 * @IWL_ERR_TYPE_IRQ: "normal" FW error through an IRQ
 * @IWL_ERR_TYPE_NMI_FORCED: NMI was forced by driver
 * @IWL_ERR_TYPE_RESET_HS_TIMEOUT: reset handshake timed out,
 *	any debug collection must happen synchronously as
 *	the device will be shut down
 * @IWL_ERR_TYPE_CMD_QUEUE_FULL: command queue was full
 * @IWL_ERR_TYPE_TOP_RESET_BY_BT: TOP reset initiated by BT
 * @IWL_ERR_TYPE_TOP_FATAL_ERROR: TOP fatal error
 * @IWL_ERR_TYPE_TOP_RESET_FAILED: TOP reset failed
 * @IWL_ERR_TYPE_DEBUGFS: error/reset indication from debugfs
 */
enum iwl_fw_error_type {
	IWL_ERR_TYPE_IRQ,
	IWL_ERR_TYPE_NMI_FORCED,
	IWL_ERR_TYPE_RESET_HS_TIMEOUT,
	IWL_ERR_TYPE_CMD_QUEUE_FULL,
	IWL_ERR_TYPE_TOP_RESET_BY_BT,
	IWL_ERR_TYPE_TOP_FATAL_ERROR,
	IWL_ERR_TYPE_TOP_RESET_FAILED,
	IWL_ERR_TYPE_DEBUGFS,
};

/**
 * enum iwl_fw_error_context - error dump context
 * @IWL_ERR_CONTEXT_WORKER: regular from worker context,
 *	opmode must acquire locks and must also check
 *	for @IWL_ERR_CONTEXT_ABORT after acquiring locks
 * @IWL_ERR_CONTEXT_FROM_OPMODE: context is in a call
 *	originating from the opmode, e.g. while resetting
 *	or stopping the device, so opmode must not acquire
 *	any locks
 * @IWL_ERR_CONTEXT_ABORT: after lock acquisition, indicates
 *	that the dump already happened via another callback
 *	(currently only while stopping the device) via the
 *	@IWL_ERR_CONTEXT_FROM_OPMODE context, and this call
 *	must be aborted
 */
enum iwl_fw_error_context {
	IWL_ERR_CONTEXT_WORKER,
	IWL_ERR_CONTEXT_FROM_OPMODE,
	IWL_ERR_CONTEXT_ABORT,
};

/**
 * struct iwl_fw_error_dump_mode - error dump mode for callback
 * @type: The reason for the dump, per &enum iwl_fw_error_type.
 * @context: The context for the dump, may also indicate this
 *	call needs to be skipped. This MUST be checked before
 *	and after acquiring any locks in the op-mode!
 */
struct iwl_fw_error_dump_mode {
	enum iwl_fw_error_type type;
	enum iwl_fw_error_context context;
};
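
/*
 * Illustrative sketch only (not part of this header): a dump_error
 * implementation honoring the locking rules documented in
 * &enum iwl_fw_error_context above. The names my_priv, my_opmode_priv()
 * and my_collect_fw_dump() are hypothetical; only the enum values and
 * the callback signature come from this file.
 *
 *	static void my_opmode_dump_error(struct iwl_op_mode *op_mode,
 *					 struct iwl_fw_error_dump_mode *mode)
 *	{
 *		struct my_priv *priv = my_opmode_priv(op_mode);
 *		bool locked = mode->context == IWL_ERR_CONTEXT_WORKER;
 *
 *		// From the opmode itself the relevant locks are already
 *		// held, so only lock in the worker context.
 *		if (locked)
 *			mutex_lock(&priv->mutex);
 *
 *		// Re-check after locking: the dump may already have run
 *		// via the opmode path while this worker waited.
 *		if (mode->context != IWL_ERR_CONTEXT_ABORT)
 *			my_collect_fw_dump(priv, mode->type);
 *
 *		if (locked)
 *			mutex_unlock(&priv->mutex);
 *	}
 */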

/**
 * struct iwl_op_mode_ops - op_mode specific operations
 *
 * The op_mode exports its ops so that external components can start it and
 * interact with it. The driver layer typically calls the start and stop
 * handlers; the transport layer calls the others.
 *
 * All the handlers MUST be implemented, except @rx_rss which can be left
 * out *iff* the opmode will never run on hardware with multi-queue capability.
 *
 * @start: start the op_mode. The transport layer is already allocated.
 *	May sleep.
 * @stop: stop the op_mode. Must free all the memory allocated.
 *	May sleep.
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
 *	HCMD this Rx responds to. Can't sleep.
 * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
 *	received on the RSS queue(s). The queue parameter indicates which of the
 *	RSS queues received this frame; it will always be non-zero.
 *	This method must not sleep.
 * @queue_full: notifies that a HW queue is full.
 *	Must be atomic and called with BH disabled.
 * @queue_not_full: notifies that a HW queue is not full any more.
 *	Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
 *	the radio is killed. Return %true if the device should be stopped by
 *	the transport immediately after the call. May sleep.
 *	Note that this must not return %true for newer devices using gen2 PCIe
 *	transport.
 * @free_skb: allows the transport layer to free skbs that haven't been
 *	reclaimed by the op_mode. This can happen when the driver is freed and
 *	there are Tx packets pending in the transport layer.
 *	Must be atomic.
 * @nic_error: error notification. Must be atomic; the op mode should handle
 *	the error (e.g. abort notification waiters) and print the error if
 *	applicable.
 * @dump_error: NIC error dump collection (can sleep, synchronous)
 * @sw_reset: (maybe) initiate a software reset, return %true if started
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep.
 * @wimax_active: invoked when WiMax becomes active. May sleep.
 * @time_point: called when the transport layer wants to collect debug data
 * @device_powered_off: called upon resume from hibernation, among other
 *	cases. The op_mode needs to reset its internal state because the
 *	device did not survive the system state transition: the firmware is
 *	no longer running, etc.
 * @dump: the op_mode needs to collect the firmware dump upon this handler
 *	being called.
 */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_rf_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode,
			  enum iwl_fw_error_type type);
	void (*dump_error)(struct iwl_op_mode *op_mode,
			   struct iwl_fw_error_dump_mode *mode);
	bool (*sw_reset)(struct iwl_op_mode *op_mode,
			 enum iwl_fw_error_type type);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	void (*time_point)(struct iwl_op_mode *op_mode,
			   enum iwl_fw_ini_time_point tp_id,
			   union iwl_dbg_tlv_tp_data *tp_data);
	void (*device_powered_off)(struct iwl_op_mode *op_mode);
	void (*dump)(struct iwl_op_mode *op_mode);
};

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
void iwl_opmode_deregister(const char *name);
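
/*
 * Illustrative sketch only (hypothetical "my_opmode" names): per the
 * life-cycle DOC above, an op_mode implementation fills in a
 * &struct iwl_op_mode_ops and registers it by name from its module init,
 * so that the driver layer (iwl-drv.c) can start it once the fw file has
 * been parsed. Only iwl_opmode_register()/iwl_opmode_deregister() and the
 * ops structure are real; the handler functions are made up, and the
 * remaining mandatory handlers are omitted for brevity.
 *
 *	static const struct iwl_op_mode_ops my_opmode_ops = {
 *		.start = my_opmode_start,
 *		.stop = my_opmode_stop,
 *		.rx = my_opmode_rx,
 *		.queue_full = my_opmode_queue_full,
 *		.queue_not_full = my_opmode_queue_not_full,
 *		.hw_rf_kill = my_opmode_hw_rf_kill,
 *		.free_skb = my_opmode_free_skb,
 *		.nic_error = my_opmode_nic_error,
 *	};
 *
 *	static int __init my_opmode_init(void)
 *	{
 *		return iwl_opmode_register("my_opmode", &my_opmode_ops);
 *	}
 *	module_init(my_opmode_init);
 *
 *	static void __exit my_opmode_exit(void)
 *	{
 *		iwl_opmode_deregister("my_opmode");
 *	}
 *	module_exit(my_opmode_exit);
 */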

/**
 * struct iwl_op_mode - operational mode
 * @ops: pointer to its own ops
 *
 * This holds an implementation of the mac80211 / fw API.
 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	char op_mode_specific[] __aligned(sizeof(void *));
};
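
/*
 * Illustrative sketch only: one way an implementation can carry its
 * private state is to allocate it in the trailing op_mode_specific[]
 * storage, which __aligned(sizeof(void *)) keeps suitably aligned for
 * pointer members. This would typically happen in the @start handler
 * and be undone in @stop. "my_priv" and "my_opmode_ops" are hypothetical
 * names continuing the examples above.
 *
 *	struct my_priv *priv;
 *	struct iwl_op_mode *op_mode;
 *
 *	op_mode = kzalloc(sizeof(*op_mode) + sizeof(*priv), GFP_KERNEL);
 *	if (!op_mode)
 *		return NULL;
 *	op_mode->ops = &my_opmode_ops;
 *	priv = (void *)op_mode->op_mode_specific;
 */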

static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->stop(op_mode);
}

static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb)
{
	op_mode->ops->rx(op_mode, napi, rxb);
}

static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}

static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();
	return op_mode->ops->hw_rf_kill(op_mode, state);
}

static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!op_mode))
		return;
	op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode,
					 enum iwl_fw_error_type type)
{
	op_mode->ops->nic_error(op_mode, type);
}

static inline void iwl_op_mode_dump_error(struct iwl_op_mode *op_mode,
					  struct iwl_fw_error_dump_mode *mode)
{
	might_sleep();

	if (WARN_ON(mode->type == IWL_ERR_TYPE_TOP_RESET_BY_BT))
		return;

	if (op_mode->ops->dump_error)
		op_mode->ops->dump_error(op_mode, mode);
}

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();
	if (op_mode->ops->nic_config)
		op_mode->ops->nic_config(op_mode);
}

static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->wimax_active(op_mode);
}

static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
					  enum iwl_fw_ini_time_point tp_id,
					  union iwl_dbg_tlv_tp_data *tp_data)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(op_mode, tp_id, tp_data);
}

static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off)
		return;
	op_mode->ops->device_powered_off(op_mode);
}

static inline void iwl_op_mode_dump(struct iwl_op_mode *op_mode)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->dump)
		return;
	op_mode->ops->dump(op_mode);
}

#endif /* __iwl_op_mode_h__ */