/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"

struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
struct iwl_cfg;

/**
 * DOC: Operational mode - what is it ?
 *
 * The operational mode (a.k.a. op_mode) is the layer that implements
 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
 * the transport API to access the HW. The op_mode doesn't need to know how
 * the underlying HW works, since the transport layer takes care of that.
 *
 * There can be several op_modes: e.g. different fw APIs require different
 * op_modes. This is why the op_mode is virtualized.
 */

/**
 * DOC: Life cycle of the Operational mode
 *
 * The operational mode has a very simple life cycle.
 *
 *	1) The driver layer (iwl-drv.c) chooses the op_mode based on the
 *	   capabilities advertised by the fw file (in TLV format).
 *	2) The driver layer starts the op_mode (ops->start)
 *	3) The op_mode registers mac80211
 *	4) The op_mode is governed by mac80211
 *	5) The driver layer stops the op_mode
 */

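/*
 * Illustrative sketch (not part of this API): steps 1) and 5) above rely on
 * the op_mode module registering its ops table with the driver layer. A
 * hypothetical op_mode called "xvm" might do so from its module init/exit
 * hooks roughly as follows; the "iwlxvm" name and the iwl_xvm_* symbols are
 * assumptions for the example only, and iwl_xvm_ops is assumed to be the
 * module's &struct iwl_op_mode_ops instance, defined elsewhere.
 *
 *	static int __init iwl_xvm_init(void)
 *	{
 *		return iwl_opmode_register("iwlxvm", &iwl_xvm_ops);
 *	}
 *	module_init(iwl_xvm_init);
 *
 *	static void __exit iwl_xvm_exit(void)
 *	{
 *		iwl_opmode_deregister("iwlxvm");
 *	}
 *	module_exit(iwl_xvm_exit);
 *
 * The driver layer can then find the registered ops by name once it has
 * parsed the firmware file, and start the op_mode (step 2).
 */
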
/**
 * struct iwl_op_mode_ops - op_mode specific operations
 *
 * The op_mode exports its ops so that external components can start it and
 * interact with it. The driver layer typically calls the start and stop
 * handlers; the transport layer calls the others.
 *
 * All the handlers MUST be implemented, except @rx_rss which can be left
 * out *iff* the opmode will never run on hardware with multi-queue capability.
 *
 * @start: start the op_mode. The transport layer is already allocated.
 *	May sleep.
 * @stop: stop the op_mode. Must free all the memory allocated.
 *	May sleep.
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is
 *	the HCMD this Rx responds to. Can't sleep.
 * @rx_rss: data queue RX notification to the op_mode, for (data)
 *	notifications received on the RSS queue(s). The queue parameter
 *	indicates which of the RSS queues received this frame; it will always
 *	be non-zero. This method must not sleep.
 * @queue_full: notifies that a HW queue is full.
 *	Must be atomic and called with BH disabled.
 * @queue_not_full: notifies that a HW queue is not full any more.
 *	Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means
 *	that the radio is killed. Return %true if the device should be
 *	stopped by the transport immediately after the call. May sleep.
 *	Note that this must not return %true for newer devices using gen2
 *	PCIe transport.
 * @free_skb: allows the transport layer to free skbs that haven't been
 *	reclaimed by the op_mode. This can happen when the driver is freed
 *	and there are Tx packets pending in the transport layer.
 *	Must be atomic.
 * @nic_error: error notification. Must be atomic and must be called with BH
 *	disabled, unless the sync parameter is true.
 * @cmd_queue_full: called when the command queue gets full. Must be atomic
 *	and called with BH disabled.
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep.
 * @wimax_active: invoked when WiMax becomes active. May sleep.
 * @time_point: called when the transport layer wants to collect debug data.
 * @device_powered_off: called upon resume from hibernation, and in other
 *	cases where the device may have lost power across a system state
 *	transition. The op_mode needs to reset its internal state because the
 *	device did not survive the transition and the firmware is no longer
 *	running.
 */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode, bool sync);
	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	void (*time_point)(struct iwl_op_mode *op_mode,
			   enum iwl_fw_ini_time_point tp_id,
			   union iwl_dbg_tlv_tp_data *tp_data);
	void (*device_powered_off)(struct iwl_op_mode *op_mode);
};

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
void iwl_opmode_deregister(const char *name);

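/*
 * Minimal sketch of an ops table (illustrative only, assuming hypothetical
 * iwl_xvm_* handlers defined elsewhere in the same module): every handler is
 * wired up except .rx_rss, which may be left NULL only if the op_mode never
 * runs on multi-queue capable hardware, as noted above.
 *
 *	static const struct iwl_op_mode_ops iwl_xvm_ops = {
 *		.start			= iwl_xvm_start,
 *		.stop			= iwl_xvm_stop,
 *		.rx			= iwl_xvm_rx,
 *		.rx_rss			= iwl_xvm_rx_rss,
 *		.queue_full		= iwl_xvm_queue_full,
 *		.queue_not_full		= iwl_xvm_queue_not_full,
 *		.hw_rf_kill		= iwl_xvm_hw_rf_kill,
 *		.free_skb		= iwl_xvm_free_skb,
 *		.nic_error		= iwl_xvm_nic_error,
 *		.cmd_queue_full		= iwl_xvm_cmd_queue_full,
 *		.nic_config		= iwl_xvm_nic_config,
 *		.wimax_active		= iwl_xvm_wimax_active,
 *		.time_point		= iwl_xvm_time_point,
 *		.device_powered_off	= iwl_xvm_device_powered_off,
 *	};
 */
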
/**
 * struct iwl_op_mode - operational mode
 * @ops: pointer to its own ops
 *
 * This holds an implementation of the mac80211 / fw API.
 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	char op_mode_specific[] __aligned(sizeof(void *));
};

static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->stop(op_mode);
}

static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb)
{
	return op_mode->ops->rx(op_mode, napi, rxb);
}

static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}

static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();
	return op_mode->ops->hw_rf_kill(op_mode, state);
}

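/*
 * How op_mode_specific is typically used (illustrative sketch, not mandated
 * by this header): the op_mode allocates struct iwl_op_mode with its private
 * state appended in the flexible array, so a single pointer can be returned
 * from ops->start() and later mapped back to the private struct. The
 * struct iwl_xvm type, the helper macro and the allocation style below are
 * assumptions for the example; real op_modes may allocate differently.
 *
 *	struct iwl_xvm { ... };		// op_mode private state
 *
 *	#define IWL_OP_MODE_GET_XVM(_op_mode) \
 *		((struct iwl_xvm *)(_op_mode)->op_mode_specific)
 *
 *	// inside the hypothetical ops->start() handler:
 *	op_mode = kzalloc(sizeof(*op_mode) + sizeof(struct iwl_xvm),
 *			  GFP_KERNEL);
 *	if (!op_mode)
 *		return NULL;
 *	op_mode->ops = &iwl_xvm_ops;
 *	xvm = IWL_OP_MODE_GET_XVM(op_mode);
 *	// ... initialize xvm, start firmware, register with mac80211 ...
 *	return op_mode;
 */
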
static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!op_mode))
		return;
	op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
	op_mode->ops->nic_error(op_mode, sync);
}

static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	op_mode->ops->cmd_queue_full(op_mode);
}

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();
	if (op_mode->ops->nic_config)
		op_mode->ops->nic_config(op_mode);
}

static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();
	op_mode->ops->wimax_active(op_mode);
}

static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
					  enum iwl_fw_ini_time_point tp_id,
					  union iwl_dbg_tlv_tp_data *tp_data)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(op_mode, tp_id, tp_data);
}

static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
{
	if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off)
		return;
	op_mode->ops->device_powered_off(op_mode);
}

#endif /* __iwl_op_mode_h__ */