xref: /linux/drivers/net/wireless/intel/iwlwifi/mld/notif.c (revision 8934827db5403eae57d4537114a9ff88b0a8460f)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2024-2025 Intel Corporation
4  */
5 
6 #include "mld.h"
7 #include "notif.h"
8 #include "scan.h"
9 #include "iface.h"
10 #include "mlo.h"
11 #include "iwl-trans.h"
12 #include "fw/file.h"
13 #include "fw/dbg.h"
14 #include "fw/api/cmdhdr.h"
15 #include "fw/api/mac-cfg.h"
16 #include "session-protect.h"
17 #include "fw/api/time-event.h"
18 #include "fw/api/tx.h"
19 #include "fw/api/rs.h"
20 #include "fw/api/offload.h"
21 #include "fw/api/stats.h"
22 #include "fw/api/rfi.h"
23 #include "fw/api/coex.h"
24 
25 #include "mcc.h"
26 #include "link.h"
27 #include "tx.h"
28 #include "rx.h"
29 #include "tlc.h"
30 #include "agg.h"
31 #include "mac80211.h"
32 #include "thermal.h"
33 #include "roc.h"
34 #include "stats.h"
35 #include "coex.h"
36 #include "time_sync.h"
37 #include "ftm-initiator.h"
38 
/* Please use this in an increasing order of the versions */
#define CMD_VER_ENTRY(_ver, _struct)			\
	{ .size = sizeof(struct _struct), .ver = _ver },
/* Declares iwl_notif_struct_sizes_<name>[]: the (version, minimum size)
 * pairs accepted for one notification. Consumed by the RX_HANDLER_* macros
 * below and enforced by the notification validation code.
 */
#define CMD_VERSIONS(name, ...)				\
	static const struct iwl_notif_struct_size	\
	iwl_notif_struct_sizes_##name[] = { __VA_ARGS__ };

/* Handler entry with size validation but no owning object (not cancelable) */
#define RX_HANDLER_NO_OBJECT(_grp, _cmd, _name, _context)		\
	{.cmd_id = WIDE_ID(_grp, _cmd),					\
	 .context = _context,						\
	 .fn = iwl_mld_handle_##_name,					\
	 .sizes = iwl_notif_struct_sizes_##_name,			\
	 .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name),		\
	},

/* Use this for Rx handlers that do not need notification validation */
#define RX_HANDLER_NO_VAL(_grp, _cmd, _name, _context)			\
	{.cmd_id = WIDE_ID(_grp, _cmd),					\
	 .context = _context,						\
	 .fn = iwl_mld_handle_##_name,					\
	},

/* Handler entry validated by a custom iwl_mld_validate_<name>() function
 * instead of the (version, size) table.
 */
#define RX_HANDLER_VAL_FN(_grp, _cmd, _name, _context)			\
	{ .cmd_id = WIDE_ID(_grp, _cmd),				\
	  .context = _context,						\
	  .fn = iwl_mld_handle_##_name,					\
	  .val_fn = iwl_mld_validate_##_name,				\
	},

/* Generates iwl_mld_cancel_<name>_notif(): returns true when the object id
 * carried in the notification (id_member, any of __le32/__le16/u8) matches
 * obj_id, i.e. the queued notification belongs to the object being removed.
 */
#define DEFINE_SIMPLE_CANCELLATION(name, notif_struct, id_member)		\
static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld,			\
					  struct iwl_rx_packet *pkt,		\
					  u32 obj_id)				\
{										\
	const struct notif_struct *notif = (const void *)pkt->data;		\
										\
	return obj_id == _Generic((notif)->id_member,				\
				  __le32: le32_to_cpu((notif)->id_member),	\
				  __le16: le16_to_cpu((notif)->id_member),	\
				  u8: (notif)->id_member);			\
}

/* Currently only defined for the RX_HANDLER_SIZES options. Use this for
 * notifications that belong to a specific object, and that should be
 * canceled when the object is removed
 */
#define RX_HANDLER_OF_OBJ(_grp, _cmd, _name, _obj_type)			\
	{.cmd_id = WIDE_ID(_grp, _cmd),					\
	/* Only async handlers can be canceled */			\
	 .context = RX_HANDLER_ASYNC,					\
	 .fn = iwl_mld_handle_##_name,					\
	 .sizes = iwl_notif_struct_sizes_##_name,			\
	 .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name),		\
	 .obj_type = IWL_MLD_OBJECT_TYPE_##_obj_type,			\
	 .cancel = iwl_mld_cancel_##_name,				\
	 },

/* Per-object-type convenience wrappers around RX_HANDLER_OF_OBJ */
#define RX_HANDLER_OF_LINK(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, LINK)			\

#define RX_HANDLER_OF_VIF(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, VIF)			\

#define RX_HANDLER_OF_STA(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, STA)			\

#define RX_HANDLER_OF_ROC(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, ROC)

#define RX_HANDLER_OF_SCAN(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, SCAN)

#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ)

#define RX_HANDLER_OF_NAN(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, NAN)
iwl_mld_handle_mfuart_notif(struct iwl_mld * mld,struct iwl_rx_packet * pkt)117 static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
118 					struct iwl_rx_packet *pkt)
119 {
120 	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
121 
122 	IWL_DEBUG_INFO(mld,
123 		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x\n",
124 		       le32_to_cpu(mfuart_notif->installed_ver),
125 		       le32_to_cpu(mfuart_notif->external_ver));
126 	IWL_DEBUG_INFO(mld,
127 		       "MFUART: status: 0x%08x, duration: 0x%08x image size: 0x%08x\n",
128 		       le32_to_cpu(mfuart_notif->status),
129 		       le32_to_cpu(mfuart_notif->duration),
130 		       le32_to_cpu(mfuart_notif->image_size));
131 }
132 
/* Per-interface iterator: push the MU-MIMO group membership/position data
 * from the firmware notification into mac80211 for the MU-MIMO owner link.
 */
static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac,
					   struct ieee80211_vif *vif)
{
	const struct iwl_mu_group_mgmt_notif *notif = _data;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	unsigned int link_id = 0;

	/* The notification carries no link information, so it cannot be
	 * applied when more than one link is active (EMLSR).
	 */
	if (WARN(hweight16(vif->active_links) > 1,
		 "no support for this notif while in EMLSR 0x%x\n",
		 vif->active_links))
		return;

	if (ieee80211_vif_is_mld(vif)) {
		link_id = __ffs(vif->active_links);
		bss_conf = link_conf_dereference_check(vif, link_id);
	}

	if (WARN_ON(!bss_conf) || !bss_conf->mu_mimo_owner)
		return;

	BUILD_BUG_ON(sizeof(notif->membership_status) !=
		     WLAN_MEMBERSHIP_LEN);
	BUILD_BUG_ON(sizeof(notif->user_position) !=
		     WLAN_USER_POSITION_LEN);

	/* MU-MIMO Group Id action frame is little endian. We treat
	 * the data received from firmware as if it came from the
	 * action frame, so no conversion is needed.
	 */
	ieee80211_update_mu_groups(vif, link_id,
				   (u8 *)&notif->membership_status,
				   (u8 *)&notif->user_position);
}
166 
167 /* This handler is called in SYNC mode because it needs to be serialized with
168  * Rx as specified in ieee80211_update_mu_groups()'s documentation.
169  */
iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld * mld,struct iwl_rx_packet * pkt)170 static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
171 					     struct iwl_rx_packet *pkt)
172 {
173 	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
174 
175 	ieee80211_iterate_active_interfaces_atomic(mld->hw,
176 						   IEEE80211_IFACE_ITER_NORMAL,
177 						   iwl_mld_mu_mimo_iface_iterator,
178 						   notif);
179 }
180 
/* Handle the firmware's "channel switch started" notification.
 *
 * AP: the CSA countdown the AP itself advertised has completed - tell
 * mac80211 to finalize it (canceling an AP-advertised CSA is unsupported).
 * STA: either complete the switch, or - if mac80211 has no CSA in
 * progress - ask the firmware to cancel the switch it started.
 */
static void
iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
	u32 link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *link_conf =
		iwl_mld_fw_id_to_link_conf(mld, link_id);
	struct ieee80211_vif *vif;

	if (WARN_ON(!link_conf))
		return;

	vif = link_conf->vif;

	IWL_DEBUG_INFO(mld,
		       "CSA Start Notification with vif type: %d, link_id: %d\n",
		       vif->type,
		       link_conf->link_id);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/* We don't support canceling a CSA as it was advertised
		 * by the AP itself
		 */
		if (!link_conf->csa_active)
			return;

		ieee80211_csa_finish(vif, link_conf->link_id);
		break;
	case NL80211_IFTYPE_STATION:
		if (!link_conf->csa_active) {
			/* Either unexpected cs notif or mac80211 chose to
			 * ignore, for example in channel switch to same channel
			 */
			struct iwl_cancel_channel_switch_cmd cmd = {
				.id = cpu_to_le32(link_id),
			};

			if (iwl_mld_send_cmd_pdu(mld,
						 WIDE_ID(MAC_CONF_GROUP,
							 CANCEL_CHANNEL_SWITCH_CMD),
						 &cmd))
				IWL_ERR(mld,
					"Failed to cancel the channel switch\n");
			return;
		}

		ieee80211_chswitch_done(vif, true, link_conf->link_id);
		break;

	default:
		WARN(1, "CSA on invalid vif type: %d", vif->type);
	}
}
236 
237 static void
iwl_mld_handle_channel_switch_error_notif(struct iwl_mld * mld,struct iwl_rx_packet * pkt)238 iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
239 					  struct iwl_rx_packet *pkt)
240 {
241 	struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
242 	struct ieee80211_bss_conf *link_conf;
243 	struct ieee80211_vif *vif;
244 	u32 link_id = le32_to_cpu(notif->link_id);
245 	u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
246 
247 	link_conf = iwl_mld_fw_id_to_link_conf(mld, link_id);
248 	if (WARN_ON(!link_conf))
249 		return;
250 
251 	vif = link_conf->vif;
252 
253 	IWL_DEBUG_INFO(mld, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
254 		       link_id, csa_err_mask);
255 
256 	if (csa_err_mask & (CS_ERR_COUNT_ERROR |
257 			    CS_ERR_LONG_DELAY_AFTER_CS |
258 			    CS_ERR_TX_BLOCK_TIMER_EXPIRED))
259 		ieee80211_channel_switch_disconnect(vif);
260 }
261 
iwl_mld_handle_beacon_notification(struct iwl_mld * mld,struct iwl_rx_packet * pkt)262 static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
263 					       struct iwl_rx_packet *pkt)
264 {
265 	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
266 
267 	mld->ibss_manager = !!beacon->ibss_mgr_status;
268 }
269 
270 /**
271  * DOC: Notification versioning
272  *
273  * The firmware's notifications change from time to time. In order to
274  * differentiate between different versions of the same notification, the
275  * firmware advertises the version of each notification.
276  * Here are listed all the notifications that are supported. Several versions
277  * of the same notification can be allowed at the same time:
278  *
 * CMD_VERSIONS(my_multi_version_notif,
 *	       CMD_VER_ENTRY(1, iwl_my_multi_version_notif_ver1)
 *	       CMD_VER_ENTRY(2, iwl_my_multi_version_notif_ver2))
282  *
283  * etc...
284  *
285  * The driver will enforce that the notification coming from the firmware
286  * has its version listed here and it'll also enforce that the firmware sent
287  * at least enough bytes to cover the structure listed in the CMD_VER_ENTRY.
288  */
289 
/* Accepted (version, minimum struct size) per notification; see the
 * "Notification versioning" DOC section above.
 */
CMD_VERSIONS(scan_complete_notif,
	     CMD_VER_ENTRY(1, iwl_umac_scan_complete))
CMD_VERSIONS(scan_iter_complete_notif,
	     CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
CMD_VERSIONS(channel_survey_notif,
	     CMD_VER_ENTRY(1, iwl_umac_scan_channel_survey_notif))
CMD_VERSIONS(mfuart_notif,
	     CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
CMD_VERSIONS(update_mcc,
	     CMD_VER_ENTRY(1, iwl_mcc_chub_notif))
CMD_VERSIONS(session_prot_notif,
	     CMD_VER_ENTRY(3, iwl_session_prot_notif))
CMD_VERSIONS(missed_beacon_notif,
	     CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
CMD_VERSIONS(tx_resp_notif,
	     CMD_VER_ENTRY(8, iwl_tx_resp)
	     CMD_VER_ENTRY(9, iwl_tx_resp))
CMD_VERSIONS(compressed_ba_notif,
	     CMD_VER_ENTRY(5, iwl_compressed_ba_notif)
	     CMD_VER_ENTRY(6, iwl_compressed_ba_notif)
	     CMD_VER_ENTRY(7, iwl_compressed_ba_notif))
CMD_VERSIONS(tlc_notif,
	     CMD_VER_ENTRY(3, iwl_tlc_update_notif)
	     CMD_VER_ENTRY(4, iwl_tlc_update_notif))
CMD_VERSIONS(mu_mimo_grp_notif,
	     CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
CMD_VERSIONS(channel_switch_start_notif,
	     CMD_VER_ENTRY(3, iwl_channel_switch_start_notif))
CMD_VERSIONS(channel_switch_error_notif,
	     CMD_VER_ENTRY(2, iwl_channel_switch_error_notif))
CMD_VERSIONS(ct_kill_notif,
	     CMD_VER_ENTRY(2, ct_kill_notif))
CMD_VERSIONS(temp_notif,
	     CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
CMD_VERSIONS(roc_notif,
	     CMD_VER_ENTRY(1, iwl_roc_notif))
CMD_VERSIONS(probe_resp_data_notif,
	     CMD_VER_ENTRY(1, iwl_probe_resp_data_notif))
CMD_VERSIONS(datapath_monitor_notif,
	     CMD_VER_ENTRY(1, iwl_datapath_monitor_notif))
CMD_VERSIONS(stats_oper_notif,
	     CMD_VER_ENTRY(3, iwl_system_statistics_notif_oper))
CMD_VERSIONS(stats_oper_part1_notif,
	     CMD_VER_ENTRY(4, iwl_system_statistics_part1_notif_oper))
CMD_VERSIONS(bt_coex_notif,
	     CMD_VER_ENTRY(1, iwl_bt_coex_profile_notif))
CMD_VERSIONS(beacon_notification,
	     CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
	     CMD_VER_ENTRY(2, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif,
	     CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
CMD_VERSIONS(uapsd_misbehaving_ap_notif,
	     CMD_VER_ENTRY(1, iwl_uapsd_misbehaving_ap_notif))
CMD_VERSIONS(time_msmt_notif,
	     CMD_VER_ENTRY(1, iwl_time_msmt_notify))
CMD_VERSIONS(time_sync_confirm_notif,
	     CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(10, iwl_tof_range_rsp_ntfy))
CMD_VERSIONS(beacon_filter_notif, CMD_VER_ENTRY(2, iwl_beacon_filter_notif))
CMD_VERSIONS(nan_cluster_notif, CMD_VER_ENTRY(1, iwl_nan_cluster_notif))
CMD_VERSIONS(nan_dw_end_notif, CMD_VER_ENTRY(1, iwl_nan_dw_end_notif))
352 
/* Cancellation predicates: each generated function matches a queued
 * notification against the firmware id of the object being removed
 * (link/STA/MAC/ROC activity/scan uid/FTM request).
 */
DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id)
DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
DEFINE_SIMPLE_CANCELLATION(channel_switch_start,
			   iwl_channel_switch_start_notif, link_id)
DEFINE_SIMPLE_CANCELLATION(channel_switch_error,
			   iwl_channel_switch_error_notif, link_id)
DEFINE_SIMPLE_CANCELLATION(datapath_monitor, iwl_datapath_monitor_notif,
			   link_id)
DEFINE_SIMPLE_CANCELLATION(roc, iwl_roc_notif, activity)
DEFINE_SIMPLE_CANCELLATION(scan_complete, iwl_umac_scan_complete, uid)
DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif,
			   mac_id)
DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif,
			   mac_id)
DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id)
DEFINE_SIMPLE_CANCELLATION(beacon_filter, iwl_beacon_filter_notif, link_id)
369 
370 /**
371  * DOC: Handlers for fw notifications
372  *
373  * Here are listed the notifications IDs (including the group ID), the handler
374  * of the notification and how it should be called:
375  *
376  *  - RX_HANDLER_SYNC: will be called as part of the Rx path
 *  - RX_HANDLER_ASYNC: will be handled in a work item with the wiphy_lock held
378  *
379  * This means that if the firmware sends two notifications A and B in that
 * order and notification A is RX_HANDLER_ASYNC and notification B is
381  * RX_HANDLER_SYNC, the handler of B will likely be called before the handler
382  * of A.
383  *
384  * This list should be in order of frequency for performance purposes.
385  * The handler can be one from two contexts, see &iwl_rx_handler_context
386  *
387  * A handler can declare that it relies on a specific object in which case it
388  * can be cancelled in case the object is deleted. In order to use this
389  * mechanism, a cancellation function is needed. The cancellation function must
390  * receive an object id (the index of that object in the firmware) and a
391  * notification payload. It'll return true if that specific notification should
392  * be cancelled upon the obliteration of the specific instance of the object.
393  *
394  * DEFINE_SIMPLE_CANCELLATION allows to easily create a cancellation function
 * that will simply return true if a given object id matches the object id in
396  * the firmware notification.
397  */
398 
VISIBLE_IF_IWLWIFI_KUNIT
const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
	/* Data path notifications first - ordered by expected frequency */
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, TX_CMD, tx_resp_notif,
			     RX_HANDLER_SYNC)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BA_NOTIF, compressed_ba_notif,
			     RX_HANDLER_SYNC)
	RX_HANDLER_OF_SCAN(LEGACY_GROUP, SCAN_COMPLETE_UMAC,
			   scan_complete_notif)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, SCAN_ITERATION_COMPLETE_UMAC,
			     scan_iter_complete_notif,
			     RX_HANDLER_SYNC)
	RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION,
			  match_found_notif, RX_HANDLER_SYNC)

	RX_HANDLER_NO_OBJECT(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
			     channel_survey_notif,
			     RX_HANDLER_ASYNC)

	RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
			     stats_oper_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
			     stats_oper_part1_notif, RX_HANDLER_ASYNC)

	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MFUART_LOAD_NOTIFICATION,
			     mfuart_notif, RX_HANDLER_SYNC)

	RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
			     temp_notif, RX_HANDLER_ASYNC)
	/* Object-bound handlers: canceled when their link/STA/etc. goes away */
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
			   session_prot_notif)
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
			   missed_beacon_notif)
	RX_HANDLER_OF_STA(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, tlc_notif)
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
			   channel_switch_start_notif)
	RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
			   channel_switch_error_notif)
	RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif)
	RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
			     mu_mimo_grp_notif, RX_HANDLER_SYNC)
	RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
			  probe_resp_data_notif)
	RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
			     ct_kill_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_OF_LINK(DATA_PATH_GROUP, MONITOR_NOTIF,
			   datapath_monitor_notif)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MCC_CHUB_UPDATE_CMD, update_mcc,
			     RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(BT_COEX_GROUP, PROFILE_NOTIF,
			     bt_coex_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BEACON_NOTIFICATION,
			     beacon_notification, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, ESR_MODE_NOTIF,
			     emlsr_mode_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_NO_OBJECT(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF,
			     emlsr_trans_fail_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_OF_VIF(LEGACY_GROUP, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
			  uapsd_misbehaving_ap_notif)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
			     WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
			     time_msmt_notif, RX_HANDLER_SYNC)
	RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
			     WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
			     time_sync_confirm_notif, RX_HANDLER_ASYNC)
	RX_HANDLER_OF_LINK(DATA_PATH_GROUP, BEACON_FILTER_IN_NOTIF,
			   beacon_filter_notif)
	RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
			      ftm_resp_notif)
	RX_HANDLER_OF_NAN(MAC_CONF_GROUP, NAN_JOINED_CLUSTER_NOTIF,
			  nan_cluster_notif)
	RX_HANDLER_OF_NAN(MAC_CONF_GROUP, NAN_DW_END_NOTIF,
			  nan_dw_end_notif)
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers);
473 
474 #if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
475 const unsigned int iwl_mld_rx_handlers_num = ARRAY_SIZE(iwl_mld_rx_handlers);
476 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers_num);
477 #endif
478 
479 static bool
iwl_mld_notif_is_valid(struct iwl_mld * mld,struct iwl_rx_packet * pkt,const struct iwl_rx_handler * handler)480 iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
481 		       const struct iwl_rx_handler *handler)
482 {
483 	unsigned int size = iwl_rx_packet_payload_len(pkt);
484 	size_t notif_ver;
485 
486 	/* If n_sizes == 0, it indicates that a validation function may be used
487 	 * or that no validation is required.
488 	 */
489 	if (!handler->n_sizes) {
490 		if (handler->val_fn)
491 			return handler->val_fn(mld, pkt);
492 		return true;
493 	}
494 
495 	notif_ver = iwl_fw_lookup_notif_ver(mld->fw,
496 					    iwl_cmd_groupid(handler->cmd_id),
497 					    iwl_cmd_opcode(handler->cmd_id),
498 					    IWL_FW_CMD_VER_UNKNOWN);
499 
500 	for (int i = 0; i < handler->n_sizes; i++) {
501 		if (handler->sizes[i].ver != notif_ver)
502 			continue;
503 
504 		if (IWL_FW_CHECK(mld, size < handler->sizes[i].size,
505 				 "unexpected notification 0x%04x size %d, need %d\n",
506 				 handler->cmd_id, size, handler->sizes[i].size))
507 			return false;
508 		return true;
509 	}
510 
511 	IWL_FW_CHECK_FAILED(mld,
512 			    "notif 0x%04x ver %zu missing expected size, use version %u size\n",
513 			    handler->cmd_id, notif_ver,
514 			    handler->sizes[handler->n_sizes - 1].ver);
515 
516 	return size < handler->sizes[handler->n_sizes - 1].size;
517 }
518 
/* Book-keeping for one deferred (RX_HANDLER_ASYNC) notification */
struct iwl_async_handler_entry {
	/* Entry in mld->async_handlers_list (or a local drain list) */
	struct list_head list;
	/* Owns the Rx page stolen from the Rx path; freed with iwl_free_rxb() */
	struct iwl_rx_cmd_buffer rxb;
	/* The handler to invoke from the wiphy work item */
	const struct iwl_rx_handler *rx_h;
};
524 
525 static void
iwl_mld_log_async_handler_op(struct iwl_mld * mld,const char * op,struct iwl_rx_cmd_buffer * rxb)526 iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op,
527 			     struct iwl_rx_cmd_buffer *rxb)
528 {
529 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
530 
531 	IWL_DEBUG_HC(mld,
532 		     "%s async handler for notif %s (%.2x.%2x, seq 0x%x)\n",
533 		     op, iwl_get_cmd_string(mld->trans,
534 		     WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
535 		     pkt->hdr.group_id, pkt->hdr.cmd,
536 		     le16_to_cpu(pkt->hdr.sequence));
537 }
538 
/* Dispatch a firmware notification to its registered handler, if any.
 *
 * SYNC handlers run inline on the Rx path. For ASYNC handlers, the Rx page
 * is stolen from the rxb (so it survives this function) and the entry is
 * queued to run later from the wiphy work item with the wiphy lock held.
 * Notification waiters and debug time points are notified in all cases.
 */
static void iwl_mld_rx_notif(struct iwl_mld *mld,
			     struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_rx_packet *pkt)
{
	union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };

	for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) {
		const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		/* Drop malformed notifications entirely */
		if (!iwl_mld_notif_is_valid(mld, pkt, rx_h))
			return;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mld, pkt);
			break;
		}

		entry = kzalloc_obj(*entry, GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return;

		/* Set the async handler entry */
		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;

		entry->rx_h = rx_h;

		/* Add it to the list and queue the work */
		spin_lock(&mld->async_handlers_lock);
		list_add_tail(&entry->list, &mld->async_handlers_list);
		spin_unlock(&mld->async_handlers_lock);

		wiphy_work_queue(mld->hw->wiphy,
				 &mld->async_handlers_wk);

		iwl_mld_log_async_handler_op(mld, "Queued", rxb);
		break;
	}

	iwl_notification_wait_notify(&mld->notif_wait, pkt);
	iwl_dbg_tlv_time_point(&mld->fwrt,
			       IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
}
588 
/* op_mode Rx entry point (default queue).
 *
 * Fast-paths the high-frequency data notifications (MPDU Rx, frame
 * release, queue sync) and hands everything else to the generic
 * notification dispatcher. Branch hints keep the MPDU path hot.
 */
void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mld_rx_mpdu(mld, napi, rxb, 0);
	else if (cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
		iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0);
	else if (cmd_id == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
		iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0);
	else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
					    RX_QUEUES_NOTIFICATION)))
		iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0);
	else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, PHY_AIR_SNIFFER_NOTIF))
		iwl_mld_handle_phy_air_sniffer_notif(mld, napi, pkt);
	else
		iwl_mld_rx_notif(mld, rxb, pkt);
}
610 
/* op_mode Rx entry point for RSS queues (queue > 0).
 *
 * Only the data-path notifications can arrive here; anything else is
 * silently ignored, as is an out-of-range queue index.
 */
void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		    struct iwl_rx_cmd_buffer *rxb, unsigned int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
	u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	if (unlikely(queue >= mld->trans->info.num_rxqs))
		return;

	if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
		iwl_mld_rx_mpdu(mld, napi, rxb, queue);
	else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
					    RX_QUEUES_NOTIFICATION)))
		iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue);
	else if (unlikely(cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
		iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue);
}
629 
iwl_mld_delete_handlers(struct iwl_mld * mld,const u16 * cmds,int n_cmds)630 void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds)
631 {
632 	struct iwl_async_handler_entry *entry, *tmp;
633 
634 	spin_lock_bh(&mld->async_handlers_lock);
635 	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
636 		bool match = false;
637 
638 		for (int i = 0; i < n_cmds; i++) {
639 			if (entry->rx_h->cmd_id == cmds[i]) {
640 				match = true;
641 				break;
642 			}
643 		}
644 
645 		if (!match)
646 			continue;
647 
648 		iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb);
649 		iwl_free_rxb(&entry->rxb);
650 		list_del(&entry->list);
651 		kfree(entry);
652 	}
653 	spin_unlock_bh(&mld->async_handlers_lock);
654 }
655 
iwl_mld_async_handlers_wk(struct wiphy * wiphy,struct wiphy_work * wk)656 void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk)
657 {
658 	struct iwl_mld *mld =
659 		container_of(wk, struct iwl_mld, async_handlers_wk);
660 	struct iwl_async_handler_entry *entry, *tmp;
661 	LIST_HEAD(local_list);
662 
663 	/* Sync with Rx path with a lock. Remove all the entries from this
664 	 * list, add them to a local one (lock free), and then handle them.
665 	 */
666 	spin_lock_bh(&mld->async_handlers_lock);
667 	list_splice_init(&mld->async_handlers_list, &local_list);
668 	spin_unlock_bh(&mld->async_handlers_lock);
669 
670 	list_for_each_entry_safe(entry, tmp, &local_list, list) {
671 		iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb);
672 		entry->rx_h->fn(mld, rxb_addr(&entry->rxb));
673 		iwl_free_rxb(&entry->rxb);
674 		list_del(&entry->list);
675 		kfree(entry);
676 	}
677 }
678 
iwl_mld_cancel_async_notifications(struct iwl_mld * mld)679 void iwl_mld_cancel_async_notifications(struct iwl_mld *mld)
680 {
681 	struct iwl_async_handler_entry *entry, *tmp;
682 
683 	lockdep_assert_wiphy(mld->wiphy);
684 
685 	wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
686 
687 	spin_lock_bh(&mld->async_handlers_lock);
688 	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
689 		iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
690 		iwl_free_rxb(&entry->rxb);
691 		list_del(&entry->list);
692 		kfree(entry);
693 	}
694 	spin_unlock_bh(&mld->async_handlers_lock);
695 }
696 
/* Cancel queued async notifications that belong to a specific object
 * (link/VIF/STA/...) identified by its firmware id, e.g. when that
 * object is being removed.
 */
void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
					    enum iwl_mld_object_type obj_type,
					    u32 obj_id)
{
	struct iwl_async_handler_entry *entry, *tmp;
	LIST_HEAD(cancel_list);

	lockdep_assert_wiphy(mld->wiphy);

	if (WARN_ON(obj_type == IWL_MLD_OBJECT_TYPE_NONE))
		return;

	/* Sync with the Rx path and move matching entries off the async list */
	spin_lock_bh(&mld->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
		const struct iwl_rx_handler *rx_h = entry->rx_h;

		if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel))
			continue;

		if (!rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id))
			continue;

		iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb);
		list_move_tail(&entry->list, &cancel_list);
	}
	spin_unlock_bh(&mld->async_handlers_lock);

	/* Free the matching entries outside of the spinlock */
	list_for_each_entry_safe(entry, tmp, &cancel_list, list) {
		list_del(&entry->list);
		iwl_free_rxb(&entry->rxb);
		kfree(entry);
	}
}
733