// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "iwl-trans.h"
...
#include "fw/api/mac-cfg.h"
#include "session-protect.h"
#include "fw/api/time-event.h"
#include "fw/api/tx.h"
...
#include "tx.h"
...
#include "ftm-initiator.h"

/* Body of the macro that stamps out per-notification "cancel" helpers:
 * the queued notification's ID field is compared against an object ID,
 * and _Generic selects the endianness conversion that matches the
 * field's type at compile time.
 */
					struct iwl_rx_packet *pkt,	\
					u32 obj_id)			\
{									\
	const struct notif_struct *notif = (const void *)pkt->data;	\
									\
	return obj_id == _Generic((notif)->id_member,			\
				  __le32: le32_to_cpu((notif)->id_member), \
				  __le16: le16_to_cpu((notif)->id_member), \
				  u8: (notif)->id_member);		\
}
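
The _Generic selection above is easy to see in isolation. Below is a
minimal, self-contained userspace sketch of the same pattern; the
typedefs, the le*_to_cpu() stubs, and the ID_MATCHES/demo_* names are
stand-ins made up for illustration, not driver code. One macro compares
an object ID against a notification field whether that field is __le32,
__le16 or u8, picking the conversion at compile time:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t __le32;	/* stand-in: the kernel type is bitwise-tagged */
typedef uint16_t __le16;
typedef uint8_t u8;

#define le32_to_cpu(x) (x)	/* stand-in: identity on little-endian hosts */
#define le16_to_cpu(x) (x)

/* Select the matching conversion for the field's type at compile time */
#define ID_MATCHES(obj_id, val)				\
	((obj_id) == _Generic((val),			\
			      __le32: le32_to_cpu(val),	\
			      __le16: le16_to_cpu(val),	\
			      u8: (val)))

struct demo_notif {
	__le16 link_id;
};

static bool demo_cancel(struct demo_notif *notif, uint32_t obj_id)
{
	return ID_MATCHES(obj_id, notif->link_id);
}

int main(void)
{
	struct demo_notif n = { .link_id = 5 };

	return !demo_cancel(&n, 5);	/* exits 0: the IDs match */
}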

static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt)
{
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	/* Log the MFUART version and status details the firmware reports */
	...
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver));
	...
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration),
		       le32_to_cpu(mfuart_notif->image_size));
}

static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac,
					   struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	...

	/* MU-MIMO is only expected with a single active link */
	if (WARN(hweight16(vif->active_links) > 1,
		 ...
		 vif->active_links))
		return;
	...
	link_id = __ffs(vif->active_links);
	...

	if (!WARN_ON(!bss_conf) && bss_conf->mu_mimo_owner) {
		...
		BUILD_BUG_ON(sizeof(notif->membership_status) !=
			     WLAN_MEMBERSHIP_LEN);
		BUILD_BUG_ON(sizeof(notif->user_position) !=
			     WLAN_USER_POSITION_LEN);

		/* MU-MIMO Group Id action frame is little endian. We treat
		 * the data received from the firmware as if it came from the
		 * action frame, so no conversion is needed.
		 */
		...
			(u8 *)&notif->membership_status,
			(u8 *)&notif->user_position);
		...
			(const u8 *)&notif->membership_status,
			(const u8 *)&notif->user_position);
	}
}
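
The BUILD_BUG_ON pair pins the firmware-reported group data to the
over-the-air MU-MIMO Group ID action frame layout (8 bytes of
membership status and 16 bytes of user position, per 802.11). The same
kind of compile-time layout check can be written in plain C11 with
static_assert; the struct and names below are illustrative stand-ins,
not the driver's:

#include <assert.h>
#include <stdint.h>

/* Stand-in for the firmware notification layout */
struct fw_mu_groups {
	uint8_t membership_status[8];
	uint8_t user_position[16];
};

/* Lengths mac80211 expects from the Group ID action frame */
#define WLAN_MEMBERSHIP_LEN	8
#define WLAN_USER_POSITION_LEN	16

/* Fails the build, not the run, if the layouts ever diverge */
static_assert(sizeof(((struct fw_mu_groups *)0)->membership_status) ==
	      WLAN_MEMBERSHIP_LEN, "membership field must match the frame");
static_assert(sizeof(((struct fw_mu_groups *)0)->user_position) ==
	      WLAN_USER_POSITION_LEN, "user position must match the frame");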

static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
					     struct iwl_rx_packet *pkt)
{
	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;

	/* Push the new group data to every interface that owns MU-MIMO */
	ieee80211_iterate_active_interfaces_atomic(mld->hw,
						   ...);
}

static void
iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
	u32 link_id = le32_to_cpu(notif->link_id);
	...
	vif = link_conf->vif;
	...
		      vif->type,
		      link_conf->link_id);

	switch (vif->type) {
	...
		/* AP/GO: the switch our own beacons advertised completed */
		if (!link_conf->csa_active)
			return;

		ieee80211_csa_finish(vif, link_conf->link_id);
		...
		/* Client: the AP we are connected to switched channel */
		if (!link_conf->csa_active) {
			...
		}
		...
		ieee80211_chswitch_done(vif, true, link_conf->link_id);
		...
		WARN(1, "CSA on invalid vif type: %d", vif->type);
	}
}

static void
iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
	...
	u32 link_id = le32_to_cpu(notif->link_id);
	u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
	...
	vif = link_conf->vif;
	...
}

static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
					       struct iwl_rx_packet *pkt)
{
	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;

	/* Track whether this device is currently the IBSS beacon manager */
	mld->ibss_manager = !!beacon->ibss_mgr_status;
}

 * - RX_HANDLER_SYNC: will be called as part of the Rx path
 * - RX_HANDLER_ASYNC: will be handled in a worker, with the wiphy lock held
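
From the uses that follow, the shape of a handler table entry can be
reconstructed. The sketch below is inferred from usage only; field
order, exact types and the inner struct layout are assumptions, not the
driver's declaration:

/* Inferred from usage in this file; not the actual declaration */
struct iwl_rx_handler {
	u16 cmd_id;		/* WIDE_ID(group, opcode) to match on */
	u8 context;		/* RX_HANDLER_SYNC or RX_HANDLER_ASYNC */
	u8 obj_type;		/* object class, for targeted cancel */
	void (*fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
	/* optional validator, used when no sizes are declared */
	bool (*val_fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
	/* returns true if the queued notification refers to obj_id */
	bool (*cancel)(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
		       u32 obj_id);
	u8 n_sizes;		/* entries in @sizes */
	const struct {
		u8 ver;		/* notification version */
		u16 size;	/* minimum payload length for @ver */
	} *sizes;		/* sorted oldest to newest */
};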

static bool
iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
		       const struct iwl_rx_handler *handler)
{
	unsigned int size = iwl_rx_packet_payload_len(pkt);
	...

	/* No declared sizes: defer to the handler's own validator, if
	 * any; otherwise no validation is required. */
	if (!handler->n_sizes) {
		if (handler->val_fn)
			return handler->val_fn(mld, pkt);

		return true;
	}

	notif_ver = iwl_fw_lookup_notif_ver(mld->fw,
					    iwl_cmd_groupid(handler->cmd_id),
					    iwl_cmd_opcode(handler->cmd_id),
					    ...);

	/* Check the payload against the minimum size declared for the
	 * version the firmware advertises. */
	for (int i = 0; i < handler->n_sizes; i++) {
		if (handler->sizes[i].ver != notif_ver)
			continue;

		if (IWL_FW_CHECK(mld, size < handler->sizes[i].size,
				 ...
				 handler->cmd_id, size, handler->sizes[i].size))
			return false;

		return true;
	}

	...
		handler->cmd_id, notif_ver,
		handler->sizes[handler->n_sizes - 1].ver);

	/* Unknown version: require at least the newest size we know of */
	return size >= handler->sizes[handler->n_sizes - 1].size;
}
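
A handler's size table then lists the minimum payload length per
notification version, oldest to newest, which is what lets the fallback
above use the last entry when the firmware reports a version the driver
does not know. A hypothetical table for the channel-switch-start
notification (the _v2 struct and the table name are made up for
illustration):

/* Hypothetical: minimum sizes for two versions of one notification */
static const struct {
	u8 ver;
	u16 size;
} csa_start_sizes[] = {
	{ .ver = 1, .size = sizeof(struct iwl_channel_switch_start_notif) },
	{ .ver = 2, .size = sizeof(struct iwl_channel_switch_start_notif_v2) },
};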

static void
iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	/* Log which async operation was applied to which notification */
	...
		op, iwl_get_cmd_string(mld->trans,
				       WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
		pkt->hdr.group_id, pkt->hdr.cmd,
		le16_to_cpu(pkt->hdr.sequence));
}

static void iwl_mld_rx_notif(struct iwl_mld *mld,
			     struct iwl_rx_cmd_buffer *rxb,
			     struct iwl_rx_packet *pkt)
{
	...
		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
			continue;

		if (!iwl_mld_notif_is_valid(mld, pkt, rx_h))
			return;

		if (rx_h->context == RX_HANDLER_SYNC) {
			rx_h->fn(mld, pkt);
			...
		}

		/* RX_HANDLER_ASYNC: steal the Rx page so the payload
		 * stays alive after the Rx path returns */
		...
		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		...
		entry->rx_h = rx_h;

		/* Add it to the list and queue the work */
		spin_lock(&mld->async_handlers_lock);
		list_add_tail(&entry->list, &mld->async_handlers_list);
		spin_unlock(&mld->async_handlers_lock);

		wiphy_work_queue(mld->hw->wiphy,
				 &mld->async_handlers_wk);
	...

	iwl_notification_wait_notify(&mld->notif_wait, pkt);
}
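
For the deferral above to work, the lock, the list and the work item
must have been set up at init time. A minimal sketch of that plumbing,
assuming standard cfg80211 wiphy_work usage; the init helper itself is
an assumption, not a function shown in this file:

/* Assumed init-time plumbing for the async handler machinery */
static void iwl_mld_async_handlers_init(struct iwl_mld *mld)
{
	spin_lock_init(&mld->async_handlers_lock);
	INIT_LIST_HEAD(&mld->async_handlers_list);
	wiphy_work_init(&mld->async_handlers_wk, iwl_mld_async_handlers_wk);
}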

void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	...
	u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	/* Data-path notifications are dispatched directly; everything
	 * else goes through the handler table in iwl_mld_rx_notif() */
	...
		iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0);
	else if (...)
		iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0);
	else if (...)
		iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0);
	else if (...)
		iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0);
	else
		iwl_mld_rx_notif(mld, rxb, pkt);
}

void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		    struct iwl_rx_cmd_buffer *rxb, unsigned int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	...
	u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);

	/* Ignore notifications for Rx queues that were never allocated */
	if (unlikely(queue >= mld->trans->info.num_rxqs))
		return;

	...
		iwl_mld_rx_mpdu(mld, napi, rxb, queue);
	else if (...)
		iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue);
	else if (...)
		iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue);
}

static void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds,
				    int n_cmds)
{
	...
	spin_lock_bh(&mld->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
		...
			if (entry->rx_h->cmd_id == cmds[i]) {
				...
			}
		...

		/* Drop the queued notification and its stolen Rx page */
		iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		...
	}
	spin_unlock_bh(&mld->async_handlers_lock);
}

static void iwl_mld_async_handlers_wk(struct wiphy *wiphy,
				      struct wiphy_work *wk)
{
	...

	/* Move the whole pending list aside while holding the lock, then
	 * run the handlers without it, so the Rx path is never blocked
	 * on handler runtime */
	spin_lock_bh(&mld->async_handlers_lock);
	list_splice_init(&mld->async_handlers_list, &local_list);
	spin_unlock(&mld->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb);
		entry->rx_h->fn(mld, rxb_addr(&entry->rxb));
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		...
	}
}
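
The splice-then-process idiom above is worth a standalone look: the
pending list is detached in O(1) while the lock is held, and the
potentially slow handlers run with no lock held. A generic userspace
sketch of the same idiom (types and names are illustrative, not the
driver's):

#include <pthread.h>
#include <stdlib.h>

struct work_item {
	struct work_item *next;
	void (*fn)(void *arg);
	void *arg;
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *pending_head;	/* producers push here */

static void drain_pending(void)
{
	struct work_item *local;

	/* Splice: detach the whole list in O(1) under the lock */
	pthread_mutex_lock(&pending_lock);
	local = pending_head;
	pending_head = NULL;
	pthread_mutex_unlock(&pending_lock);

	/* Process without the lock held; producers are never blocked */
	while (local) {
		struct work_item *next = local->next;

		local->fn(local->arg);
		free(local);	/* assumes heap-allocated items */
		local = next;
	}
}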

void iwl_mld_cancel_async_notifications(struct iwl_mld *mld)
{
	...
	lockdep_assert_wiphy(mld->wiphy);

	wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);

	/* Purge everything still queued; none of it will run anymore */
	spin_lock_bh(&mld->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
		iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		...
	}
	spin_unlock_bh(&mld->async_handlers_lock);
}

void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
					    enum iwl_mld_object_type obj_type,
					    u32 obj_id)
{
	...
	lockdep_assert_wiphy(mld->wiphy);
	...

	/* Collect matching entries under the lock, free them outside it */
	spin_lock_bh(&mld->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
		const struct iwl_rx_handler *rx_h = entry->rx_h;

		if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel))
			continue;

		if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) {
			iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb);
			list_del(&entry->list);
			list_add_tail(&entry->list, &cancel_list);
		}
	}
	spin_unlock_bh(&mld->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &cancel_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		...
	}
}
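
A teardown path would call this to drop queued notifications that still
reference the object being removed, before the object's state is freed.
A hedged usage sketch; the enum value is assumed, not shown in this
file:

/* e.g. while removing a link; IWL_MLD_OBJECT_TYPE_LINK is assumed */
iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_LINK,
				       link_id);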