1*60bac4d6SBjoern A. Zeeb // SPDX-License-Identifier: BSD-3-Clause-Clear
2*60bac4d6SBjoern A. Zeeb /*
3*60bac4d6SBjoern A. Zeeb * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4*60bac4d6SBjoern A. Zeeb * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5*60bac4d6SBjoern A. Zeeb */
6*60bac4d6SBjoern A. Zeeb
7*60bac4d6SBjoern A. Zeeb #include "core.h"
8*60bac4d6SBjoern A. Zeeb #include "dp_peer.h"
9*60bac4d6SBjoern A. Zeeb #include "debug.h"
10*60bac4d6SBjoern A. Zeeb #include "debugfs.h"
11*60bac4d6SBjoern A. Zeeb
/* Unlink @peer from the DP peer list and release its memory, including the
 * optional extended rx statistics buffer.
 *
 * NOTE(review): this mutates the shared peer list, so the caller is expected
 * to hold dp->dp_lock (as ath12k_dp_link_peer_unmap_event() does) — confirm
 * at any other call sites.
 */
void ath12k_dp_link_peer_free(struct ath12k_dp_link_peer *peer)
{
	list_del(&peer->list);

	/* rx_stats is NULL unless extended rx stats were enabled;
	 * kfree(NULL) is a no-op.
	 */
	kfree(peer->peer_stats.rx_stats);
	kfree(peer);
}
19*60bac4d6SBjoern A. Zeeb
20*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_vdev_and_addr(struct ath12k_dp * dp,int vdev_id,const u8 * addr)21*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_find_by_vdev_and_addr(struct ath12k_dp *dp,
22*60bac4d6SBjoern A. Zeeb int vdev_id, const u8 *addr)
23*60bac4d6SBjoern A. Zeeb {
24*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer;
25*60bac4d6SBjoern A. Zeeb
26*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->dp_lock);
27*60bac4d6SBjoern A. Zeeb
28*60bac4d6SBjoern A. Zeeb list_for_each_entry(peer, &dp->peers, list) {
29*60bac4d6SBjoern A. Zeeb if (peer->vdev_id != vdev_id)
30*60bac4d6SBjoern A. Zeeb continue;
31*60bac4d6SBjoern A. Zeeb if (!ether_addr_equal(peer->addr, addr))
32*60bac4d6SBjoern A. Zeeb continue;
33*60bac4d6SBjoern A. Zeeb
34*60bac4d6SBjoern A. Zeeb return peer;
35*60bac4d6SBjoern A. Zeeb }
36*60bac4d6SBjoern A. Zeeb
37*60bac4d6SBjoern A. Zeeb return NULL;
38*60bac4d6SBjoern A. Zeeb }
39*60bac4d6SBjoern A. Zeeb
40*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_pdev_and_addr(struct ath12k_dp * dp,u8 pdev_idx,const u8 * addr)41*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_find_by_pdev_and_addr(struct ath12k_dp *dp, u8 pdev_idx,
42*60bac4d6SBjoern A. Zeeb const u8 *addr)
43*60bac4d6SBjoern A. Zeeb {
44*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer;
45*60bac4d6SBjoern A. Zeeb
46*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->dp_lock);
47*60bac4d6SBjoern A. Zeeb
48*60bac4d6SBjoern A. Zeeb list_for_each_entry(peer, &dp->peers, list) {
49*60bac4d6SBjoern A. Zeeb if (peer->pdev_idx != pdev_idx)
50*60bac4d6SBjoern A. Zeeb continue;
51*60bac4d6SBjoern A. Zeeb if (!ether_addr_equal(peer->addr, addr))
52*60bac4d6SBjoern A. Zeeb continue;
53*60bac4d6SBjoern A. Zeeb
54*60bac4d6SBjoern A. Zeeb return peer;
55*60bac4d6SBjoern A. Zeeb }
56*60bac4d6SBjoern A. Zeeb
57*60bac4d6SBjoern A. Zeeb return NULL;
58*60bac4d6SBjoern A. Zeeb }
59*60bac4d6SBjoern A. Zeeb
/* Fast MAC-address lookup via the peer-address rhashtable.
 *
 * Caller must hold dp->dp_lock; the returned pointer is only valid while
 * that lock is held. Returns NULL when @addr is not hashed.
 */
struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_addr(struct ath12k_dp *dp, const u8 *addr)
{
	lockdep_assert_held(&dp->dp_lock);

	return rhashtable_lookup_fast(dp->rhead_peer_addr, addr,
				      dp->rhash_peer_addr_param);
}
EXPORT_SYMBOL(ath12k_dp_link_peer_find_by_addr);
69*60bac4d6SBjoern A. Zeeb
70*60bac4d6SBjoern A. Zeeb static struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_ml_id(struct ath12k_dp * dp,int ml_peer_id)71*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_find_by_ml_id(struct ath12k_dp *dp, int ml_peer_id)
72*60bac4d6SBjoern A. Zeeb {
73*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer;
74*60bac4d6SBjoern A. Zeeb
75*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->dp_lock);
76*60bac4d6SBjoern A. Zeeb
77*60bac4d6SBjoern A. Zeeb list_for_each_entry(peer, &dp->peers, list)
78*60bac4d6SBjoern A. Zeeb if (ml_peer_id == peer->ml_id)
79*60bac4d6SBjoern A. Zeeb return peer;
80*60bac4d6SBjoern A. Zeeb
81*60bac4d6SBjoern A. Zeeb return NULL;
82*60bac4d6SBjoern A. Zeeb }
83*60bac4d6SBjoern A. Zeeb
84*60bac4d6SBjoern A. Zeeb static struct ath12k_dp_link_peer *
ath12k_dp_link_peer_search_by_id(struct ath12k_dp * dp,int peer_id)85*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_search_by_id(struct ath12k_dp *dp, int peer_id)
86*60bac4d6SBjoern A. Zeeb {
87*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer;
88*60bac4d6SBjoern A. Zeeb
89*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->dp_lock);
90*60bac4d6SBjoern A. Zeeb
91*60bac4d6SBjoern A. Zeeb if (peer_id == HAL_INVALID_PEERID)
92*60bac4d6SBjoern A. Zeeb return NULL;
93*60bac4d6SBjoern A. Zeeb
94*60bac4d6SBjoern A. Zeeb if (peer_id & ATH12K_PEER_ML_ID_VALID)
95*60bac4d6SBjoern A. Zeeb return ath12k_dp_link_peer_find_by_ml_id(dp, peer_id);
96*60bac4d6SBjoern A. Zeeb
97*60bac4d6SBjoern A. Zeeb list_for_each_entry(peer, &dp->peers, list)
98*60bac4d6SBjoern A. Zeeb if (peer_id == peer->peer_id)
99*60bac4d6SBjoern A. Zeeb return peer;
100*60bac4d6SBjoern A. Zeeb
101*60bac4d6SBjoern A. Zeeb return NULL;
102*60bac4d6SBjoern A. Zeeb }
103*60bac4d6SBjoern A. Zeeb
ath12k_dp_link_peer_exist_by_vdev_id(struct ath12k_dp * dp,int vdev_id)104*60bac4d6SBjoern A. Zeeb bool ath12k_dp_link_peer_exist_by_vdev_id(struct ath12k_dp *dp, int vdev_id)
105*60bac4d6SBjoern A. Zeeb {
106*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer;
107*60bac4d6SBjoern A. Zeeb
108*60bac4d6SBjoern A. Zeeb spin_lock_bh(&dp->dp_lock);
109*60bac4d6SBjoern A. Zeeb
110*60bac4d6SBjoern A. Zeeb list_for_each_entry(peer, &dp->peers, list) {
111*60bac4d6SBjoern A. Zeeb if (vdev_id == peer->vdev_id) {
112*60bac4d6SBjoern A. Zeeb spin_unlock_bh(&dp->dp_lock);
113*60bac4d6SBjoern A. Zeeb return true;
114*60bac4d6SBjoern A. Zeeb }
115*60bac4d6SBjoern A. Zeeb }
116*60bac4d6SBjoern A. Zeeb spin_unlock_bh(&dp->dp_lock);
117*60bac4d6SBjoern A. Zeeb return false;
118*60bac4d6SBjoern A. Zeeb }
119*60bac4d6SBjoern A. Zeeb
120*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_ast(struct ath12k_dp * dp,int ast_hash)121*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_find_by_ast(struct ath12k_dp *dp, int ast_hash)
122*60bac4d6SBjoern A. Zeeb {
123*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer;
124*60bac4d6SBjoern A. Zeeb
125*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->dp_lock);
126*60bac4d6SBjoern A. Zeeb
127*60bac4d6SBjoern A. Zeeb list_for_each_entry(peer, &dp->peers, list)
128*60bac4d6SBjoern A. Zeeb if (ast_hash == peer->ast_hash)
129*60bac4d6SBjoern A. Zeeb return peer;
130*60bac4d6SBjoern A. Zeeb
131*60bac4d6SBjoern A. Zeeb return NULL;
132*60bac4d6SBjoern A. Zeeb }
133*60bac4d6SBjoern A. Zeeb
/* HTT peer-unmap event handler: firmware has released @peer_id, so drop the
 * corresponding link peer from the DP peer list and free it.
 *
 * Waiters on ab->peer_mapping_wq are woken so peer-deletion paths can
 * observe that the unmap has completed.
 */
void ath12k_dp_link_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
	struct ath12k_dp_link_peer *peer;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_search_by_id(dp, peer_id);
	if (!peer) {
		/* Unmap for an id we never mapped (or already freed) */
		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, peer_id);

	ath12k_dp_link_peer_free(peer);
	wake_up(&ab->peer_mapping_wq);

exit:
	spin_unlock_bh(&dp->dp_lock);
}
157*60bac4d6SBjoern A. Zeeb
/* HTT peer-map event handler: firmware announced a (vdev, MAC) -> peer id
 * mapping. Allocate and register a link peer if this vdev/address pair is
 * not known yet; an already-known peer is left untouched (only the debug
 * log fires).
 */
void ath12k_dp_link_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
				   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath12k_dp_link_peer *peer;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k *ar;

	spin_lock_bh(&dp->dp_lock);
	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, mac_addr);
	if (!peer) {
		/* GFP_ATOMIC: we are inside a BH-disabled spinlock */
		peer = kzalloc_obj(*peer, GFP_ATOMIC);
		if (!peer)
			goto exit;

		/* Record the firmware-assigned identifiers */
		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		peer->hw_peer_id = hw_peer_id;
		ether_addr_copy(peer->addr, mac_addr);

		/* Extended rx stats are optional: allocation failure here is
		 * tolerated and simply leaves rx_stats NULL.
		 */
		rcu_read_lock();
		ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
		if (ar && ath12k_debugfs_is_extd_rx_stats_enabled(ar) &&
		    !peer->peer_stats.rx_stats) {
			peer->peer_stats.rx_stats = kzalloc_obj(*peer->peer_stats.rx_stats,
								GFP_ATOMIC);
		}
		rcu_read_unlock();

		list_add(&peer->list, &dp->peers);
		/* Unblock peer-create paths waiting for the firmware map */
		wake_up(&ab->peer_mapping_wq);
		ewma_avg_rssi_init(&peer->avg_rssi);
	}
	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&dp->dp_lock);
}
197*60bac4d6SBjoern A. Zeeb
/* Map a DP link peer back to its mac80211-level ath12k_link_sta.
 *
 * Must run under rcu_read_lock(): the per-link station pointer is
 * RCU-protected. Returns NULL when the peer has no station attached, when
 * the peer's link id is absent from the station's links_map, or when the
 * RCU link slot has already been cleared.
 */
struct ath12k_link_sta *ath12k_dp_link_peer_to_link_sta(struct ath12k_base *ab,
							struct ath12k_dp_link_peer *peer)
{
	struct ath12k_sta *ahsta;
	struct ath12k_link_sta *arsta;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "ath12k_dp_link_peer to ath12k_link_sta called without rcu lock");

	if (!peer->sta)
		return NULL;

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	if (peer->ml_id & ATH12K_PEER_ML_ID_VALID) {
		/* MLO peer: resolve the per-link station via link_id */
		if (!(ahsta->links_map & BIT(peer->link_id))) {
			ath12k_warn(ab, "peer %pM id %d link_id %d can't found in STA link_map 0x%x\n",
				    peer->addr, peer->peer_id, peer->link_id,
				    ahsta->links_map);
			return NULL;
		}
		arsta = rcu_dereference(ahsta->link[peer->link_id]);
		if (!arsta)
			return NULL;
	} else {
		/* Non-MLO peer: always uses the default link */
		arsta = &ahsta->deflink;
	}
	return arsta;
}
226*60bac4d6SBjoern A. Zeeb
ath12k_dp_link_peer_rhash_addr_tbl_init(struct ath12k_dp * dp)227*60bac4d6SBjoern A. Zeeb static int ath12k_dp_link_peer_rhash_addr_tbl_init(struct ath12k_dp *dp)
228*60bac4d6SBjoern A. Zeeb {
229*60bac4d6SBjoern A. Zeeb struct ath12k_base *ab = dp->ab;
230*60bac4d6SBjoern A. Zeeb struct rhashtable_params *param;
231*60bac4d6SBjoern A. Zeeb struct rhashtable *rhash_addr_tbl;
232*60bac4d6SBjoern A. Zeeb int ret;
233*60bac4d6SBjoern A. Zeeb
234*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->link_peer_rhash_tbl_lock);
235*60bac4d6SBjoern A. Zeeb
236*60bac4d6SBjoern A. Zeeb rhash_addr_tbl = kzalloc_obj(*dp->rhead_peer_addr);
237*60bac4d6SBjoern A. Zeeb if (!rhash_addr_tbl)
238*60bac4d6SBjoern A. Zeeb return -ENOMEM;
239*60bac4d6SBjoern A. Zeeb
240*60bac4d6SBjoern A. Zeeb param = &dp->rhash_peer_addr_param;
241*60bac4d6SBjoern A. Zeeb
242*60bac4d6SBjoern A. Zeeb param->key_offset = offsetof(struct ath12k_dp_link_peer, addr);
243*60bac4d6SBjoern A. Zeeb param->head_offset = offsetof(struct ath12k_dp_link_peer, rhash_addr);
244*60bac4d6SBjoern A. Zeeb param->key_len = sizeof_field(struct ath12k_dp_link_peer, addr);
245*60bac4d6SBjoern A. Zeeb param->automatic_shrinking = true;
246*60bac4d6SBjoern A. Zeeb param->nelem_hint = ab->num_radios * ath12k_core_get_max_peers_per_radio(ab);
247*60bac4d6SBjoern A. Zeeb
248*60bac4d6SBjoern A. Zeeb ret = rhashtable_init(rhash_addr_tbl, param);
249*60bac4d6SBjoern A. Zeeb if (ret) {
250*60bac4d6SBjoern A. Zeeb ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
251*60bac4d6SBjoern A. Zeeb goto err_free;
252*60bac4d6SBjoern A. Zeeb }
253*60bac4d6SBjoern A. Zeeb
254*60bac4d6SBjoern A. Zeeb dp->rhead_peer_addr = rhash_addr_tbl;
255*60bac4d6SBjoern A. Zeeb
256*60bac4d6SBjoern A. Zeeb return 0;
257*60bac4d6SBjoern A. Zeeb
258*60bac4d6SBjoern A. Zeeb err_free:
259*60bac4d6SBjoern A. Zeeb kfree(rhash_addr_tbl);
260*60bac4d6SBjoern A. Zeeb
261*60bac4d6SBjoern A. Zeeb return ret;
262*60bac4d6SBjoern A. Zeeb }
263*60bac4d6SBjoern A. Zeeb
/* Public wrapper: build the peer-address rhashtable under the table mutex.
 * Returns 0 on success or the negative errno from the init helper.
 */
int ath12k_dp_link_peer_rhash_tbl_init(struct ath12k_dp *dp)
{
	int ret;

	mutex_lock(&dp->link_peer_rhash_tbl_lock);
	ret = ath12k_dp_link_peer_rhash_addr_tbl_init(dp);
	mutex_unlock(&dp->link_peer_rhash_tbl_lock);

	return ret;
}
274*60bac4d6SBjoern A. Zeeb
ath12k_dp_link_peer_rhash_tbl_destroy(struct ath12k_dp * dp)275*60bac4d6SBjoern A. Zeeb void ath12k_dp_link_peer_rhash_tbl_destroy(struct ath12k_dp *dp)
276*60bac4d6SBjoern A. Zeeb {
277*60bac4d6SBjoern A. Zeeb mutex_lock(&dp->link_peer_rhash_tbl_lock);
278*60bac4d6SBjoern A. Zeeb rhashtable_destroy(dp->rhead_peer_addr);
279*60bac4d6SBjoern A. Zeeb kfree(dp->rhead_peer_addr);
280*60bac4d6SBjoern A. Zeeb dp->rhead_peer_addr = NULL;
281*60bac4d6SBjoern A. Zeeb mutex_unlock(&dp->link_peer_rhash_tbl_lock);
282*60bac4d6SBjoern A. Zeeb }
283*60bac4d6SBjoern A. Zeeb
ath12k_dp_link_peer_rhash_insert(struct ath12k_dp * dp,struct ath12k_dp_link_peer * peer)284*60bac4d6SBjoern A. Zeeb static int ath12k_dp_link_peer_rhash_insert(struct ath12k_dp *dp,
285*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer)
286*60bac4d6SBjoern A. Zeeb {
287*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *tmp;
288*60bac4d6SBjoern A. Zeeb
289*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->dp_lock);
290*60bac4d6SBjoern A. Zeeb
291*60bac4d6SBjoern A. Zeeb tmp = rhashtable_lookup_get_insert_fast(dp->rhead_peer_addr, &peer->rhash_addr,
292*60bac4d6SBjoern A. Zeeb dp->rhash_peer_addr_param);
293*60bac4d6SBjoern A. Zeeb if (!tmp)
294*60bac4d6SBjoern A. Zeeb return 0;
295*60bac4d6SBjoern A. Zeeb else if (IS_ERR(tmp))
296*60bac4d6SBjoern A. Zeeb return PTR_ERR(tmp);
297*60bac4d6SBjoern A. Zeeb else
298*60bac4d6SBjoern A. Zeeb return -EEXIST;
299*60bac4d6SBjoern A. Zeeb }
300*60bac4d6SBjoern A. Zeeb
ath12k_dp_link_peer_rhash_remove(struct ath12k_dp * dp,struct ath12k_dp_link_peer * peer)301*60bac4d6SBjoern A. Zeeb static int ath12k_dp_link_peer_rhash_remove(struct ath12k_dp *dp,
302*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *peer)
303*60bac4d6SBjoern A. Zeeb {
304*60bac4d6SBjoern A. Zeeb int ret;
305*60bac4d6SBjoern A. Zeeb
306*60bac4d6SBjoern A. Zeeb lockdep_assert_held(&dp->dp_lock);
307*60bac4d6SBjoern A. Zeeb
308*60bac4d6SBjoern A. Zeeb ret = rhashtable_remove_fast(dp->rhead_peer_addr, &peer->rhash_addr,
309*60bac4d6SBjoern A. Zeeb dp->rhash_peer_addr_param);
310*60bac4d6SBjoern A. Zeeb if (ret && ret != -ENOENT)
311*60bac4d6SBjoern A. Zeeb return ret;
312*60bac4d6SBjoern A. Zeeb
313*60bac4d6SBjoern A. Zeeb return 0;
314*60bac4d6SBjoern A. Zeeb }
315*60bac4d6SBjoern A. Zeeb
/* Insert @peer into the address rhashtable, logging on failure.
 *
 * Caller holds dp->dp_lock. Returns 0 on success, -EEXIST when the MAC is
 * already hashed, or another negative errno from the rhashtable core.
 */
int ath12k_dp_link_peer_rhash_add(struct ath12k_dp *dp,
				  struct ath12k_dp_link_peer *peer)
{
	int ret;

	lockdep_assert_held(&dp->dp_lock);

	ret = ath12k_dp_link_peer_rhash_insert(dp, peer);
	if (ret)
		ath12k_warn(dp, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
			    peer->addr, peer->peer_id, ret);

	return ret;
}
330*60bac4d6SBjoern A. Zeeb
/* Remove @peer from the address rhashtable, logging on failure.
 * Caller holds dp->dp_lock.
 */
void ath12k_dp_link_peer_rhash_delete(struct ath12k_dp *dp,
				      struct ath12k_dp_link_peer *peer)
{
	/* No failure handling and hence return type is void */
	int ret;

	lockdep_assert_held(&dp->dp_lock);

	ret = ath12k_dp_link_peer_rhash_remove(dp, peer);
	if (ret)
		ath12k_warn(dp, "failed to remove peer %pM with id %d in rhash_addr ret %d\n",
			    peer->addr, peer->peer_id, ret);
}
344*60bac4d6SBjoern A. Zeeb
/* Linear search of the hw-level DP peer list by MAC address.
 * Caller must hold dp_hw->peer_lock; returns NULL when no peer matches.
 */
struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr(struct ath12k_dp_hw *dp_hw, u8 *addr)
{
	struct ath12k_dp_peer *entry;

	lockdep_assert_held(&dp_hw->peer_lock);

	list_for_each_entry(entry, &dp_hw->dp_peers_list, list)
		if (ether_addr_equal(entry->addr, addr))
			return entry;

	return NULL;
}
EXPORT_SYMBOL(ath12k_dp_peer_find_by_addr);
359*60bac4d6SBjoern A. Zeeb
/* Find the DP peer that matches both @addr and the mac80211 @sta pointer.
 * Caller must hold dp_hw->peer_lock; returns NULL when no peer matches.
 */
struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr_and_sta(struct ath12k_dp_hw *dp_hw,
							   u8 *addr,
							   struct ieee80211_sta *sta)
{
	struct ath12k_dp_peer *entry;

	lockdep_assert_held(&dp_hw->peer_lock);

	list_for_each_entry(entry, &dp_hw->dp_peers_list, list) {
		if (entry->sta != sta)
			continue;
		if (ether_addr_equal(entry->addr, addr))
			return entry;
	}

	return NULL;
}
375*60bac4d6SBjoern A. Zeeb
/* Duplicate check used by peer creation.
 *
 * A MAC match counts as a duplicate when no station is given, when the new
 * peer is MLO, when the existing peer is MLO, or when the station pointers
 * agree. Caller must hold dp_hw->peer_lock.
 */
static struct ath12k_dp_peer *ath12k_dp_peer_create_find(struct ath12k_dp_hw *dp_hw,
							 u8 *addr,
							 struct ieee80211_sta *sta,
							 bool mlo_peer)
{
	struct ath12k_dp_peer *entry;

	lockdep_assert_held(&dp_hw->peer_lock);

	list_for_each_entry(entry, &dp_hw->dp_peers_list, list) {
		if (!ether_addr_equal(entry->addr, addr))
			continue;

		if (!sta || mlo_peer || entry->is_mlo || entry->sta == sta)
			return entry;
	}

	return NULL;
}
395*60bac4d6SBjoern A. Zeeb
/*
 * For an MLO client, the index of its ath12k_dp_peer equals the peer id.
 * For ath12k_dp_link_peer (both MLO and non-MLO) and for the
 * ath12k_dp_peer of a non-MLO client, the index is derived as
 * ((DEVICE_ID << 10) | (low 10 bits of the peer id)).
 *
 * This is needed because ml_peer_id and peer_id_table are scoped per hw,
 * while link_peer_id is scoped per device; deriving the index this way
 * keeps the two id spaces from colliding.
 */
405*60bac4d6SBjoern A. Zeeb #define ATH12K_DP_PEER_TABLE_DEVICE_ID_SHIFT 10
406*60bac4d6SBjoern A. Zeeb
/* Translate a peer id into its slot in the dp_peers[] RCU table.
 *
 * Host-assigned ML peer ids index the table directly; firmware link peer
 * ids are namespaced by device id (see the comment above
 * ATH12K_DP_PEER_TABLE_DEVICE_ID_SHIFT) to avoid cross-device collisions.
 */
u16 ath12k_dp_peer_get_peerid_index(struct ath12k_dp *dp, u16 peer_id)
{
	if (peer_id & ATH12K_PEER_ML_ID_VALID)
		return peer_id;

	return (dp->device_id << ATH12K_DP_PEER_TABLE_DEVICE_ID_SHIFT) | peer_id;
}
412*60bac4d6SBjoern A. Zeeb
/* Resolve @peer_id to its ath12k_dp_peer via the RCU-protected lookup table.
 *
 * Must be called under rcu_read_lock(); the returned pointer is only valid
 * inside the read-side critical section. Returns NULL for id 0 or any id
 * at/above ATH12K_DP_PEER_ID_INVALID.
 */
struct ath12k_dp_peer *ath12k_dp_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev,
						     u16 peer_id)
{
	u16 index;
	struct ath12k_dp *dp = dp_pdev->dp;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "ath12k dp peer find by peerid index called without rcu lock");

	if (!peer_id || peer_id >= ATH12K_DP_PEER_ID_INVALID)
		return NULL;

	/* Firmware link peer ids are namespaced by device id */
	index = ath12k_dp_peer_get_peerid_index(dp, peer_id);

	return rcu_dereference(dp_pdev->dp_hw->dp_peers[index]);
}
EXPORT_SYMBOL(ath12k_dp_peer_find_by_peerid);
430*60bac4d6SBjoern A. Zeeb
/* Resolve @peer_id to the DP link peer as seen from @dp_pdev.
 *
 * First resolves the hw-level ath12k_dp_peer, then selects the link peer
 * for this pdev's hw link. Must be called under rcu_read_lock(); the
 * returned pointer is only valid inside the read-side critical section.
 */
struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev, u16 peer_id)
{
	struct ath12k_dp_peer *dp_peer = NULL;
	u8 link_id;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "ath12k dp link peer find by peerid index called without rcu lock");

	/* Guard the hw_links[] access below against out-of-range link ids */
	if (dp_pdev->hw_link_id >= ATH12K_GROUP_MAX_RADIO)
		return NULL;

	dp_peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id);
	if (!dp_peer)
		return NULL;

	link_id = dp_peer->hw_links[dp_pdev->hw_link_id];

	return rcu_dereference(dp_peer->link_peers[link_id]);
}
EXPORT_SYMBOL(ath12k_dp_link_peer_find_by_peerid);
452*60bac4d6SBjoern A. Zeeb
/* Create an ath12k_dp_peer for @addr and add it to dp_hw's list.
 *
 * Returns 0 on success, -EEXIST when a matching peer already exists, or
 * -ENOMEM on allocation failure.
 *
 * Fixes vs. the original: the duplicated ucast_ra_only assignment is
 * removed, and the duplicate check is repeated after re-acquiring
 * peer_lock — the original checked, dropped the lock for the allocation,
 * and then inserted unconditionally, so two racing creators could both
 * add a peer for the same address.
 */
int ath12k_dp_peer_create(struct ath12k_dp_hw *dp_hw, u8 *addr,
			  struct ath12k_dp_peer_create_params *params)
{
	struct ath12k_dp_peer *dp_peer;

	spin_lock_bh(&dp_hw->peer_lock);
	dp_peer = ath12k_dp_peer_create_find(dp_hw, addr, params->sta, params->is_mlo);
	spin_unlock_bh(&dp_hw->peer_lock);
	if (dp_peer)
		return -EEXIST;

	dp_peer = kzalloc_obj(*dp_peer, GFP_ATOMIC);
	if (!dp_peer)
		return -ENOMEM;

	ether_addr_copy(dp_peer->addr, addr);
	dp_peer->sta = params->sta;
	dp_peer->is_mlo = params->is_mlo;

	/*
	 * For MLO client, the host assigns the ML peer ID, so set peer_id in dp_peer
	 * For non-MLO client, host gets link peer ID from firmware and will be
	 * assigned at the time of link peer creation
	 */
	dp_peer->peer_id = params->is_mlo ? params->peer_id : ATH12K_DP_PEER_ID_INVALID;
	dp_peer->ucast_ra_only = params->ucast_ra_only;

	dp_peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	dp_peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	spin_lock_bh(&dp_hw->peer_lock);

	/* Re-check under the lock: another creator may have raced in while
	 * the lock was dropped for the allocation above.
	 */
	if (ath12k_dp_peer_create_find(dp_hw, addr, params->sta, params->is_mlo)) {
		spin_unlock_bh(&dp_hw->peer_lock);
		kfree(dp_peer);
		return -EEXIST;
	}

	list_add(&dp_peer->list, &dp_hw->dp_peers_list);

	/*
	 * For MLO client, the peer_id for ath12k_dp_peer is allocated by host
	 * and that peer_id is known at this point, and hence this ath12k_dp_peer
	 * can be added to the RCU table using the peer_id.
	 * For non-MLO client, this addition to RCU table shall be done at the
	 * time of assignment of ath12k_dp_link_peer to ath12k_dp_peer.
	 */
	if (dp_peer->is_mlo)
		rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], dp_peer);

	spin_unlock_bh(&dp_hw->peer_lock);

	return 0;
}
504*60bac4d6SBjoern A. Zeeb
/* Delete the ath12k_dp_peer matching @addr/@sta.
 *
 * The entry is unlinked from the list and, for MLO peers, cleared from the
 * RCU lookup table under peer_lock. synchronize_rcu() then guarantees no
 * RCU reader can still hold the pointer before the memory is freed.
 *
 * NOTE(review): only the is_mlo slot is cleared here; for non-MLO peers the
 * peerid-indexed slot installed in ath12k_dp_link_peer_assign() is
 * presumably cleared on the link-peer unassign path — confirm.
 */
void ath12k_dp_peer_delete(struct ath12k_dp_hw *dp_hw, u8 *addr,
			   struct ieee80211_sta *sta)
{
	struct ath12k_dp_peer *dp_peer;

	spin_lock_bh(&dp_hw->peer_lock);

	dp_peer = ath12k_dp_peer_find_by_addr_and_sta(dp_hw, addr, sta);
	if (!dp_peer) {
		spin_unlock_bh(&dp_hw->peer_lock);
		return;
	}

	if (dp_peer->is_mlo)
		rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], NULL);

	list_del(&dp_peer->list);

	spin_unlock_bh(&dp_hw->peer_lock);

	/* Wait out all RCU readers before freeing */
	synchronize_rcu();
	kfree(dp_peer);
}
528*60bac4d6SBjoern A. Zeeb
/* Attach an existing firmware-mapped link peer to its ath12k_dp_peer.
 *
 * Looks up the link peer by (vdev, addr) and the DP peer by (MAC, sta) —
 * using sta->addr when a station is given, otherwise @addr — then wires
 * the two together, publishes them in the RCU tables, and (re)inserts the
 * link peer into the MAC-address rhashtable.
 *
 * Lock order: dp->dp_lock outer, dp_hw->peer_lock inner.
 * Returns 0 on success or a negative errno.
 */
int ath12k_dp_link_peer_assign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
			       u8 vdev_id, struct ieee80211_sta *sta, u8 *addr,
			       u8 link_id, u32 hw_link_id)
{
	struct ath12k_dp_peer *dp_peer;
	struct ath12k_dp_link_peer *peer, *temp_peer;
	u16 peerid_index;
	int ret = -EINVAL;
	u8 *dp_peer_mac = !sta ? addr : sta->addr;

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
	if (!peer) {
		ath12k_warn(dp, "failed to find dp_link_peer with mac %pM on vdev %u\n",
			    addr, vdev_id);
		ret = -ENOENT;
		goto err_peer;
	}

	spin_lock_bh(&dp_hw->peer_lock);

	dp_peer = ath12k_dp_peer_find_by_addr_and_sta(dp_hw, dp_peer_mac, sta);
	if (!dp_peer) {
		ath12k_warn(dp, "failed to find dp_peer with mac %pM\n", dp_peer_mac);
		ret = -ENOENT;
		goto err_dp_peer;
	}

	/*
	 * Set peer_id in dp_peer for non-mlo client, peer_id for mlo client is
	 * set during dp_peer create
	 */
	if (!dp_peer->is_mlo)
		dp_peer->peer_id = peer->peer_id;

	peer->dp_peer = dp_peer;
	peer->hw_link_id = hw_link_id;

	dp_peer->hw_links[peer->hw_link_id] = link_id;

	peerid_index = ath12k_dp_peer_get_peerid_index(dp, peer->peer_id);

	/* NOTE(review): this indexes by peer->link_id while the link_id
	 * argument is stored into hw_links[] above — peer->link_id is
	 * presumably set when the link peer is created; confirm they agree.
	 */
	rcu_assign_pointer(dp_peer->link_peers[peer->link_id], peer);

	rcu_assign_pointer(dp_hw->dp_peers[peerid_index], dp_peer);

	spin_unlock_bh(&dp_hw->peer_lock);

	/*
	 * In case of Split PHY and roaming scenario, pdev idx
	 * might differ but both the pdev will share same rhash
	 * table. In that case update the rhash table if link_peer is
	 * already present
	 */
	temp_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
	if (temp_peer && temp_peer->hw_link_id != hw_link_id)
		ath12k_dp_link_peer_rhash_delete(dp, temp_peer);

	ret = ath12k_dp_link_peer_rhash_add(dp, peer);
	if (ret) {
		/*
		 * If new entry addition failed, add back old entry
		 * If old entry addition also fails, then nothing
		 * can be done, simply proceed
		 */
		if (temp_peer)
			ath12k_dp_link_peer_rhash_add(dp, temp_peer);
	}

	spin_unlock_bh(&dp->dp_lock);

	return ret;

err_dp_peer:
	spin_unlock_bh(&dp_hw->peer_lock);

err_peer:
	spin_unlock_bh(&dp->dp_lock);

	return ret;
}
611*60bac4d6SBjoern A. Zeeb
/*
 * ath12k_dp_link_peer_unassign() - detach a link peer from its dp_peer
 * @dp: per-device DP context owning the link peer list and dp_lock
 * @dp_hw: DP hw context holding the dp_peer lookup table and peer_lock
 * @vdev_id: vdev the link peer belongs to
 * @addr: MAC address of the link peer
 * @hw_link_id: hardware link id the peer was assigned on
 *
 * Reverses the assign path: clears the dp_peer's per-hw-link slot, drops
 * the RCU-published lookup entries and removes this link's rhash entry.
 * Waits for an RCU grace period before returning so readers still holding
 * the old pointers have drained.
 */
void ath12k_dp_link_peer_unassign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
				  u8 vdev_id, u8 *addr, u32 hw_link_id)
{
	struct ath12k_dp_peer *dp_peer;
	struct ath12k_dp_link_peer *peer, *temp_peer;
	u16 peerid_index;

	spin_lock_bh(&dp->dp_lock);

	/* Nothing to undo if the link peer is gone or was never assigned */
	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
	if (!peer || !peer->dp_peer) {
		spin_unlock_bh(&dp->dp_lock);
		return;
	}

	/* dp_lock is taken before peer_lock, same nesting as the assign path */
	spin_lock_bh(&dp_hw->peer_lock);

	/*
	 * NOTE(review): peer->dp_peer is left set here - presumably the link
	 * peer itself is torn down by the caller; confirm against callers.
	 */
	dp_peer = peer->dp_peer;
	dp_peer->hw_links[peer->hw_link_id] = 0;

	peerid_index = ath12k_dp_peer_get_peerid_index(dp, peer->peer_id);

	/* Unpublish the RCU lookup entries installed during assign */
	rcu_assign_pointer(dp_peer->link_peers[peer->link_id], NULL);

	rcu_assign_pointer(dp_hw->dp_peers[peerid_index], NULL);

	spin_unlock_bh(&dp_hw->peer_lock);

	/* To handle roaming and split phy scenario */
	temp_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
	/*
	 * Only delete the rhash entry when the table still resolves this
	 * address to the same hw link; otherwise another link's entry took
	 * over (roaming across pdevs) and must be left in place.
	 */
	if (temp_peer && temp_peer->hw_link_id == hw_link_id)
		ath12k_dp_link_peer_rhash_delete(dp, peer);

	spin_unlock_bh(&dp->dp_lock);

	/* Let RCU readers of the cleared pointers drain before freeing */
	synchronize_rcu();
}
649*60bac4d6SBjoern A. Zeeb
650*60bac4d6SBjoern A. Zeeb void
ath12k_dp_link_peer_get_sta_rate_info_stats(struct ath12k_dp * dp,const u8 * addr,struct ath12k_dp_link_peer_rate_info * info)651*60bac4d6SBjoern A. Zeeb ath12k_dp_link_peer_get_sta_rate_info_stats(struct ath12k_dp *dp, const u8 *addr,
652*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer_rate_info *info)
653*60bac4d6SBjoern A. Zeeb {
654*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *link_peer;
655*60bac4d6SBjoern A. Zeeb
656*60bac4d6SBjoern A. Zeeb guard(spinlock_bh)(&dp->dp_lock);
657*60bac4d6SBjoern A. Zeeb
658*60bac4d6SBjoern A. Zeeb link_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
659*60bac4d6SBjoern A. Zeeb if (!link_peer)
660*60bac4d6SBjoern A. Zeeb return;
661*60bac4d6SBjoern A. Zeeb
662*60bac4d6SBjoern A. Zeeb info->rx_duration = link_peer->rx_duration;
663*60bac4d6SBjoern A. Zeeb info->tx_duration = link_peer->tx_duration;
664*60bac4d6SBjoern A. Zeeb info->txrate.legacy = link_peer->txrate.legacy;
665*60bac4d6SBjoern A. Zeeb info->txrate.mcs = link_peer->txrate.mcs;
666*60bac4d6SBjoern A. Zeeb info->txrate.nss = link_peer->txrate.nss;
667*60bac4d6SBjoern A. Zeeb info->txrate.bw = link_peer->txrate.bw;
668*60bac4d6SBjoern A. Zeeb info->txrate.he_gi = link_peer->txrate.he_gi;
669*60bac4d6SBjoern A. Zeeb info->txrate.he_dcm = link_peer->txrate.he_dcm;
670*60bac4d6SBjoern A. Zeeb info->txrate.he_ru_alloc = link_peer->txrate.he_ru_alloc;
671*60bac4d6SBjoern A. Zeeb info->txrate.flags = link_peer->txrate.flags;
672*60bac4d6SBjoern A. Zeeb info->rssi_comb = link_peer->rssi_comb;
673*60bac4d6SBjoern A. Zeeb info->signal_avg = ewma_avg_rssi_read(&link_peer->avg_rssi);
674*60bac4d6SBjoern A. Zeeb }
675*60bac4d6SBjoern A. Zeeb
ath12k_dp_link_peer_reset_rx_stats(struct ath12k_dp * dp,const u8 * addr)676*60bac4d6SBjoern A. Zeeb void ath12k_dp_link_peer_reset_rx_stats(struct ath12k_dp *dp, const u8 *addr)
677*60bac4d6SBjoern A. Zeeb {
678*60bac4d6SBjoern A. Zeeb struct ath12k_rx_peer_stats *rx_stats;
679*60bac4d6SBjoern A. Zeeb struct ath12k_dp_link_peer *link_peer;
680*60bac4d6SBjoern A. Zeeb
681*60bac4d6SBjoern A. Zeeb guard(spinlock_bh)(&dp->dp_lock);
682*60bac4d6SBjoern A. Zeeb
683*60bac4d6SBjoern A. Zeeb link_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
684*60bac4d6SBjoern A. Zeeb if (!link_peer || !link_peer->peer_stats.rx_stats)
685*60bac4d6SBjoern A. Zeeb return;
686*60bac4d6SBjoern A. Zeeb
687*60bac4d6SBjoern A. Zeeb rx_stats = link_peer->peer_stats.rx_stats;
688*60bac4d6SBjoern A. Zeeb if (rx_stats)
689*60bac4d6SBjoern A. Zeeb memset(rx_stats, 0, sizeof(*rx_stats));
690*60bac4d6SBjoern A. Zeeb }
691