1dd4f32aeSBjoern A. Zeeb // SPDX-License-Identifier: BSD-3-Clause-Clear
2dd4f32aeSBjoern A. Zeeb /*
3dd4f32aeSBjoern A. Zeeb * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4*28348caeSBjoern A. Zeeb * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5dd4f32aeSBjoern A. Zeeb */
6dd4f32aeSBjoern A. Zeeb
7dd4f32aeSBjoern A. Zeeb #include "core.h"
8dd4f32aeSBjoern A. Zeeb #include "peer.h"
9dd4f32aeSBjoern A. Zeeb #include "debug.h"
10dd4f32aeSBjoern A. Zeeb
ath11k_peer_find_list_by_id(struct ath11k_base * ab,int peer_id)11*28348caeSBjoern A. Zeeb static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
12*28348caeSBjoern A. Zeeb int peer_id)
13*28348caeSBjoern A. Zeeb {
14*28348caeSBjoern A. Zeeb struct ath11k_peer *peer;
15*28348caeSBjoern A. Zeeb
16*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->base_lock);
17*28348caeSBjoern A. Zeeb
18*28348caeSBjoern A. Zeeb list_for_each_entry(peer, &ab->peers, list) {
19*28348caeSBjoern A. Zeeb if (peer->peer_id != peer_id)
20*28348caeSBjoern A. Zeeb continue;
21*28348caeSBjoern A. Zeeb
22*28348caeSBjoern A. Zeeb return peer;
23*28348caeSBjoern A. Zeeb }
24*28348caeSBjoern A. Zeeb
25*28348caeSBjoern A. Zeeb return NULL;
26*28348caeSBjoern A. Zeeb }
27*28348caeSBjoern A. Zeeb
ath11k_peer_find(struct ath11k_base * ab,int vdev_id,const u8 * addr)28dd4f32aeSBjoern A. Zeeb struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
29dd4f32aeSBjoern A. Zeeb const u8 *addr)
30dd4f32aeSBjoern A. Zeeb {
31dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer;
32dd4f32aeSBjoern A. Zeeb
33dd4f32aeSBjoern A. Zeeb lockdep_assert_held(&ab->base_lock);
34dd4f32aeSBjoern A. Zeeb
35dd4f32aeSBjoern A. Zeeb list_for_each_entry(peer, &ab->peers, list) {
36dd4f32aeSBjoern A. Zeeb if (peer->vdev_id != vdev_id)
37dd4f32aeSBjoern A. Zeeb continue;
38dd4f32aeSBjoern A. Zeeb if (!ether_addr_equal(peer->addr, addr))
39dd4f32aeSBjoern A. Zeeb continue;
40dd4f32aeSBjoern A. Zeeb
41dd4f32aeSBjoern A. Zeeb return peer;
42dd4f32aeSBjoern A. Zeeb }
43dd4f32aeSBjoern A. Zeeb
44dd4f32aeSBjoern A. Zeeb return NULL;
45dd4f32aeSBjoern A. Zeeb }
46dd4f32aeSBjoern A. Zeeb
ath11k_peer_find_by_addr(struct ath11k_base * ab,const u8 * addr)47dd4f32aeSBjoern A. Zeeb struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
48dd4f32aeSBjoern A. Zeeb const u8 *addr)
49dd4f32aeSBjoern A. Zeeb {
50dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer;
51dd4f32aeSBjoern A. Zeeb
52dd4f32aeSBjoern A. Zeeb lockdep_assert_held(&ab->base_lock);
53dd4f32aeSBjoern A. Zeeb
54*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_addr)
55*28348caeSBjoern A. Zeeb return NULL;
56*28348caeSBjoern A. Zeeb
57*28348caeSBjoern A. Zeeb peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
58*28348caeSBjoern A. Zeeb ab->rhash_peer_addr_param);
59dd4f32aeSBjoern A. Zeeb
60dd4f32aeSBjoern A. Zeeb return peer;
61dd4f32aeSBjoern A. Zeeb }
62dd4f32aeSBjoern A. Zeeb
ath11k_peer_find_by_id(struct ath11k_base * ab,int peer_id)63dd4f32aeSBjoern A. Zeeb struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
64dd4f32aeSBjoern A. Zeeb int peer_id)
65dd4f32aeSBjoern A. Zeeb {
66dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer;
67dd4f32aeSBjoern A. Zeeb
68dd4f32aeSBjoern A. Zeeb lockdep_assert_held(&ab->base_lock);
69dd4f32aeSBjoern A. Zeeb
70*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_id)
71dd4f32aeSBjoern A. Zeeb return NULL;
72*28348caeSBjoern A. Zeeb
73*28348caeSBjoern A. Zeeb peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
74*28348caeSBjoern A. Zeeb ab->rhash_peer_id_param);
75*28348caeSBjoern A. Zeeb
76*28348caeSBjoern A. Zeeb return peer;
77dd4f32aeSBjoern A. Zeeb }
78dd4f32aeSBjoern A. Zeeb
ath11k_peer_find_by_vdev_id(struct ath11k_base * ab,int vdev_id)79dd4f32aeSBjoern A. Zeeb struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
80dd4f32aeSBjoern A. Zeeb int vdev_id)
81dd4f32aeSBjoern A. Zeeb {
82dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer;
83dd4f32aeSBjoern A. Zeeb
84dd4f32aeSBjoern A. Zeeb spin_lock_bh(&ab->base_lock);
85dd4f32aeSBjoern A. Zeeb
86dd4f32aeSBjoern A. Zeeb list_for_each_entry(peer, &ab->peers, list) {
87dd4f32aeSBjoern A. Zeeb if (vdev_id == peer->vdev_id) {
88dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
89dd4f32aeSBjoern A. Zeeb return peer;
90dd4f32aeSBjoern A. Zeeb }
91dd4f32aeSBjoern A. Zeeb }
92dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
93dd4f32aeSBjoern A. Zeeb return NULL;
94dd4f32aeSBjoern A. Zeeb }
95dd4f32aeSBjoern A. Zeeb
/* Handle a firmware peer-unmap indication: remove the matching peer from
 * the flat list, free it and wake any waiter in peer_mapping_wq.
 */
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
	struct ath11k_peer *p;

	spin_lock_bh(&ab->base_lock);

	p = ath11k_peer_find_list_by_id(ab, peer_id);
	if (p) {
		ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer unmap vdev %d peer %pM id %d\n",
			   p->vdev_id, p->addr, peer_id);

		list_del(&p->list);
		kfree(p);
		wake_up(&ab->peer_mapping_wq);
	} else {
		ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
	}

	spin_unlock_bh(&ab->base_lock);
}
119dd4f32aeSBjoern A. Zeeb
/* Handle a firmware peer-map indication. Allocates and links a new peer
 * entry on first sight of (vdev_id, mac_addr); subsequent maps for the
 * same peer only log. Waiters in peer_mapping_wq are woken on creation.
 */
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath11k_peer *p;

	spin_lock_bh(&ab->base_lock);

	p = ath11k_peer_find(ab, vdev_id, mac_addr);
	if (!p) {
		/* GFP_ATOMIC: base_lock (BH-disabling spinlock) is held. */
		p = kzalloc(sizeof(*p), GFP_ATOMIC);
		if (!p)
			goto exit;

		p->vdev_id = vdev_id;
		p->peer_id = peer_id;
		p->ast_hash = ast_hash;
		p->hw_peer_id = hw_peer_id;
		ether_addr_copy(p->addr, mac_addr);

		list_add(&p->list, &ab->peers);
		wake_up(&ab->peer_mapping_wq);
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&ab->base_lock);
}
147dd4f32aeSBjoern A. Zeeb
/* Wait (up to 3s) until the peer's presence on the flat list matches
 * @expect_mapped, or until a crash flush is flagged. Returns 0 on
 * success, -ETIMEDOUT otherwise.
 */
static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int left;

	left = wait_event_timeout(ab->peer_mapping_wq, ({
		bool found;

		spin_lock_bh(&ab->base_lock);
		found = !!ath11k_peer_find(ab, vdev_id, addr);
		spin_unlock_bh(&ab->base_lock);

		/* Crash flush aborts the wait regardless of mapping state. */
		(found == expect_mapped ||
		 test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
	}), 3 * HZ);

	return left <= 0 ? -ETIMEDOUT : 0;
}
169dd4f32aeSBjoern A. Zeeb
ath11k_peer_rhash_insert(struct ath11k_base * ab,struct rhashtable * rtbl,struct rhash_head * rhead,struct rhashtable_params * params,void * key)170*28348caeSBjoern A. Zeeb static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
171*28348caeSBjoern A. Zeeb struct rhashtable *rtbl,
172*28348caeSBjoern A. Zeeb struct rhash_head *rhead,
173*28348caeSBjoern A. Zeeb struct rhashtable_params *params,
174*28348caeSBjoern A. Zeeb void *key)
175*28348caeSBjoern A. Zeeb {
176*28348caeSBjoern A. Zeeb struct ath11k_peer *tmp;
177*28348caeSBjoern A. Zeeb
178*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
179*28348caeSBjoern A. Zeeb
180*28348caeSBjoern A. Zeeb tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
181*28348caeSBjoern A. Zeeb
182*28348caeSBjoern A. Zeeb if (!tmp)
183*28348caeSBjoern A. Zeeb return 0;
184*28348caeSBjoern A. Zeeb else if (IS_ERR(tmp))
185*28348caeSBjoern A. Zeeb return PTR_ERR(tmp);
186*28348caeSBjoern A. Zeeb else
187*28348caeSBjoern A. Zeeb return -EEXIST;
188*28348caeSBjoern A. Zeeb }
189*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_remove(struct ath11k_base * ab,struct rhashtable * rtbl,struct rhash_head * rhead,struct rhashtable_params * params)190*28348caeSBjoern A. Zeeb static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
191*28348caeSBjoern A. Zeeb struct rhashtable *rtbl,
192*28348caeSBjoern A. Zeeb struct rhash_head *rhead,
193*28348caeSBjoern A. Zeeb struct rhashtable_params *params)
194*28348caeSBjoern A. Zeeb {
195*28348caeSBjoern A. Zeeb int ret;
196*28348caeSBjoern A. Zeeb
197*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
198*28348caeSBjoern A. Zeeb
199*28348caeSBjoern A. Zeeb ret = rhashtable_remove_fast(rtbl, rhead, *params);
200*28348caeSBjoern A. Zeeb if (ret && ret != -ENOENT)
201*28348caeSBjoern A. Zeeb return ret;
202*28348caeSBjoern A. Zeeb
203*28348caeSBjoern A. Zeeb return 0;
204*28348caeSBjoern A. Zeeb }
205*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_add(struct ath11k_base * ab,struct ath11k_peer * peer)206*28348caeSBjoern A. Zeeb static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
207*28348caeSBjoern A. Zeeb {
208*28348caeSBjoern A. Zeeb int ret;
209*28348caeSBjoern A. Zeeb
210*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->base_lock);
211*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
212*28348caeSBjoern A. Zeeb
213*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
214*28348caeSBjoern A. Zeeb return -EPERM;
215*28348caeSBjoern A. Zeeb
216*28348caeSBjoern A. Zeeb ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
217*28348caeSBjoern A. Zeeb &ab->rhash_peer_id_param, &peer->peer_id);
218*28348caeSBjoern A. Zeeb if (ret) {
219*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
220*28348caeSBjoern A. Zeeb peer->addr, peer->peer_id, ret);
221*28348caeSBjoern A. Zeeb return ret;
222*28348caeSBjoern A. Zeeb }
223*28348caeSBjoern A. Zeeb
224*28348caeSBjoern A. Zeeb ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
225*28348caeSBjoern A. Zeeb &ab->rhash_peer_addr_param, &peer->addr);
226*28348caeSBjoern A. Zeeb if (ret) {
227*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
228*28348caeSBjoern A. Zeeb peer->addr, peer->peer_id, ret);
229*28348caeSBjoern A. Zeeb goto err_clean;
230*28348caeSBjoern A. Zeeb }
231*28348caeSBjoern A. Zeeb
232*28348caeSBjoern A. Zeeb return 0;
233*28348caeSBjoern A. Zeeb
234*28348caeSBjoern A. Zeeb err_clean:
235*28348caeSBjoern A. Zeeb ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
236*28348caeSBjoern A. Zeeb &ab->rhash_peer_id_param);
237*28348caeSBjoern A. Zeeb return ret;
238*28348caeSBjoern A. Zeeb }
239*28348caeSBjoern A. Zeeb
/* Drop every peer still attached to @vdev_id: remove it from the hash
 * tables and the flat list, free it, and decrement the radio's peer
 * count. Caller must hold ar->conf_mutex.
 */
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *p, *n;

	lockdep_assert_held(&ar->conf_mutex);

	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);

	list_for_each_entry_safe(p, n, &ab->peers, list) {
		if (p->vdev_id != vdev_id)
			continue;

		ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    p->addr, vdev_id);

		ath11k_peer_rhash_delete(ab, p);
		list_del(&p->list);
		kfree(p);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);
}
265dd4f32aeSBjoern A. Zeeb
ath11k_wait_for_peer_deleted(struct ath11k * ar,int vdev_id,const u8 * addr)266dd4f32aeSBjoern A. Zeeb static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
267dd4f32aeSBjoern A. Zeeb {
268dd4f32aeSBjoern A. Zeeb return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
269dd4f32aeSBjoern A. Zeeb }
270dd4f32aeSBjoern A. Zeeb
/* Wait for both stages of peer deletion: the unmap event (peer gone from
 * the flat list) and the firmware's delete-response completion. Returns 0
 * on success, -ETIMEDOUT or another negative errno on failure.
 */
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
				     const u8 *addr)
{
	unsigned long left;
	int err;

	err = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (err) {
		ath11k_warn(ar->ab, "failed wait for peer deleted");
		return err;
	}

	left = wait_for_completion_timeout(&ar->peer_delete_done, 3 * HZ);
	if (!left) {
		ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}
292dd4f32aeSBjoern A. Zeeb
/* Core peer deletion: detach the peer from the hash tables, ask firmware
 * to delete it, and wait for the delete to complete. Does NOT adjust
 * ar->num_peers (see ath11k_peer_delete). Caller must hold ar->conf_mutex.
 */
static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	mutex_lock(&ab->tbl_mtx_lock);
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_addr(ab, addr);
	/* Check if the found peer is what we want to remove.
	 * While the sta is transitioning to another band we may
	 * have 2 peer with the same addr assigned to different
	 * vdev_id. Make sure we are deleting the correct peer.
	 */
	if (peer && peer->vdev_id == vdev_id)
		ath11k_peer_rhash_delete(ab, peer);

	/* Fallback to peer list search if the correct peer can't be found.
	 * Skip the deletion of the peer from the rhash since it has already
	 * been deleted in peer add.
	 */
	if (!peer)
		peer = ath11k_peer_find(ab, vdev_id, addr);

	spin_unlock_bh(&ab->base_lock);
	mutex_unlock(&ab->tbl_mtx_lock);

	if (!peer) {
		ath11k_warn(ab,
			    "failed to find peer vdev_id %d addr %pM in delete\n",
			    vdev_id, addr);
		return -EINVAL;
	}

	reinit_completion(&ar->peer_delete_done);

	ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	return ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
}
349*28348caeSBjoern A. Zeeb
/* Public deletion entry point: delete the peer and, on success, account
 * for it in ar->num_peers. Caller must hold ar->conf_mutex.
 */
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
	int err;

	lockdep_assert_held(&ar->conf_mutex);

	err = __ath11k_peer_delete(ar, vdev_id, addr);
	if (!err)
		ar->num_peers--;

	return err;
}
364dd4f32aeSBjoern A. Zeeb
ath11k_wait_for_peer_created(struct ath11k * ar,int vdev_id,const u8 * addr)365dd4f32aeSBjoern A. Zeeb static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
366dd4f32aeSBjoern A. Zeeb {
367dd4f32aeSBjoern A. Zeeb return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
368dd4f32aeSBjoern A. Zeeb }
369dd4f32aeSBjoern A. Zeeb
ath11k_peer_create(struct ath11k * ar,struct ath11k_vif * arvif,struct ieee80211_sta * sta,struct peer_create_params * param)370dd4f32aeSBjoern A. Zeeb int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
371dd4f32aeSBjoern A. Zeeb struct ieee80211_sta *sta, struct peer_create_params *param)
372dd4f32aeSBjoern A. Zeeb {
373dd4f32aeSBjoern A. Zeeb struct ath11k_peer *peer;
374dd4f32aeSBjoern A. Zeeb struct ath11k_sta *arsta;
375dd4f32aeSBjoern A. Zeeb int ret, fbret;
376dd4f32aeSBjoern A. Zeeb
377dd4f32aeSBjoern A. Zeeb lockdep_assert_held(&ar->conf_mutex);
378dd4f32aeSBjoern A. Zeeb
379dd4f32aeSBjoern A. Zeeb if (ar->num_peers > (ar->max_num_peers - 1)) {
380dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab,
381dd4f32aeSBjoern A. Zeeb "failed to create peer due to insufficient peer entry resource in firmware\n");
382dd4f32aeSBjoern A. Zeeb return -ENOBUFS;
383dd4f32aeSBjoern A. Zeeb }
384dd4f32aeSBjoern A. Zeeb
385*28348caeSBjoern A. Zeeb mutex_lock(&ar->ab->tbl_mtx_lock);
386dd4f32aeSBjoern A. Zeeb spin_lock_bh(&ar->ab->base_lock);
387*28348caeSBjoern A. Zeeb peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
388dd4f32aeSBjoern A. Zeeb if (peer) {
389*28348caeSBjoern A. Zeeb if (peer->vdev_id == param->vdev_id) {
390dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ar->ab->base_lock);
391*28348caeSBjoern A. Zeeb mutex_unlock(&ar->ab->tbl_mtx_lock);
392dd4f32aeSBjoern A. Zeeb return -EINVAL;
393dd4f32aeSBjoern A. Zeeb }
394*28348caeSBjoern A. Zeeb
395*28348caeSBjoern A. Zeeb /* Assume sta is transitioning to another band.
396*28348caeSBjoern A. Zeeb * Remove here the peer from rhash.
397*28348caeSBjoern A. Zeeb */
398*28348caeSBjoern A. Zeeb ath11k_peer_rhash_delete(ar->ab, peer);
399*28348caeSBjoern A. Zeeb }
400dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ar->ab->base_lock);
401*28348caeSBjoern A. Zeeb mutex_unlock(&ar->ab->tbl_mtx_lock);
402dd4f32aeSBjoern A. Zeeb
403dd4f32aeSBjoern A. Zeeb ret = ath11k_wmi_send_peer_create_cmd(ar, param);
404dd4f32aeSBjoern A. Zeeb if (ret) {
405dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab,
406dd4f32aeSBjoern A. Zeeb "failed to send peer create vdev_id %d ret %d\n",
407dd4f32aeSBjoern A. Zeeb param->vdev_id, ret);
408dd4f32aeSBjoern A. Zeeb return ret;
409dd4f32aeSBjoern A. Zeeb }
410dd4f32aeSBjoern A. Zeeb
411dd4f32aeSBjoern A. Zeeb ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
412dd4f32aeSBjoern A. Zeeb param->peer_addr);
413dd4f32aeSBjoern A. Zeeb if (ret)
414dd4f32aeSBjoern A. Zeeb return ret;
415dd4f32aeSBjoern A. Zeeb
416*28348caeSBjoern A. Zeeb mutex_lock(&ar->ab->tbl_mtx_lock);
417dd4f32aeSBjoern A. Zeeb spin_lock_bh(&ar->ab->base_lock);
418dd4f32aeSBjoern A. Zeeb
419dd4f32aeSBjoern A. Zeeb peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
420dd4f32aeSBjoern A. Zeeb if (!peer) {
421dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ar->ab->base_lock);
422*28348caeSBjoern A. Zeeb mutex_unlock(&ar->ab->tbl_mtx_lock);
423dd4f32aeSBjoern A. Zeeb ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
424dd4f32aeSBjoern A. Zeeb param->peer_addr, param->vdev_id);
425dd4f32aeSBjoern A. Zeeb
426dd4f32aeSBjoern A. Zeeb ret = -ENOENT;
427dd4f32aeSBjoern A. Zeeb goto cleanup;
428dd4f32aeSBjoern A. Zeeb }
429dd4f32aeSBjoern A. Zeeb
430*28348caeSBjoern A. Zeeb ret = ath11k_peer_rhash_add(ar->ab, peer);
431*28348caeSBjoern A. Zeeb if (ret) {
432*28348caeSBjoern A. Zeeb spin_unlock_bh(&ar->ab->base_lock);
433*28348caeSBjoern A. Zeeb mutex_unlock(&ar->ab->tbl_mtx_lock);
434*28348caeSBjoern A. Zeeb goto cleanup;
435*28348caeSBjoern A. Zeeb }
436*28348caeSBjoern A. Zeeb
437dd4f32aeSBjoern A. Zeeb peer->pdev_idx = ar->pdev_idx;
438dd4f32aeSBjoern A. Zeeb peer->sta = sta;
439dd4f32aeSBjoern A. Zeeb
440dd4f32aeSBjoern A. Zeeb if (arvif->vif->type == NL80211_IFTYPE_STATION) {
441dd4f32aeSBjoern A. Zeeb arvif->ast_hash = peer->ast_hash;
442dd4f32aeSBjoern A. Zeeb arvif->ast_idx = peer->hw_peer_id;
443dd4f32aeSBjoern A. Zeeb }
444dd4f32aeSBjoern A. Zeeb
445dd4f32aeSBjoern A. Zeeb peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
446dd4f32aeSBjoern A. Zeeb peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
447dd4f32aeSBjoern A. Zeeb
448dd4f32aeSBjoern A. Zeeb if (sta) {
449dd4f32aeSBjoern A. Zeeb arsta = (struct ath11k_sta *)sta->drv_priv;
450dd4f32aeSBjoern A. Zeeb arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
451dd4f32aeSBjoern A. Zeeb FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
452dd4f32aeSBjoern A. Zeeb peer->peer_id);
453dd4f32aeSBjoern A. Zeeb
454dd4f32aeSBjoern A. Zeeb /* set HTT extension valid bit to 0 by default */
455dd4f32aeSBjoern A. Zeeb arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
456dd4f32aeSBjoern A. Zeeb }
457dd4f32aeSBjoern A. Zeeb
458dd4f32aeSBjoern A. Zeeb ar->num_peers++;
459dd4f32aeSBjoern A. Zeeb
460dd4f32aeSBjoern A. Zeeb spin_unlock_bh(&ar->ab->base_lock);
461*28348caeSBjoern A. Zeeb mutex_unlock(&ar->ab->tbl_mtx_lock);
462dd4f32aeSBjoern A. Zeeb
463dd4f32aeSBjoern A. Zeeb return 0;
464dd4f32aeSBjoern A. Zeeb
465dd4f32aeSBjoern A. Zeeb cleanup:
466*28348caeSBjoern A. Zeeb fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
467dd4f32aeSBjoern A. Zeeb if (fbret)
468*28348caeSBjoern A. Zeeb ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
469dd4f32aeSBjoern A. Zeeb param->peer_addr, param->vdev_id, fbret);
470dd4f32aeSBjoern A. Zeeb
471dd4f32aeSBjoern A. Zeeb return ret;
472dd4f32aeSBjoern A. Zeeb }
473*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_delete(struct ath11k_base * ab,struct ath11k_peer * peer)474*28348caeSBjoern A. Zeeb int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
475*28348caeSBjoern A. Zeeb {
476*28348caeSBjoern A. Zeeb int ret;
477*28348caeSBjoern A. Zeeb
478*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->base_lock);
479*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
480*28348caeSBjoern A. Zeeb
481*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
482*28348caeSBjoern A. Zeeb return -EPERM;
483*28348caeSBjoern A. Zeeb
484*28348caeSBjoern A. Zeeb ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
485*28348caeSBjoern A. Zeeb &ab->rhash_peer_addr_param);
486*28348caeSBjoern A. Zeeb if (ret) {
487*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
488*28348caeSBjoern A. Zeeb peer->addr, peer->peer_id, ret);
489*28348caeSBjoern A. Zeeb return ret;
490*28348caeSBjoern A. Zeeb }
491*28348caeSBjoern A. Zeeb
492*28348caeSBjoern A. Zeeb ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
493*28348caeSBjoern A. Zeeb &ab->rhash_peer_id_param);
494*28348caeSBjoern A. Zeeb if (ret) {
495*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
496*28348caeSBjoern A. Zeeb peer->addr, peer->peer_id, ret);
497*28348caeSBjoern A. Zeeb return ret;
498*28348caeSBjoern A. Zeeb }
499*28348caeSBjoern A. Zeeb
500*28348caeSBjoern A. Zeeb return 0;
501*28348caeSBjoern A. Zeeb }
502*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_id_tbl_init(struct ath11k_base * ab)503*28348caeSBjoern A. Zeeb static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
504*28348caeSBjoern A. Zeeb {
505*28348caeSBjoern A. Zeeb struct rhashtable_params *param;
506*28348caeSBjoern A. Zeeb struct rhashtable *rhash_id_tbl;
507*28348caeSBjoern A. Zeeb int ret;
508*28348caeSBjoern A. Zeeb size_t size;
509*28348caeSBjoern A. Zeeb
510*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
511*28348caeSBjoern A. Zeeb
512*28348caeSBjoern A. Zeeb if (ab->rhead_peer_id)
513*28348caeSBjoern A. Zeeb return 0;
514*28348caeSBjoern A. Zeeb
515*28348caeSBjoern A. Zeeb size = sizeof(*ab->rhead_peer_id);
516*28348caeSBjoern A. Zeeb rhash_id_tbl = kzalloc(size, GFP_KERNEL);
517*28348caeSBjoern A. Zeeb if (!rhash_id_tbl) {
518*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
519*28348caeSBjoern A. Zeeb size);
520*28348caeSBjoern A. Zeeb return -ENOMEM;
521*28348caeSBjoern A. Zeeb }
522*28348caeSBjoern A. Zeeb
523*28348caeSBjoern A. Zeeb param = &ab->rhash_peer_id_param;
524*28348caeSBjoern A. Zeeb
525*28348caeSBjoern A. Zeeb param->key_offset = offsetof(struct ath11k_peer, peer_id);
526*28348caeSBjoern A. Zeeb param->head_offset = offsetof(struct ath11k_peer, rhash_id);
527*28348caeSBjoern A. Zeeb param->key_len = sizeof_field(struct ath11k_peer, peer_id);
528*28348caeSBjoern A. Zeeb param->automatic_shrinking = true;
529*28348caeSBjoern A. Zeeb param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
530*28348caeSBjoern A. Zeeb
531*28348caeSBjoern A. Zeeb ret = rhashtable_init(rhash_id_tbl, param);
532*28348caeSBjoern A. Zeeb if (ret) {
533*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
534*28348caeSBjoern A. Zeeb goto err_free;
535*28348caeSBjoern A. Zeeb }
536*28348caeSBjoern A. Zeeb
537*28348caeSBjoern A. Zeeb spin_lock_bh(&ab->base_lock);
538*28348caeSBjoern A. Zeeb
539*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_id) {
540*28348caeSBjoern A. Zeeb ab->rhead_peer_id = rhash_id_tbl;
541*28348caeSBjoern A. Zeeb } else {
542*28348caeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
543*28348caeSBjoern A. Zeeb goto cleanup_tbl;
544*28348caeSBjoern A. Zeeb }
545*28348caeSBjoern A. Zeeb
546*28348caeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
547*28348caeSBjoern A. Zeeb
548*28348caeSBjoern A. Zeeb return 0;
549*28348caeSBjoern A. Zeeb
550*28348caeSBjoern A. Zeeb cleanup_tbl:
551*28348caeSBjoern A. Zeeb rhashtable_destroy(rhash_id_tbl);
552*28348caeSBjoern A. Zeeb err_free:
553*28348caeSBjoern A. Zeeb kfree(rhash_id_tbl);
554*28348caeSBjoern A. Zeeb
555*28348caeSBjoern A. Zeeb return ret;
556*28348caeSBjoern A. Zeeb }
557*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_addr_tbl_init(struct ath11k_base * ab)558*28348caeSBjoern A. Zeeb static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
559*28348caeSBjoern A. Zeeb {
560*28348caeSBjoern A. Zeeb struct rhashtable_params *param;
561*28348caeSBjoern A. Zeeb struct rhashtable *rhash_addr_tbl;
562*28348caeSBjoern A. Zeeb int ret;
563*28348caeSBjoern A. Zeeb size_t size;
564*28348caeSBjoern A. Zeeb
565*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
566*28348caeSBjoern A. Zeeb
567*28348caeSBjoern A. Zeeb if (ab->rhead_peer_addr)
568*28348caeSBjoern A. Zeeb return 0;
569*28348caeSBjoern A. Zeeb
570*28348caeSBjoern A. Zeeb size = sizeof(*ab->rhead_peer_addr);
571*28348caeSBjoern A. Zeeb rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
572*28348caeSBjoern A. Zeeb if (!rhash_addr_tbl) {
573*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
574*28348caeSBjoern A. Zeeb size);
575*28348caeSBjoern A. Zeeb return -ENOMEM;
576*28348caeSBjoern A. Zeeb }
577*28348caeSBjoern A. Zeeb
578*28348caeSBjoern A. Zeeb param = &ab->rhash_peer_addr_param;
579*28348caeSBjoern A. Zeeb
580*28348caeSBjoern A. Zeeb param->key_offset = offsetof(struct ath11k_peer, addr);
581*28348caeSBjoern A. Zeeb param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
582*28348caeSBjoern A. Zeeb param->key_len = sizeof_field(struct ath11k_peer, addr);
583*28348caeSBjoern A. Zeeb param->automatic_shrinking = true;
584*28348caeSBjoern A. Zeeb param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
585*28348caeSBjoern A. Zeeb
586*28348caeSBjoern A. Zeeb ret = rhashtable_init(rhash_addr_tbl, param);
587*28348caeSBjoern A. Zeeb if (ret) {
588*28348caeSBjoern A. Zeeb ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
589*28348caeSBjoern A. Zeeb goto err_free;
590*28348caeSBjoern A. Zeeb }
591*28348caeSBjoern A. Zeeb
592*28348caeSBjoern A. Zeeb spin_lock_bh(&ab->base_lock);
593*28348caeSBjoern A. Zeeb
594*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_addr) {
595*28348caeSBjoern A. Zeeb ab->rhead_peer_addr = rhash_addr_tbl;
596*28348caeSBjoern A. Zeeb } else {
597*28348caeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
598*28348caeSBjoern A. Zeeb goto cleanup_tbl;
599*28348caeSBjoern A. Zeeb }
600*28348caeSBjoern A. Zeeb
601*28348caeSBjoern A. Zeeb spin_unlock_bh(&ab->base_lock);
602*28348caeSBjoern A. Zeeb
603*28348caeSBjoern A. Zeeb return 0;
604*28348caeSBjoern A. Zeeb
605*28348caeSBjoern A. Zeeb cleanup_tbl:
606*28348caeSBjoern A. Zeeb rhashtable_destroy(rhash_addr_tbl);
607*28348caeSBjoern A. Zeeb err_free:
608*28348caeSBjoern A. Zeeb kfree(rhash_addr_tbl);
609*28348caeSBjoern A. Zeeb
610*28348caeSBjoern A. Zeeb return ret;
611*28348caeSBjoern A. Zeeb }
612*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base * ab)613*28348caeSBjoern A. Zeeb static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
614*28348caeSBjoern A. Zeeb {
615*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
616*28348caeSBjoern A. Zeeb
617*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_id)
618*28348caeSBjoern A. Zeeb return;
619*28348caeSBjoern A. Zeeb
620*28348caeSBjoern A. Zeeb rhashtable_destroy(ab->rhead_peer_id);
621*28348caeSBjoern A. Zeeb kfree(ab->rhead_peer_id);
622*28348caeSBjoern A. Zeeb ab->rhead_peer_id = NULL;
623*28348caeSBjoern A. Zeeb }
624*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base * ab)625*28348caeSBjoern A. Zeeb static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
626*28348caeSBjoern A. Zeeb {
627*28348caeSBjoern A. Zeeb lockdep_assert_held(&ab->tbl_mtx_lock);
628*28348caeSBjoern A. Zeeb
629*28348caeSBjoern A. Zeeb if (!ab->rhead_peer_addr)
630*28348caeSBjoern A. Zeeb return;
631*28348caeSBjoern A. Zeeb
632*28348caeSBjoern A. Zeeb rhashtable_destroy(ab->rhead_peer_addr);
633*28348caeSBjoern A. Zeeb kfree(ab->rhead_peer_addr);
634*28348caeSBjoern A. Zeeb ab->rhead_peer_addr = NULL;
635*28348caeSBjoern A. Zeeb }
636*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_tbl_init(struct ath11k_base * ab)637*28348caeSBjoern A. Zeeb int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
638*28348caeSBjoern A. Zeeb {
639*28348caeSBjoern A. Zeeb int ret;
640*28348caeSBjoern A. Zeeb
641*28348caeSBjoern A. Zeeb mutex_lock(&ab->tbl_mtx_lock);
642*28348caeSBjoern A. Zeeb
643*28348caeSBjoern A. Zeeb ret = ath11k_peer_rhash_id_tbl_init(ab);
644*28348caeSBjoern A. Zeeb if (ret)
645*28348caeSBjoern A. Zeeb goto out;
646*28348caeSBjoern A. Zeeb
647*28348caeSBjoern A. Zeeb ret = ath11k_peer_rhash_addr_tbl_init(ab);
648*28348caeSBjoern A. Zeeb if (ret)
649*28348caeSBjoern A. Zeeb goto cleanup_tbl;
650*28348caeSBjoern A. Zeeb
651*28348caeSBjoern A. Zeeb mutex_unlock(&ab->tbl_mtx_lock);
652*28348caeSBjoern A. Zeeb
653*28348caeSBjoern A. Zeeb return 0;
654*28348caeSBjoern A. Zeeb
655*28348caeSBjoern A. Zeeb cleanup_tbl:
656*28348caeSBjoern A. Zeeb ath11k_peer_rhash_id_tbl_destroy(ab);
657*28348caeSBjoern A. Zeeb out:
658*28348caeSBjoern A. Zeeb mutex_unlock(&ab->tbl_mtx_lock);
659*28348caeSBjoern A. Zeeb return ret;
660*28348caeSBjoern A. Zeeb }
661*28348caeSBjoern A. Zeeb
ath11k_peer_rhash_tbl_destroy(struct ath11k_base * ab)662*28348caeSBjoern A. Zeeb void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
663*28348caeSBjoern A. Zeeb {
664*28348caeSBjoern A. Zeeb mutex_lock(&ab->tbl_mtx_lock);
665*28348caeSBjoern A. Zeeb
666*28348caeSBjoern A. Zeeb ath11k_peer_rhash_addr_tbl_destroy(ab);
667*28348caeSBjoern A. Zeeb ath11k_peer_rhash_id_tbl_destroy(ab);
668*28348caeSBjoern A. Zeeb
669*28348caeSBjoern A. Zeeb mutex_unlock(&ab->tbl_mtx_lock);
670*28348caeSBjoern A. Zeeb }
671