// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "peer.h"
#include "debug.h"

ath12k_peer_ml_find(struct ath12k_hw * ah,const u8 * addr)11 struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
12 {
13 struct ath12k_ml_peer *ml_peer;
14
15 lockdep_assert_wiphy(ah->hw->wiphy);
16
17 list_for_each_entry(ml_peer, &ah->ml_peers, list) {
18 if (!ether_addr_equal(ml_peer->addr, addr))
19 continue;
20
21 return ml_peer;
22 }
23
24 return NULL;
25 }
26
ath12k_peer_find(struct ath12k_base * ab,int vdev_id,const u8 * addr)27 struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
28 const u8 *addr)
29 {
30 struct ath12k_peer *peer;
31
32 lockdep_assert_held(&ab->base_lock);
33
34 list_for_each_entry(peer, &ab->peers, list) {
35 if (peer->vdev_id != vdev_id)
36 continue;
37 if (!ether_addr_equal(peer->addr, addr))
38 continue;
39
40 return peer;
41 }
42
43 return NULL;
44 }
45
/* Find a peer by pdev index and MAC address.
 *
 * Caller must hold @ab->base_lock.  Returns NULL when no peer on
 * @pdev_idx carries @addr.
 */
static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
							u8 pdev_idx, const u8 *addr)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->pdev_idx == pdev_idx &&
		    ether_addr_equal(peer->addr, addr))
			return peer;
	}

	return NULL;
}
64
ath12k_peer_find_by_addr(struct ath12k_base * ab,const u8 * addr)65 struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
66 const u8 *addr)
67 {
68 struct ath12k_peer *peer;
69
70 lockdep_assert_held(&ab->base_lock);
71
72 list_for_each_entry(peer, &ab->peers, list) {
73 if (!ether_addr_equal(peer->addr, addr))
74 continue;
75
76 return peer;
77 }
78
79 return NULL;
80 }
81
ath12k_peer_find_by_ml_id(struct ath12k_base * ab,int ml_peer_id)82 static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
83 int ml_peer_id)
84 {
85 struct ath12k_peer *peer;
86
87 lockdep_assert_held(&ab->base_lock);
88
89 list_for_each_entry(peer, &ab->peers, list)
90 if (ml_peer_id == peer->ml_id)
91 return peer;
92
93 return NULL;
94 }
95
ath12k_peer_find_by_id(struct ath12k_base * ab,int peer_id)96 struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
97 int peer_id)
98 {
99 struct ath12k_peer *peer;
100
101 lockdep_assert_held(&ab->base_lock);
102
103 if (peer_id == HAL_INVALID_PEERID)
104 return NULL;
105
106 if (peer_id & ATH12K_PEER_ML_ID_VALID)
107 return ath12k_peer_find_by_ml_id(ab, peer_id);
108
109 list_for_each_entry(peer, &ab->peers, list)
110 if (peer_id == peer->peer_id)
111 return peer;
112
113 return NULL;
114 }
115
ath12k_peer_exist_by_vdev_id(struct ath12k_base * ab,int vdev_id)116 bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
117 {
118 struct ath12k_peer *peer;
119
120 spin_lock_bh(&ab->base_lock);
121
122 list_for_each_entry(peer, &ab->peers, list) {
123 if (vdev_id == peer->vdev_id) {
124 spin_unlock_bh(&ab->base_lock);
125 return true;
126 }
127 }
128 spin_unlock_bh(&ab->base_lock);
129 return false;
130 }
131
ath12k_peer_find_by_ast(struct ath12k_base * ab,int ast_hash)132 struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
133 int ast_hash)
134 {
135 struct ath12k_peer *peer;
136
137 lockdep_assert_held(&ab->base_lock);
138
139 list_for_each_entry(peer, &ab->peers, list)
140 if (ast_hash == peer->ast_hash)
141 return peer;
142
143 return NULL;
144 }
145
/* Handle an HTT peer-unmap event: drop the matching local peer entry.
 *
 * Removes the peer from @ab->peers, frees it and wakes waiters on
 * peer_mapping_wq.  Unknown peer ids are only logged.
 */
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_id(ab, peer_id);
	if (peer) {
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
			   peer->vdev_id, peer->addr, peer_id);
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ab->peer_mapping_wq);
	} else {
		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
	}

	spin_unlock_bh(&ab->base_lock);
}
169
/* Handle an HTT peer-map event: record the firmware peer locally.
 *
 * Creates a new entry in @ab->peers on the first map for this
 * (vdev, addr) pair and wakes peer_mapping_wq; repeated maps just log.
 * Allocation uses GFP_ATOMIC because base_lock is held.
 */
void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, mac_addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer) {
			spin_unlock_bh(&ab->base_lock);
			return;
		}

		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		peer->hw_peer_id = hw_peer_id;
		ether_addr_copy(peer->addr, mac_addr);
		list_add(&peer->list, &ab->peers);
		wake_up(&ab->peer_mapping_wq);
	}

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

	spin_unlock_bh(&ab->base_lock);
}
197
/* Wait (up to 3 s) for a peer's presence in ab->peers to match
 * @expect_mapped.
 *
 * The condition is re-evaluated each time peer_mapping_wq is woken
 * (by the map/unmap event handlers); the peer list is checked under
 * ab->base_lock.  A set ATH12K_FLAG_CRASH_FLUSH also ends the wait so
 * callers are not stuck across a firmware crash.
 *
 * Returns 0 when the expected state was reached (or crash flush fired),
 * -ETIMEDOUT otherwise.
 */
static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath12k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}
219
/* Remove every remaining (stale) peer of @vdev_id from the peer list.
 *
 * Frees each matching entry and decrements ar->num_peers, warning per
 * peer since normally all peers should already have been deleted.
 */
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer, *tmp;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	spin_lock_bh(&ab->base_lock);
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		if (peer->vdev_id == vdev_id) {
			ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
				    peer->addr, vdev_id);
			list_del(&peer->list);
			kfree(peer);
			ar->num_peers--;
		}
	}
	spin_unlock_bh(&ab->base_lock);
}
242
/* Block until the peer for (@vdev_id, @addr) is gone from ab->peers,
 * or the common 3 s timeout elapses.  Returns 0 or -ETIMEDOUT.
 */
static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}
247
/* Wait for full completion of a peer delete.
 *
 * First waits for the peer to disappear from the local peer list (driven
 * by the HTT unmap event), then for the firmware's explicit peer delete
 * response completion.  Returns 0 on success or a negative error code
 * (-ETIMEDOUT on either timeout).
 */
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
				     const u8 *addr)
{
	int ret;
	unsigned long time_left;

	ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret) {
		/* Fix: message was missing the error code and the trailing
		 * newline, which lets it run into the next log line.
		 */
		ath12k_warn(ar->ab, "failed wait for peer deleted: %d\n", ret);
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
						3 * HZ);
	if (time_left == 0) {
		ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}
269
/* Issue the WMI peer delete command for (@vdev_id, @addr).
 *
 * Re-arms ar->peer_delete_done first so the caller can later wait for
 * the firmware response.  Returns 0 on success or the WMI error code.
 */
static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	reinit_completion(&ar->peer_delete_done);

	ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (!ret)
		return 0;

	ath12k_warn(ab,
		    "failed to delete peer vdev_id %d addr %pM ret %d\n",
		    vdev_id, addr, ret);
	return ret;
}
289
/* Delete the peer (@vdev_id, @addr): send the WMI command, wait for
 * unmap and firmware response, then drop the local peer count.
 *
 * Returns 0 on success or a negative error code from either stage.
 */
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ret = ath12k_peer_delete_send(ar, vdev_id, addr);
	if (!ret)
		ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;
	return 0;
}
308
/* Block until the peer for (@vdev_id, @addr) appears in ab->peers,
 * or the common 3 s timeout elapses.  Returns 0 or -ETIMEDOUT.
 */
static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}
313
/* Create a firmware peer for @arvif (optionally bound to @sta) and fill
 * in the corresponding local ath12k_peer entry.
 *
 * Flow: reject if the firmware peer table is full or a peer with this
 * address already exists on the pdev; send WMI peer create; wait for the
 * HTT map event to add the entry to ab->peers; then, under base_lock,
 * populate driver-side state (pdev index, sta pointer, AST info for STA
 * vifs, ML link info, open security defaults) and bump ar->num_peers.
 *
 * If the map event never arrives, the firmware-side peer is rolled back
 * via a delete command and -ENOENT is returned.
 *
 * Returns 0 on success or a negative error code.
 */
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
		       struct ieee80211_sta *sta,
		       struct ath12k_wmi_peer_create_arg *arg)
{
	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
	struct ath12k_link_sta *arsta;
	u8 link_id = arvif->link_id;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	u16 ml_peer_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	/* Firmware can only hold max_num_peers entries; refuse early. */
	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath12k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	/* Reject a duplicate address on this pdev before involving firmware. */
	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
	if (peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    arg->vdev_id, ret);
		return ret;
	}

	/* The HTT peer map event adds the entry to ab->peers. */
	ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
					   arg->peer_addr);
	if (ret)
		return ret;

	spin_lock_bh(&ar->ab->base_lock);

	peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    arg->peer_addr, arg->vdev_id);

		/* Roll back the firmware-side peer that was just created. */
		reinit_completion(&ar->peer_delete_done);

		ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
						      arg->vdev_id);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
				    arg->vdev_id, arg->peer_addr);
			return ret;
		}

		ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
						       arg->peer_addr);
		if (ret)
			return ret;

		return -ENOENT;
	}

	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	if (vif->type == NL80211_IFTYPE_STATION) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	if (vif->type == NL80211_IFTYPE_AP)
		peer->ucast_ra_only = true;

	if (sta) {
		ahsta = ath12k_sta_to_ahsta(sta);
		arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
					  ahsta->link[link_id]);

		peer->link_id = arsta->link_id;

		/* Fill ML info into created peer */
		if (sta->mlo) {
			ml_peer_id = ahsta->ml_peer_id;
			peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
			ether_addr_copy(peer->ml_addr, sta->addr);

			/* the assoc link is considered primary for now */
			peer->primary_link = arsta->is_assoc_link;
			peer->mlo = true;
		} else {
			peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
			peer->primary_link = true;
			peer->mlo = false;
		}
	}

	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);

	return 0;
}
424
ath12k_peer_ml_alloc(struct ath12k_hw * ah)425 static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
426 {
427 u16 ml_peer_id;
428
429 lockdep_assert_wiphy(ah->hw->wiphy);
430
431 for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
432 if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
433 continue;
434
435 set_bit(ml_peer_id, ah->free_ml_peer_id_map);
436 break;
437 }
438
439 if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
440 ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
441
442 return ml_peer_id;
443 }
444
ath12k_peer_ml_create(struct ath12k_hw * ah,struct ieee80211_sta * sta)445 int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
446 {
447 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
448 struct ath12k_ml_peer *ml_peer;
449
450 lockdep_assert_wiphy(ah->hw->wiphy);
451
452 if (!sta->mlo)
453 return -EINVAL;
454
455 ml_peer = ath12k_peer_ml_find(ah, sta->addr);
456 if (ml_peer) {
457 ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
458 ml_peer->id, sta->addr);
459 return -EEXIST;
460 }
461
462 ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
463 if (!ml_peer)
464 return -ENOMEM;
465
466 ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
467
468 if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
469 ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
470 sta->addr);
471 kfree(ml_peer);
472 return -ENOMEM;
473 }
474
475 ether_addr_copy(ml_peer->addr, sta->addr);
476 ml_peer->id = ahsta->ml_peer_id;
477 list_add(&ml_peer->list, &ah->ml_peers);
478
479 return 0;
480 }
481
ath12k_peer_ml_delete(struct ath12k_hw * ah,struct ieee80211_sta * sta)482 int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
483 {
484 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
485 struct ath12k_ml_peer *ml_peer;
486
487 lockdep_assert_wiphy(ah->hw->wiphy);
488
489 if (!sta->mlo)
490 return -EINVAL;
491
492 clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
493 ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
494
495 ml_peer = ath12k_peer_ml_find(ah, sta->addr);
496 if (!ml_peer) {
497 ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
498 return -EINVAL;
499 }
500
501 list_del(&ml_peer->list);
502 kfree(ml_peer);
503
504 return 0;
505 }
506
/* Delete all link peers of MLO station @ahsta on vif @ahvif.
 *
 * Two passes over ahsta->links_map: the first sends the WMI delete for
 * every link peer, the second waits for each delete to finish and drops
 * the per-radio peer count.  The split is deliberate — firmware expects
 * all link-peer deletes to be issued before the driver starts waiting
 * for unmap/delete responses (see comment below).
 *
 * Returns 0 when every link peer was deleted, otherwise the last error
 * encountered (remaining links are still processed best-effort).
 */
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
{
	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
	struct ath12k_hw *ah = ahvif->ah;
	struct ath12k_link_vif *arvif;
	struct ath12k_link_sta *arsta;
	unsigned long links;
	struct ath12k *ar;
	int ret, err_ret = 0;
	u8 link_id;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	/* FW expects delete of all link peers at once before waiting for reception
	 * of peer unmap or delete responses
	 */
	links = ahsta->links_map;
	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
		if (!arvif || !arsta)
			continue;

		ar = arvif->ar;
		if (!ar)
			continue;

		/* Tear down datapath state before asking firmware to delete. */
		ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);

		ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to delete peer vdev_id %d addr %pM ret %d\n",
				    arvif->vdev_id, arsta->addr, ret);
			err_ret = ret;
			continue;
		}
	}

	/* Ensure all link peers are deleted and unmapped */
	links = ahsta->links_map;
	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
		if (!arvif || !arsta)
			continue;

		ar = arvif->ar;
		if (!ar)
			continue;

		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
		if (ret) {
			err_ret = ret;
			continue;
		}
		ar->num_peers--;
	}

	return err_ret;
}
571