xref: /linux/drivers/net/wireless/ath/ath12k/peer.c (revision 95f68e06b41b9e88291796efa3969409d13fdd4c)
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "peer.h"
#include "debug.h"

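/* Look up the MLD-level peer entry matching @addr; the caller must hold the
 * wiphy lock.
 */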
static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
{
	struct ath12k_ml_peer *ml_peer;

	lockdep_assert_wiphy(ah->hw->wiphy);

	list_for_each_entry(ml_peer, &ah->ml_peers, list) {
		if (!ether_addr_equal(ml_peer->addr, addr))
			continue;

		return ml_peer;
	}

	return NULL;
}

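/* Find a link peer by vdev id and MAC address; the caller must hold ab->base_lock. */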
struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
				     const u8 *addr)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

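/* Find a peer on the given pdev by MAC address; the caller must hold ab->base_lock. */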
static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
							u8 pdev_idx, const u8 *addr)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (peer->pdev_idx != pdev_idx)
			continue;
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

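/* Find the first peer matching @addr on any vdev; the caller must hold ab->base_lock. */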
struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
					     const u8 *addr)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (!ether_addr_equal(peer->addr, addr))
			continue;

		return peer;
	}

	return NULL;
}

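/* Find the link peer carrying the given ML peer id; the caller must hold ab->base_lock. */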
static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
						     int ml_peer_id)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list)
		if (ml_peer_id == peer->ml_id)
			return peer;

	return NULL;
}

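/* Resolve a peer id to a peer entry; ids with ATH12K_PEER_ML_ID_VALID set are
 * looked up in the ML peer id space instead.
 */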
struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
					   int peer_id)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	if (peer_id & ATH12K_PEER_ML_ID_VALID)
		return ath12k_peer_find_by_ml_id(ab, peer_id);

	list_for_each_entry(peer, &ab->peers, list)
		if (peer_id == peer->peer_id)
			return peer;

	return NULL;
}

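/* Check whether any peer is still present on the given vdev. */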
bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list) {
		if (vdev_id == peer->vdev_id) {
			spin_unlock_bh(&ab->base_lock);
			return true;
		}
	}
	spin_unlock_bh(&ab->base_lock);
	return false;
}

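/* Find a peer by the AST hash reported by firmware; the caller must hold ab->base_lock. */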
struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
					    int ast_hash)
{
	struct ath12k_peer *peer;

	lockdep_assert_held(&ab->base_lock);

	list_for_each_entry(peer, &ab->peers, list)
		if (ast_hash == peer->ast_hash)
			return peer;

	return NULL;
}

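/* Handle an HTT peer unmap event: drop the entry added at map time and wake
 * up anyone waiting in ath12k_wait_for_peer_common().
 */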
void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_id(ab, peer_id);
	if (!peer) {
		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, peer_id);

	list_del(&peer->list);
	kfree(peer);
	wake_up(&ab->peer_mapping_wq);

exit:
	spin_unlock_bh(&ab->base_lock);
}

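/* Handle an HTT peer map event: create the local peer entry if it does not
 * exist yet and record the firmware-assigned peer id, AST hash and hw peer id.
 */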
void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
			   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find(ab, vdev_id, mac_addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		peer->hw_peer_id = hw_peer_id;
		ether_addr_copy(peer->addr, mac_addr);
		list_add(&peer->list, &ab->peers);
		wake_up(&ab->peer_mapping_wq);
	}

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&ab->base_lock);
}

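/* Wait up to three seconds for the peer to reach the expected map state,
 * bailing out early if a firmware crash flush is in progress.
 */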
static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ab->peer_mapping_wq, ({
				bool mapped;

				spin_lock_bh(&ab->base_lock);
				mapped = !!ath12k_peer_find(ab, vdev_id, addr);
				spin_unlock_bh(&ab->base_lock);

				(mapped == expect_mapped ||
				 test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
				}), 3 * HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}

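/* Remove any peers still attached to the vdev, e.g. ones left behind after an
 * unclean teardown.
 */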
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_peer *peer, *tmp;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	spin_lock_bh(&ab->base_lock);
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}

	spin_unlock_bh(&ab->base_lock);
}

static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
}

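/* Wait for the peer to be unmapped and for the WMI peer delete response. */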
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
				     const u8 *addr)
{
	int ret;
	unsigned long time_left;

	ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret) {
		ath12k_warn(ar->ab, "failed wait for peer deleted\n");
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->peer_delete_done,
						3 * HZ);
	if (time_left == 0) {
		ath12k_warn(ar->ab, "timeout in receiving peer delete response\n");
		return -ETIMEDOUT;
	}

	return 0;
}

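/* Issue the WMI peer delete command; the response is awaited separately via
 * ath12k_wait_for_peer_delete_done().
 */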
static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	reinit_completion(&ar->peer_delete_done);

	ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
	if (ret) {
		ath12k_warn(ab,
			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
			    vdev_id, addr, ret);
		return ret;
	}

	return 0;
}

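/* Delete a single peer and wait until firmware confirms the removal. */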
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
{
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ret = ath12k_peer_delete_send(ar, vdev_id, addr);
	if (ret)
		return ret;

	ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;

	return 0;
}

static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
}

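/* Create a peer in firmware and set up the local peer entry. For MLO stations
 * the ML peer id and MLD address are recorded as well; if no matching entry is
 * found after creation, the firmware peer is deleted again.
 */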
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
		       struct ieee80211_sta *sta,
		       struct ath12k_wmi_peer_create_arg *arg)
{
	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
	struct ath12k_link_sta *arsta;
	u8 link_id = arvif->link_id;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	u16 ml_peer_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	if (ar->num_peers > (ar->max_num_peers - 1)) {
		ath12k_warn(ar->ab,
			    "failed to create peer due to insufficient peer entry resource in firmware\n");
		return -ENOBUFS;
	}

	spin_lock_bh(&ar->ab->base_lock);
	peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
	if (peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&ar->ab->base_lock);

	ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send peer create vdev_id %d ret %d\n",
			    arg->vdev_id, ret);
		return ret;
	}

	ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
					   arg->peer_addr);
	if (ret)
		return ret;

	spin_lock_bh(&ar->ab->base_lock);

	peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
	if (!peer) {
		spin_unlock_bh(&ar->ab->base_lock);
		ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
			    arg->peer_addr, arg->vdev_id);

		reinit_completion(&ar->peer_delete_done);

		ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
						      arg->vdev_id);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
				    arg->vdev_id, arg->peer_addr);
			return ret;
		}

		ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
						       arg->peer_addr);
		if (ret)
			return ret;

		return -ENOENT;
	}

	peer->pdev_idx = ar->pdev_idx;
	peer->sta = sta;

	if (vif->type == NL80211_IFTYPE_STATION) {
		arvif->ast_hash = peer->ast_hash;
		arvif->ast_idx = peer->hw_peer_id;
	}

	if (sta) {
		ahsta = ath12k_sta_to_ahsta(sta);
		arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
					  ahsta->link[link_id]);

		/* Fill ML info into created peer */
		if (sta->mlo) {
			ml_peer_id = ahsta->ml_peer_id;
			peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
			ether_addr_copy(peer->ml_addr, sta->addr);

			/* the assoc link is considered primary for now */
			peer->primary_link = arsta->is_assoc_link;
			peer->mlo = true;
		} else {
			peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
			peer->primary_link = true;
			peer->mlo = false;
		}
	}

	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	ar->num_peers++;

	spin_unlock_bh(&ar->ab->base_lock);

	return 0;
}

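/* Allocate a free ML peer id from the per-hw bitmap, or return
 * ATH12K_MLO_PEER_ID_INVALID if none is available.
 */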
static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
{
	u16 ml_peer_id;

	lockdep_assert_wiphy(ah->hw->wiphy);

	for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
		if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
			continue;

		set_bit(ml_peer_id, ah->free_ml_peer_id_map);
		break;
	}

	if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
		ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;

	return ml_peer_id;
}

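/* Create the MLD-level peer entry for an MLO station and assign it an ML
 * peer id.
 */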
int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
{
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
	struct ath12k_ml_peer *ml_peer;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	ml_peer = ath12k_peer_ml_find(ah, sta->addr);
	if (ml_peer) {
		ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM\n",
			       ml_peer->id, sta->addr);
		return -EEXIST;
	}

	ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
	if (!ml_peer)
		return -ENOMEM;

	ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);

	if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
		ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM\n",
			       sta->addr);
		kfree(ml_peer);
		return -ENOMEM;
	}

	ether_addr_copy(ml_peer->addr, sta->addr);
	ml_peer->id = ahsta->ml_peer_id;
	list_add(&ml_peer->list, &ah->ml_peers);

	return 0;
}

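/* Release the ML peer id and free the MLD-level peer entry of an MLO station. */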
int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
{
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
	struct ath12k_ml_peer *ml_peer;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
	ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;

	ml_peer = ath12k_peer_ml_find(ah, sta->addr);
	if (!ml_peer) {
		ath12k_hw_warn(ah, "ML peer for %pM not found\n", sta->addr);
		return -EINVAL;
	}

	list_del(&ml_peer->list);
	kfree(ml_peer);

	return 0;
}

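/* Delete all link peers of an MLO station: send the delete commands for every
 * link first, then wait for the corresponding delete and unmap responses.
 */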
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
{
	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
	struct ath12k_hw *ah = ahvif->ah;
	struct ath12k_link_vif *arvif;
	struct ath12k_link_sta *arsta;
	unsigned long links;
	struct ath12k *ar;
	int ret, err_ret = 0;
	u8 link_id;

	lockdep_assert_wiphy(ah->hw->wiphy);

	if (!sta->mlo)
		return -EINVAL;

	/* FW expects the delete commands for all link peers to be sent before
	 * waiting for any of the peer unmap or delete responses
	 */
	links = ahsta->links_map;
	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
		if (!arvif || !arsta)
			continue;

		ar = arvif->ar;
		if (!ar)
			continue;

		ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);

		ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to delete peer vdev_id %d addr %pM ret %d\n",
				    arvif->vdev_id, arsta->addr, ret);
			err_ret = ret;
			continue;
		}
	}

	/* Ensure all link peers are deleted and unmapped */
	links = ahsta->links_map;
	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
		if (!arvif || !arsta)
			continue;

		ar = arvif->ar;
		if (!ar)
			continue;

		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
		if (ret) {
			err_ret = ret;
			continue;
		}
		ar->num_peers--;
	}

	return err_ret;
}
563