xref: /linux/drivers/net/wireless/ath/ath12k/dp_peer.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include "core.h"
8 #include "dp_peer.h"
9 #include "debug.h"
10 #include "debugfs.h"
11 
ath12k_dp_link_peer_free(struct ath12k_dp_link_peer * peer)12 void ath12k_dp_link_peer_free(struct ath12k_dp_link_peer *peer)
13 {
14 	list_del(&peer->list);
15 
16 	kfree(peer->peer_stats.rx_stats);
17 	kfree(peer);
18 }
19 
20 struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_vdev_and_addr(struct ath12k_dp * dp,int vdev_id,const u8 * addr)21 ath12k_dp_link_peer_find_by_vdev_and_addr(struct ath12k_dp *dp,
22 					  int vdev_id, const u8 *addr)
23 {
24 	struct ath12k_dp_link_peer *peer;
25 
26 	lockdep_assert_held(&dp->dp_lock);
27 
28 	list_for_each_entry(peer, &dp->peers, list) {
29 		if (peer->vdev_id != vdev_id)
30 			continue;
31 		if (!ether_addr_equal(peer->addr, addr))
32 			continue;
33 
34 		return peer;
35 	}
36 
37 	return NULL;
38 }
39 
40 struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_pdev_and_addr(struct ath12k_dp * dp,u8 pdev_idx,const u8 * addr)41 ath12k_dp_link_peer_find_by_pdev_and_addr(struct ath12k_dp *dp, u8 pdev_idx,
42 					  const u8 *addr)
43 {
44 	struct ath12k_dp_link_peer *peer;
45 
46 	lockdep_assert_held(&dp->dp_lock);
47 
48 	list_for_each_entry(peer, &dp->peers, list) {
49 		if (peer->pdev_idx != pdev_idx)
50 			continue;
51 		if (!ether_addr_equal(peer->addr, addr))
52 			continue;
53 
54 		return peer;
55 	}
56 
57 	return NULL;
58 }
59 
60 struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_addr(struct ath12k_dp * dp,const u8 * addr)61 ath12k_dp_link_peer_find_by_addr(struct ath12k_dp *dp, const u8 *addr)
62 {
63 	lockdep_assert_held(&dp->dp_lock);
64 
65 	return rhashtable_lookup_fast(dp->rhead_peer_addr, addr,
66 				      dp->rhash_peer_addr_param);
67 }
68 EXPORT_SYMBOL(ath12k_dp_link_peer_find_by_addr);
69 
70 static struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_ml_id(struct ath12k_dp * dp,int ml_peer_id)71 ath12k_dp_link_peer_find_by_ml_id(struct ath12k_dp *dp, int ml_peer_id)
72 {
73 	struct ath12k_dp_link_peer *peer;
74 
75 	lockdep_assert_held(&dp->dp_lock);
76 
77 	list_for_each_entry(peer, &dp->peers, list)
78 		if (ml_peer_id == peer->ml_id)
79 			return peer;
80 
81 	return NULL;
82 }
83 
84 static struct ath12k_dp_link_peer *
ath12k_dp_link_peer_search_by_id(struct ath12k_dp * dp,int peer_id)85 ath12k_dp_link_peer_search_by_id(struct ath12k_dp *dp, int peer_id)
86 {
87 	struct ath12k_dp_link_peer *peer;
88 
89 	lockdep_assert_held(&dp->dp_lock);
90 
91 	if (peer_id == HAL_INVALID_PEERID)
92 		return NULL;
93 
94 	if (peer_id & ATH12K_PEER_ML_ID_VALID)
95 		return ath12k_dp_link_peer_find_by_ml_id(dp, peer_id);
96 
97 	list_for_each_entry(peer, &dp->peers, list)
98 		if (peer_id == peer->peer_id)
99 			return peer;
100 
101 	return NULL;
102 }
103 
ath12k_dp_link_peer_exist_by_vdev_id(struct ath12k_dp * dp,int vdev_id)104 bool ath12k_dp_link_peer_exist_by_vdev_id(struct ath12k_dp *dp, int vdev_id)
105 {
106 	struct ath12k_dp_link_peer *peer;
107 
108 	spin_lock_bh(&dp->dp_lock);
109 
110 	list_for_each_entry(peer, &dp->peers, list) {
111 		if (vdev_id == peer->vdev_id) {
112 			spin_unlock_bh(&dp->dp_lock);
113 			return true;
114 		}
115 	}
116 	spin_unlock_bh(&dp->dp_lock);
117 	return false;
118 }
119 
120 struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_ast(struct ath12k_dp * dp,int ast_hash)121 ath12k_dp_link_peer_find_by_ast(struct ath12k_dp *dp, int ast_hash)
122 {
123 	struct ath12k_dp_link_peer *peer;
124 
125 	lockdep_assert_held(&dp->dp_lock);
126 
127 	list_for_each_entry(peer, &dp->peers, list)
128 		if (ast_hash == peer->ast_hash)
129 			return peer;
130 
131 	return NULL;
132 }
133 
/* HTT peer unmap event handler: firmware has released the peer id, so
 * tear down the matching link peer and wake anyone waiting on the
 * peer mapping waitqueue. Unknown ids are logged and ignored.
 */
void ath12k_dp_link_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_dp_link_peer *peer;

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_search_by_id(dp, peer_id);
	if (peer) {
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
			   peer->vdev_id, peer->addr, peer_id);

		ath12k_dp_link_peer_free(peer);
		wake_up(&ab->peer_mapping_wq);
	} else {
		ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
			    peer_id);
	}

	spin_unlock_bh(&dp->dp_lock);
}
157 
/* HTT peer map event handler: called when firmware maps a (vdev, MAC)
 * pair to a peer id. Creates the ath12k_dp_link_peer on first map;
 * a repeated map event for an already-known peer only emits the debug log.
 */
void ath12k_dp_link_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
				   u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
{
	struct ath12k_dp_link_peer *peer;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k *ar;

	spin_lock_bh(&dp->dp_lock);
	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, mac_addr);
	if (!peer) {
		/* GFP_ATOMIC: allocating under dp_lock with BHs disabled */
		peer = kzalloc_obj(*peer, GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = vdev_id;
		peer->peer_id = peer_id;
		peer->ast_hash = ast_hash;
		peer->hw_peer_id = hw_peer_id;
		ether_addr_copy(peer->addr, mac_addr);

		/* RCU read lock protects the ar lookup by vdev id */
		rcu_read_lock();
		ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
		/* Extended rx stats buffer is optional: a failed allocation
		 * here is tolerated and simply leaves those stats disabled.
		 */
		if (ar && ath12k_debugfs_is_extd_rx_stats_enabled(ar) &&
		    !peer->peer_stats.rx_stats) {
			peer->peer_stats.rx_stats = kzalloc_obj(*peer->peer_stats.rx_stats,
								GFP_ATOMIC);
		}
		rcu_read_unlock();

		list_add(&peer->list, &dp->peers);
		/* wake waiters blocked on peer map/unmap completion */
		wake_up(&ab->peer_mapping_wq);
		ewma_avg_rssi_init(&peer->avg_rssi);
	}
	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   vdev_id, mac_addr, peer_id);

exit:
	spin_unlock_bh(&dp->dp_lock);
}
197 
ath12k_dp_link_peer_to_link_sta(struct ath12k_base * ab,struct ath12k_dp_link_peer * peer)198 struct ath12k_link_sta *ath12k_dp_link_peer_to_link_sta(struct ath12k_base *ab,
199 							struct ath12k_dp_link_peer *peer)
200 {
201 	struct ath12k_sta *ahsta;
202 	struct ath12k_link_sta *arsta;
203 
204 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
205 			 "ath12k_dp_link_peer to ath12k_link_sta called without rcu lock");
206 
207 	if (!peer->sta)
208 		return NULL;
209 
210 	ahsta = ath12k_sta_to_ahsta(peer->sta);
211 	if (peer->ml_id & ATH12K_PEER_ML_ID_VALID) {
212 		if (!(ahsta->links_map & BIT(peer->link_id))) {
213 			ath12k_warn(ab, "peer %pM id %d link_id %d can't found in STA link_map 0x%x\n",
214 				    peer->addr, peer->peer_id, peer->link_id,
215 				    ahsta->links_map);
216 			return NULL;
217 		}
218 		arsta = rcu_dereference(ahsta->link[peer->link_id]);
219 		if (!arsta)
220 			return NULL;
221 	} else {
222 		arsta =  &ahsta->deflink;
223 	}
224 	return arsta;
225 }
226 
/* Allocate and initialise the rhashtable used for MAC-address keyed
 * link peer lookup. Caller must hold link_peer_rhash_tbl_lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath12k_dp_link_peer_rhash_addr_tbl_init(struct ath12k_dp *dp)
{
	struct ath12k_base *ab = dp->ab;
	struct rhashtable_params *param;
	struct rhashtable *rhash_addr_tbl;
	int ret;

	lockdep_assert_held(&dp->link_peer_rhash_tbl_lock);

	/* NOTE(review): no explicit GFP flags here, unlike the two-argument
	 * kzalloc_obj(..., GFP_ATOMIC) calls elsewhere in this file —
	 * presumably the single-argument form defaults to GFP_KERNEL;
	 * confirm against the kzalloc_obj() definition.
	 */
	rhash_addr_tbl = kzalloc_obj(*dp->rhead_peer_addr);
	if (!rhash_addr_tbl)
		return -ENOMEM;

	param = &dp->rhash_peer_addr_param;

	/* hash key is the MAC address field embedded in the peer struct */
	param->key_offset = offsetof(struct ath12k_dp_link_peer, addr);
	param->head_offset = offsetof(struct ath12k_dp_link_peer, rhash_addr);
	param->key_len = sizeof_field(struct ath12k_dp_link_peer, addr);
	param->automatic_shrinking = true;
	/* size hint: worst case of every radio fully populated with peers */
	param->nelem_hint = ab->num_radios * ath12k_core_get_max_peers_per_radio(ab);

	ret = rhashtable_init(rhash_addr_tbl, param);
	if (ret) {
		ath12k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
		goto err_free;
	}

	/* publish only after successful init */
	dp->rhead_peer_addr = rhash_addr_tbl;

	return 0;

err_free:
	kfree(rhash_addr_tbl);

	return ret;
}
263 
ath12k_dp_link_peer_rhash_tbl_init(struct ath12k_dp * dp)264 int ath12k_dp_link_peer_rhash_tbl_init(struct ath12k_dp *dp)
265 {
266 	int ret;
267 
268 	mutex_lock(&dp->link_peer_rhash_tbl_lock);
269 	ret = ath12k_dp_link_peer_rhash_addr_tbl_init(dp);
270 	mutex_unlock(&dp->link_peer_rhash_tbl_lock);
271 
272 	return ret;
273 }
274 
ath12k_dp_link_peer_rhash_tbl_destroy(struct ath12k_dp * dp)275 void ath12k_dp_link_peer_rhash_tbl_destroy(struct ath12k_dp *dp)
276 {
277 	mutex_lock(&dp->link_peer_rhash_tbl_lock);
278 	rhashtable_destroy(dp->rhead_peer_addr);
279 	kfree(dp->rhead_peer_addr);
280 	dp->rhead_peer_addr = NULL;
281 	mutex_unlock(&dp->link_peer_rhash_tbl_lock);
282 }
283 
ath12k_dp_link_peer_rhash_insert(struct ath12k_dp * dp,struct ath12k_dp_link_peer * peer)284 static int ath12k_dp_link_peer_rhash_insert(struct ath12k_dp *dp,
285 					    struct ath12k_dp_link_peer *peer)
286 {
287 	struct ath12k_dp_link_peer *tmp;
288 
289 	lockdep_assert_held(&dp->dp_lock);
290 
291 	tmp = rhashtable_lookup_get_insert_fast(dp->rhead_peer_addr, &peer->rhash_addr,
292 						dp->rhash_peer_addr_param);
293 	if (!tmp)
294 		return 0;
295 	else if (IS_ERR(tmp))
296 		return PTR_ERR(tmp);
297 	else
298 		return -EEXIST;
299 }
300 
ath12k_dp_link_peer_rhash_remove(struct ath12k_dp * dp,struct ath12k_dp_link_peer * peer)301 static int ath12k_dp_link_peer_rhash_remove(struct ath12k_dp *dp,
302 					    struct ath12k_dp_link_peer *peer)
303 {
304 	int ret;
305 
306 	lockdep_assert_held(&dp->dp_lock);
307 
308 	ret = rhashtable_remove_fast(dp->rhead_peer_addr, &peer->rhash_addr,
309 				     dp->rhash_peer_addr_param);
310 	if (ret && ret != -ENOENT)
311 		return ret;
312 
313 	return 0;
314 }
315 
ath12k_dp_link_peer_rhash_add(struct ath12k_dp * dp,struct ath12k_dp_link_peer * peer)316 int ath12k_dp_link_peer_rhash_add(struct ath12k_dp *dp,
317 				  struct ath12k_dp_link_peer *peer)
318 {
319 	int ret;
320 
321 	lockdep_assert_held(&dp->dp_lock);
322 
323 	ret = ath12k_dp_link_peer_rhash_insert(dp, peer);
324 	if (ret)
325 		ath12k_warn(dp, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
326 			    peer->addr, peer->peer_id, ret);
327 
328 	return ret;
329 }
330 
ath12k_dp_link_peer_rhash_delete(struct ath12k_dp * dp,struct ath12k_dp_link_peer * peer)331 void ath12k_dp_link_peer_rhash_delete(struct ath12k_dp *dp,
332 				      struct ath12k_dp_link_peer *peer)
333 {
334 	/* No failure handling and hence return type is void */
335 	int ret;
336 
337 	lockdep_assert_held(&dp->dp_lock);
338 
339 	ret = ath12k_dp_link_peer_rhash_remove(dp, peer);
340 	if (ret)
341 		ath12k_warn(dp, "failed to remove peer %pM with id %d in rhash_addr ret %d\n",
342 			    peer->addr, peer->peer_id, ret);
343 }
344 
/* Find a DP peer by MAC address. Caller must hold peer_lock. */
struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr(struct ath12k_dp_hw *dp_hw, u8 *addr)
{
	struct ath12k_dp_peer *cur;

	lockdep_assert_held(&dp_hw->peer_lock);

	list_for_each_entry(cur, &dp_hw->dp_peers_list, list) {
		if (ether_addr_equal(cur->addr, addr))
			return cur;
	}

	return NULL;
}
EXPORT_SYMBOL(ath12k_dp_peer_find_by_addr);
359 
/* Find a DP peer matching both the MAC address and the backing
 * mac80211 station pointer. Caller must hold peer_lock.
 */
struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr_and_sta(struct ath12k_dp_hw *dp_hw,
							   u8 *addr,
							   struct ieee80211_sta *sta)
{
	struct ath12k_dp_peer *cur;

	lockdep_assert_held(&dp_hw->peer_lock);

	list_for_each_entry(cur, &dp_hw->dp_peers_list, list) {
		if (cur->sta == sta && ether_addr_equal(cur->addr, addr))
			return cur;
	}

	return NULL;
}
375 
/* Duplicate check used by peer creation: a MAC match is a conflict
 * unless both the existing peer and the candidate are distinct non-MLO
 * stations. Caller must hold peer_lock.
 */
static struct ath12k_dp_peer *ath12k_dp_peer_create_find(struct ath12k_dp_hw *dp_hw,
							 u8 *addr,
							 struct ieee80211_sta *sta,
							 bool mlo_peer)
{
	struct ath12k_dp_peer *cur;

	lockdep_assert_held(&dp_hw->peer_lock);

	list_for_each_entry(cur, &dp_hw->dp_peers_list, list) {
		if (!ether_addr_equal(cur->addr, addr))
			continue;

		if (!sta || mlo_peer || cur->is_mlo || cur->sta == sta)
			return cur;
	}

	return NULL;
}
395 
396 /*
397  * Index of ath12k_dp_peer for MLO client is same as peer id of ath12k_dp_peer,
398  * while for ath12k_dp_link_peer(mlo and non-mlo) and ath12k_dp_peer for
399  * Non-MLO client it is derived as ((DEVICE_ID << 10) | (10 bits of peer id)).
400  *
401  * This is done because ml_peer_id and peer_id_table are at hw granularity,
402  * while link_peer_id is at device granularity, hence in order to avoid
403  * conflict this approach is followed.
404  */
405 #define ATH12K_DP_PEER_TABLE_DEVICE_ID_SHIFT        10
406 
/* Map a peer id to its slot in the hw-level dp_peers RCU table.
 * ML peer ids are already hw-unique and pass through unchanged;
 * device-scoped ids get the device id folded into the upper bits so
 * multiple devices cannot collide in the shared table.
 */
u16 ath12k_dp_peer_get_peerid_index(struct ath12k_dp *dp, u16 peer_id)
{
	if (peer_id & ATH12K_PEER_ML_ID_VALID)
		return peer_id;

	return (dp->device_id << ATH12K_DP_PEER_TABLE_DEVICE_ID_SHIFT) | peer_id;
}
412 
/* RCU lookup of a DP peer by peer id. Must be called under
 * rcu_read_lock(); the result is only valid inside that section.
 */
struct ath12k_dp_peer *ath12k_dp_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev,
						     u16 peer_id)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	u16 index;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "ath12k dp peer find by peerid index called without rcu lock");

	/* id 0 and ids at or above the invalid marker are never valid */
	if (!peer_id || peer_id >= ATH12K_DP_PEER_ID_INVALID)
		return NULL;

	index = ath12k_dp_peer_get_peerid_index(dp, peer_id);

	return rcu_dereference(dp_pdev->dp_hw->dp_peers[index]);
}
EXPORT_SYMBOL(ath12k_dp_peer_find_by_peerid);
430 
431 struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_peerid(struct ath12k_pdev_dp * dp_pdev,u16 peer_id)432 ath12k_dp_link_peer_find_by_peerid(struct ath12k_pdev_dp *dp_pdev, u16 peer_id)
433 {
434 	struct ath12k_dp_peer *dp_peer = NULL;
435 	u8 link_id;
436 
437 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
438 			 "ath12k dp link peer find by peerid index called without rcu lock");
439 
440 	if (dp_pdev->hw_link_id >= ATH12K_GROUP_MAX_RADIO)
441 		return NULL;
442 
443 	dp_peer = ath12k_dp_peer_find_by_peerid(dp_pdev, peer_id);
444 	if (!dp_peer)
445 		return NULL;
446 
447 	link_id = dp_peer->hw_links[dp_pdev->hw_link_id];
448 
449 	return rcu_dereference(dp_peer->link_peers[link_id]);
450 }
451 EXPORT_SYMBOL(ath12k_dp_link_peer_find_by_peerid);
452 
/* Create a DP peer for the given MAC address.
 *
 * Returns 0 on success, -EEXIST if a conflicting peer already exists,
 * or -ENOMEM on allocation failure.
 *
 * The original implementation dropped peer_lock between the duplicate
 * check and the list insertion, leaving a window where two concurrent
 * creates of the same peer could both succeed; it also assigned
 * ucast_ra_only twice. Both issues are fixed here: the peer is
 * allocated up front and the check + insert happen under a single hold
 * of peer_lock.
 */
int ath12k_dp_peer_create(struct ath12k_dp_hw *dp_hw, u8 *addr,
			  struct ath12k_dp_peer_create_params *params)
{
	struct ath12k_dp_peer *dp_peer, *existing;

	/* GFP_ATOMIC keeps the allocation legal under the bh-disabled
	 * peer_lock taken below (matches the original allocation flags).
	 */
	dp_peer = kzalloc_obj(*dp_peer, GFP_ATOMIC);
	if (!dp_peer)
		return -ENOMEM;

	ether_addr_copy(dp_peer->addr, addr);
	dp_peer->sta = params->sta;
	dp_peer->is_mlo = params->is_mlo;

	/*
	 * For MLO client, the host assigns the ML peer ID, so set peer_id in dp_peer
	 * For non-MLO client, host gets link peer ID from firmware and will be
	 * assigned at the time of link peer creation
	 */
	dp_peer->peer_id = params->is_mlo ? params->peer_id : ATH12K_DP_PEER_ID_INVALID;
	dp_peer->ucast_ra_only = params->ucast_ra_only;
	dp_peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
	dp_peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;

	spin_lock_bh(&dp_hw->peer_lock);

	/* duplicate check and insertion are atomic w.r.t. peer_lock */
	existing = ath12k_dp_peer_create_find(dp_hw, addr, params->sta,
					      params->is_mlo);
	if (existing) {
		spin_unlock_bh(&dp_hw->peer_lock);
		kfree(dp_peer);
		return -EEXIST;
	}

	list_add(&dp_peer->list, &dp_hw->dp_peers_list);

	/*
	 * For MLO client, the peer_id for ath12k_dp_peer is allocated by host
	 * and that peer_id is known at this point, and hence this ath12k_dp_peer
	 * can be added to the RCU table using the peer_id.
	 * For non-MLO client, this addition to RCU table shall be done at the
	 * time of assignment of ath12k_dp_link_peer to ath12k_dp_peer.
	 */
	if (dp_peer->is_mlo)
		rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], dp_peer);

	spin_unlock_bh(&dp_hw->peer_lock);

	return 0;
}
504 
/* Delete the DP peer matching (addr, sta): unpublish it from the RCU
 * table (MLO peers only), unlink it, wait out readers, and free it.
 * A missing peer is silently ignored.
 */
void ath12k_dp_peer_delete(struct ath12k_dp_hw *dp_hw, u8 *addr,
			   struct ieee80211_sta *sta)
{
	struct ath12k_dp_peer *dp_peer;

	spin_lock_bh(&dp_hw->peer_lock);

	dp_peer = ath12k_dp_peer_find_by_addr_and_sta(dp_hw, addr, sta);
	if (dp_peer) {
		/* MLO peers also occupy a slot in the RCU lookup table */
		if (dp_peer->is_mlo)
			rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], NULL);

		list_del(&dp_peer->list);
	}

	spin_unlock_bh(&dp_hw->peer_lock);

	if (!dp_peer)
		return;

	/* ensure no RCU reader still sees the peer before freeing */
	synchronize_rcu();
	kfree(dp_peer);
}
528 
/* Bind an existing ath12k_dp_link_peer to its ath12k_dp_peer: record the
 * hw link mapping, publish both into the RCU lookup tables, and (re)insert
 * the link peer into the MAC-address rhashtable.
 *
 * Lock order: dp->dp_lock outer, dp_hw->peer_lock inner.
 *
 * Returns 0 on success, -ENOENT if either peer is missing, or the
 * rhashtable insert error.
 */
int ath12k_dp_link_peer_assign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
			       u8 vdev_id, struct ieee80211_sta *sta, u8 *addr,
			       u8 link_id, u32 hw_link_id)
{
	struct ath12k_dp_peer *dp_peer;
	struct ath12k_dp_link_peer *peer, *temp_peer;
	u16 peerid_index;
	int ret = -EINVAL;
	/* the dp_peer is keyed on the station (MLD) address when a station
	 * exists, otherwise on the link address itself
	 */
	u8 *dp_peer_mac = !sta ? addr : sta->addr;

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
	if (!peer) {
		ath12k_warn(dp, "failed to find dp_link_peer with mac %pM on vdev %u\n",
			    addr, vdev_id);
		ret = -ENOENT;
		goto err_peer;
	}

	spin_lock_bh(&dp_hw->peer_lock);

	dp_peer = ath12k_dp_peer_find_by_addr_and_sta(dp_hw, dp_peer_mac, sta);
	if (!dp_peer) {
		ath12k_warn(dp, "failed to find dp_peer with mac %pM\n", dp_peer_mac);
		ret = -ENOENT;
		goto err_dp_peer;
	}

	/*
	 * Set peer_id in dp_peer for non-mlo client, peer_id for mlo client is
	 * set during dp_peer create
	 */
	if (!dp_peer->is_mlo)
		dp_peer->peer_id = peer->peer_id;

	peer->dp_peer = dp_peer;
	peer->hw_link_id = hw_link_id;

	/* remember which link id this hw link serves for reverse lookup */
	dp_peer->hw_links[peer->hw_link_id] = link_id;

	peerid_index = ath12k_dp_peer_get_peerid_index(dp, peer->peer_id);

	/* NOTE(review): peer->link_id is used as the index here but is not
	 * set from the link_id parameter in this function — presumably it
	 * is populated when the link peer is created; confirm.
	 */
	rcu_assign_pointer(dp_peer->link_peers[peer->link_id], peer);

	rcu_assign_pointer(dp_hw->dp_peers[peerid_index], dp_peer);

	spin_unlock_bh(&dp_hw->peer_lock);

	/*
	 * In case of Split PHY and roaming scenario, pdev idx
	 * might differ but both the pdev will share same rhash
	 * table. In that case update the rhash table if link_peer is
	 * already present
	 */
	temp_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
	if (temp_peer && temp_peer->hw_link_id != hw_link_id)
		ath12k_dp_link_peer_rhash_delete(dp, temp_peer);

	ret = ath12k_dp_link_peer_rhash_add(dp, peer);
	if (ret) {
		/*
		 * If new entry addition failed, add back old entry
		 * If old entry addition also fails, then nothing
		 * can be done, simply proceed
		 */
		if (temp_peer)
			ath12k_dp_link_peer_rhash_add(dp, temp_peer);
	}

	spin_unlock_bh(&dp->dp_lock);

	return ret;

err_dp_peer:
	spin_unlock_bh(&dp_hw->peer_lock);

err_peer:
	spin_unlock_bh(&dp->dp_lock);

	return ret;
}
611 
/* Undo ath12k_dp_link_peer_assign(): clear the hw link mapping, retract
 * the RCU table entries, and drop the rhashtable entry when it still
 * belongs to this hw link. Waits for RCU readers before returning.
 *
 * Lock order: dp->dp_lock outer, dp_hw->peer_lock inner.
 */
void ath12k_dp_link_peer_unassign(struct ath12k_dp *dp, struct ath12k_dp_hw *dp_hw,
				  u8 vdev_id, u8 *addr, u32 hw_link_id)
{
	struct ath12k_dp_peer *dp_peer;
	struct ath12k_dp_link_peer *peer, *temp_peer;
	u16 peerid_index;

	spin_lock_bh(&dp->dp_lock);

	/* nothing to do if the link peer is gone or was never assigned */
	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
	if (!peer || !peer->dp_peer) {
		spin_unlock_bh(&dp->dp_lock);
		return;
	}

	spin_lock_bh(&dp_hw->peer_lock);

	dp_peer = peer->dp_peer;
	dp_peer->hw_links[peer->hw_link_id] = 0;

	peerid_index = ath12k_dp_peer_get_peerid_index(dp, peer->peer_id);

	rcu_assign_pointer(dp_peer->link_peers[peer->link_id], NULL);

	rcu_assign_pointer(dp_hw->dp_peers[peerid_index], NULL);

	spin_unlock_bh(&dp_hw->peer_lock);

	/* To handle roaming and split phy scenario: only remove the rhash
	 * entry when it still points at this hw link; a roamed peer on a
	 * different hw link keeps its (newer) entry.
	 */
	temp_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
	if (temp_peer && temp_peer->hw_link_id == hw_link_id)
		ath12k_dp_link_peer_rhash_delete(dp, peer);

	spin_unlock_bh(&dp->dp_lock);

	/* ensure RCU readers no longer reference the retracted pointers */
	synchronize_rcu();
}
649 
650 void
ath12k_dp_link_peer_get_sta_rate_info_stats(struct ath12k_dp * dp,const u8 * addr,struct ath12k_dp_link_peer_rate_info * info)651 ath12k_dp_link_peer_get_sta_rate_info_stats(struct ath12k_dp *dp, const u8 *addr,
652 					    struct ath12k_dp_link_peer_rate_info *info)
653 {
654 	struct ath12k_dp_link_peer *link_peer;
655 
656 	guard(spinlock_bh)(&dp->dp_lock);
657 
658 	link_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
659 	if (!link_peer)
660 		return;
661 
662 	info->rx_duration = link_peer->rx_duration;
663 	info->tx_duration = link_peer->tx_duration;
664 	info->txrate.legacy = link_peer->txrate.legacy;
665 	info->txrate.mcs = link_peer->txrate.mcs;
666 	info->txrate.nss = link_peer->txrate.nss;
667 	info->txrate.bw = link_peer->txrate.bw;
668 	info->txrate.he_gi = link_peer->txrate.he_gi;
669 	info->txrate.he_dcm = link_peer->txrate.he_dcm;
670 	info->txrate.he_ru_alloc = link_peer->txrate.he_ru_alloc;
671 	info->txrate.flags = link_peer->txrate.flags;
672 	info->rssi_comb = link_peer->rssi_comb;
673 	info->signal_avg = ewma_avg_rssi_read(&link_peer->avg_rssi);
674 }
675 
ath12k_dp_link_peer_reset_rx_stats(struct ath12k_dp * dp,const u8 * addr)676 void ath12k_dp_link_peer_reset_rx_stats(struct ath12k_dp *dp, const u8 *addr)
677 {
678 	struct ath12k_rx_peer_stats *rx_stats;
679 	struct ath12k_dp_link_peer *link_peer;
680 
681 	guard(spinlock_bh)(&dp->dp_lock);
682 
683 	link_peer = ath12k_dp_link_peer_find_by_addr(dp, addr);
684 	if (!link_peer || !link_peer->peer_stats.rx_stats)
685 		return;
686 
687 	rx_stats = link_peer->peer_stats.rx_stats;
688 	if (rx_stats)
689 		memset(rx_stats, 0, sizeof(*rx_stats));
690 }
691