1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5 */
6
7 #include <linux/relay.h>
8 #include "core.h"
9 #include "debug.h"
10
ath11k_cfr_get_dbring(struct ath11k * ar)11 struct ath11k_dbring *ath11k_cfr_get_dbring(struct ath11k *ar)
12 {
13 if (ar->cfr_enabled)
14 return &ar->cfr.rx_ring;
15
16 return NULL;
17 }
18
ath11k_cfr_calculate_tones_from_dma_hdr(struct ath11k_cfr_dma_hdr * hdr)19 static int ath11k_cfr_calculate_tones_from_dma_hdr(struct ath11k_cfr_dma_hdr *hdr)
20 {
21 u8 bw = FIELD_GET(CFIR_DMA_HDR_INFO1_UPLOAD_PKT_BW, hdr->info1);
22 u8 preamble = FIELD_GET(CFIR_DMA_HDR_INFO1_PREAMBLE_TYPE, hdr->info1);
23
24 switch (preamble) {
25 case ATH11K_CFR_PREAMBLE_TYPE_LEGACY:
26 fallthrough;
27 case ATH11K_CFR_PREAMBLE_TYPE_VHT:
28 switch (bw) {
29 case 0:
30 return TONES_IN_20MHZ;
31 case 1: /* DUP40/VHT40 */
32 return TONES_IN_40MHZ;
33 case 2: /* DUP80/VHT80 */
34 return TONES_IN_80MHZ;
35 case 3: /* DUP160/VHT160 */
36 return TONES_IN_160MHZ;
37 default:
38 return TONES_INVALID;
39 }
40 case ATH11K_CFR_PREAMBLE_TYPE_HT:
41 switch (bw) {
42 case 0:
43 return TONES_IN_20MHZ;
44 case 1:
45 return TONES_IN_40MHZ;
46 default:
47 return TONES_INVALID;
48 }
49 default:
50 return TONES_INVALID;
51 }
52 }
53
/* Reset a lookup-table entry to all-zero so it can be reused for the
 * next capture (clears the recv flags, ppdu ids and timestamps the
 * correlation logic matches on).
 */
void ath11k_cfr_release_lut_entry(struct ath11k_look_up_table *lut)
{
	memset(lut, 0, sizeof(*lut));
}
58
/* Push one complete CFR record (header + payload + end marker) into the
 * relay channel and flush it to userspace. Silently does nothing when
 * the relay channel was not created (CFR debugfs not registered).
 *
 * Note: tail_len was previously declared as int while every other
 * length is u32; made consistent (static function, all callers pass
 * sizeof(u32)).
 */
static void ath11k_cfr_rfs_write(struct ath11k *ar, const void *head,
				 u32 head_len, const void *data, u32 data_len,
				 const void *tail, u32 tail_len)
{
	struct ath11k_cfr *cfr = &ar->cfr;

	if (!cfr->rfs_cfr_capture)
		return;

	relay_write(cfr->rfs_cfr_capture, head, head_len);
	relay_write(cfr->rfs_cfr_capture, data, data_len);
	relay_write(cfr->rfs_cfr_capture, tail, tail_len);
	relay_flush(cfr->rfs_cfr_capture);
}
73
ath11k_cfr_free_pending_dbr_events(struct ath11k * ar)74 static void ath11k_cfr_free_pending_dbr_events(struct ath11k *ar)
75 {
76 struct ath11k_cfr *cfr = &ar->cfr;
77 struct ath11k_look_up_table *lut;
78 int i;
79
80 if (!cfr->lut)
81 return;
82
83 for (i = 0; i < cfr->lut_num; i++) {
84 lut = &cfr->lut[i];
85 if (lut->dbr_recv && !lut->tx_recv &&
86 lut->dbr_tstamp < cfr->last_success_tstamp) {
87 ath11k_dbring_bufs_replenish(ar, &cfr->rx_ring, lut->buff,
88 WMI_DIRECT_BUF_CFR);
89 ath11k_cfr_release_lut_entry(lut);
90 cfr->flush_dbr_cnt++;
91 }
92 }
93 }
94
95 /**
96 * ath11k_cfr_correlate_and_relay() - Correlate and relay CFR events
97 * @ar: Pointer to ath11k structure
98 * @lut: Lookup table for correlation
99 * @event_type: Type of event received (TX or DBR)
100 *
101 * Correlates WMI_PDEV_DMA_RING_BUF_RELEASE_EVENT (DBR) and
102 * WMI_PEER_CFR_CAPTURE_EVENT (TX capture) by PPDU ID. If both events
 * are present and the PPDU IDs match, returns ATH11K_CORRELATE_STATUS_RELEASE
 * to relay the correlated data to userspace. Otherwise returns
 * ATH11K_CORRELATE_STATUS_HOLD to wait for the other event.
106 *
107 * Also checks pending DBR events and clears them when no corresponding TX
108 * capture event is received for the PPDU.
109 *
 * Return: ATH11K_CORRELATE_STATUS_RELEASE or ATH11K_CORRELATE_STATUS_HOLD
111 */
112
static enum ath11k_cfr_correlate_status
ath11k_cfr_correlate_and_relay(struct ath11k *ar,
			       struct ath11k_look_up_table *lut,
			       u8 event_type)
{
	enum ath11k_cfr_correlate_status status;
	struct ath11k_cfr *cfr = &ar->cfr;
	u64 diff;

	/* Mark which half of the pair arrived. A TX event landing on an
	 * entry whose tx_recv flag is already set means the previous
	 * capture never completed, so it is counted as a DMA abort.
	 */
	if (event_type == ATH11K_CORRELATE_TX_EVENT) {
		if (lut->tx_recv)
			cfr->cfr_dma_aborts++;
		cfr->tx_evt_cnt++;
		lut->tx_recv = true;
	} else if (event_type == ATH11K_CORRELATE_DBR_EVENT) {
		cfr->dbr_evt_cnt++;
		lut->dbr_recv = true;
	}

	if (lut->dbr_recv && lut->tx_recv) {
		if (lut->dbr_ppdu_id == lut->tx_ppdu_id) {
			/*
			 * 64-bit counters make wraparound highly improbable,
			 * wraparound handling is omitted.
			 */
			cfr->last_success_tstamp = lut->dbr_tstamp;
			/* Log the latency (in jiffies) between the two
			 * events, in whichever order they arrived.
			 */
			if (lut->dbr_tstamp > lut->txrx_tstamp) {
				diff = lut->dbr_tstamp - lut->txrx_tstamp;
				ath11k_dbg(ar->ab, ATH11K_DBG_CFR,
					   "txrx event -> dbr event delay = %u ms",
					   jiffies_to_msecs(diff));
			} else if (lut->txrx_tstamp > lut->dbr_tstamp) {
				diff = lut->txrx_tstamp - lut->dbr_tstamp;
				ath11k_dbg(ar->ab, ATH11K_DBG_CFR,
					   "dbr event -> txrx event delay = %u ms",
					   jiffies_to_msecs(diff));
			}

			/* Flush DBR-only entries that are older than this
			 * successful correlation; their TX event will never
			 * arrive.
			 */
			ath11k_cfr_free_pending_dbr_events(ar);

			cfr->release_cnt++;
			status = ATH11K_CORRELATE_STATUS_RELEASE;
		} else {
			/*
			 * Discard TXRX event on PPDU ID mismatch because multiple PPDUs
			 * may share the same DMA address due to ucode aborts.
			 */

			/* NOTE(review): this message talks about a duplicate
			 * dbr event while the branch handles a PPDU ID
			 * mismatch — confirm the intended wording.
			 */
			ath11k_dbg(ar->ab, ATH11K_DBG_CFR,
				   "Received dbr event twice for the same lut entry");
			lut->tx_recv = false;
			lut->tx_ppdu_id = 0;
			cfr->clear_txrx_event++;
			cfr->cfr_dma_aborts++;
			status = ATH11K_CORRELATE_STATUS_HOLD;
		}
	} else {
		/* Only one of the two events has arrived so far */
		status = ATH11K_CORRELATE_STATUS_HOLD;
	}

	return status;
}
175
ath11k_cfr_process_data(struct ath11k * ar,struct ath11k_dbring_data * param)176 static int ath11k_cfr_process_data(struct ath11k *ar,
177 struct ath11k_dbring_data *param)
178 {
179 u32 end_magic = ATH11K_CFR_END_MAGIC;
180 struct ath11k_csi_cfr_header *header;
181 struct ath11k_cfr_dma_hdr *dma_hdr;
182 struct ath11k_cfr *cfr = &ar->cfr;
183 struct ath11k_look_up_table *lut;
184 struct ath11k_base *ab = ar->ab;
185 u32 buf_id, tones, length;
186 u8 num_chains;
187 int status;
188 u8 *data;
189
190 data = param->data;
191 buf_id = param->buf_id;
192
193 if (param->data_sz < sizeof(*dma_hdr))
194 return -EINVAL;
195
196 dma_hdr = (struct ath11k_cfr_dma_hdr *)data;
197
198 tones = ath11k_cfr_calculate_tones_from_dma_hdr(dma_hdr);
199 if (tones == TONES_INVALID) {
200 ath11k_warn(ar->ab, "Number of tones received is invalid\n");
201 return -EINVAL;
202 }
203
204 num_chains = FIELD_GET(CFIR_DMA_HDR_INFO1_NUM_CHAINS,
205 dma_hdr->info1);
206
207 length = sizeof(*dma_hdr);
208 length += tones * (num_chains + 1);
209
210 spin_lock_bh(&cfr->lut_lock);
211
212 if (!cfr->lut) {
213 spin_unlock_bh(&cfr->lut_lock);
214 return -EINVAL;
215 }
216
217 lut = &cfr->lut[buf_id];
218
219 ath11k_dbg_dump(ab, ATH11K_DBG_CFR_DUMP, "data_from_buf_rel:", "",
220 data, length);
221
222 lut->buff = param->buff;
223 lut->data = data;
224 lut->data_len = length;
225 lut->dbr_ppdu_id = dma_hdr->phy_ppdu_id;
226 lut->dbr_tstamp = jiffies;
227
228 memcpy(&lut->hdr, dma_hdr, sizeof(*dma_hdr));
229
230 header = &lut->header;
231 header->meta_data.channel_bw = FIELD_GET(CFIR_DMA_HDR_INFO1_UPLOAD_PKT_BW,
232 dma_hdr->info1);
233 header->meta_data.length = length;
234
235 status = ath11k_cfr_correlate_and_relay(ar, lut,
236 ATH11K_CORRELATE_DBR_EVENT);
237 if (status == ATH11K_CORRELATE_STATUS_RELEASE) {
238 ath11k_dbg(ab, ATH11K_DBG_CFR,
239 "releasing CFR data to user space");
240 ath11k_cfr_rfs_write(ar, &lut->header,
241 sizeof(struct ath11k_csi_cfr_header),
242 lut->data, lut->data_len,
243 &end_magic, sizeof(u32));
244 ath11k_cfr_release_lut_entry(lut);
245 } else if (status == ATH11K_CORRELATE_STATUS_HOLD) {
246 ath11k_dbg(ab, ATH11K_DBG_CFR,
247 "tx event is not yet received holding the buf");
248 }
249
250 spin_unlock_bh(&cfr->lut_lock);
251
252 return status;
253 }
254
ath11k_cfr_fill_hdr_info(struct ath11k * ar,struct ath11k_csi_cfr_header * header,struct ath11k_cfr_peer_tx_param * params)255 static void ath11k_cfr_fill_hdr_info(struct ath11k *ar,
256 struct ath11k_csi_cfr_header *header,
257 struct ath11k_cfr_peer_tx_param *params)
258 {
259 struct ath11k_cfr *cfr;
260
261 cfr = &ar->cfr;
262 header->cfr_metadata_version = ATH11K_CFR_META_VERSION_4;
263 header->cfr_data_version = ATH11K_CFR_DATA_VERSION_1;
264 header->cfr_metadata_len = sizeof(struct cfr_metadata);
265 header->chip_type = ar->ab->hw_rev;
266 header->meta_data.status = FIELD_GET(WMI_CFR_PEER_CAPTURE_STATUS,
267 params->status);
268 header->meta_data.capture_bw = params->bandwidth;
269
270 /*
271 * FW reports phymode will always be HE mode.
272 * Replace it with cached phy mode during peer assoc
273 */
274 header->meta_data.phy_mode = cfr->phymode;
275
276 header->meta_data.prim20_chan = params->primary_20mhz_chan;
277 header->meta_data.center_freq1 = params->band_center_freq1;
278 header->meta_data.center_freq2 = params->band_center_freq2;
279
280 /*
281 * CFR capture is triggered by the ACK of a QoS Null frame:
282 * - 20 MHz: Legacy ACK
283 * - 40/80/160 MHz: DUP Legacy ACK
284 */
285 header->meta_data.capture_mode = params->bandwidth ?
286 ATH11K_CFR_CAPTURE_DUP_LEGACY_ACK : ATH11K_CFR_CAPTURE_LEGACY_ACK;
287 header->meta_data.capture_type = params->capture_method;
288 header->meta_data.num_rx_chain = ar->num_rx_chains;
289 header->meta_data.sts_count = params->spatial_streams;
290 header->meta_data.timestamp = params->timestamp_us;
291 ether_addr_copy(header->meta_data.peer_addr, params->peer_mac_addr);
292 memcpy(header->meta_data.chain_rssi, params->chain_rssi,
293 sizeof(params->chain_rssi));
294 memcpy(header->meta_data.chain_phase, params->chain_phase,
295 sizeof(params->chain_phase));
296 memcpy(header->meta_data.agc_gain, params->agc_gain,
297 sizeof(params->agc_gain));
298 }
299
/* Handle a WMI peer CFR capture (TX) event: validate the capture status,
 * locate the LUT entry whose DMA address matches the event's correlation
 * info, fill in the record header, and relay the record to userspace if
 * the matching DBR buffer-release event has already arrived.
 */
int ath11k_process_cfr_capture_event(struct ath11k_base *ab,
				     struct ath11k_cfr_peer_tx_param *params)
{
	struct ath11k_look_up_table *lut = NULL;
	u32 end_magic = ATH11K_CFR_END_MAGIC;
	struct ath11k_csi_cfr_header *header;
	struct ath11k_dbring_element *buff;
	struct ath11k_cfr *cfr;
	dma_addr_t buf_addr;
	struct ath11k *ar;
	u8 tx_status;
	int status;
	int i;

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, params->vdev_id);
	if (!ar) {
		rcu_read_unlock();
		ath11k_warn(ab, "Failed to get ar for vdev id %d\n",
			    params->vdev_id);
		return -ENOENT;
	}

	cfr = &ar->cfr;
	/* NOTE(review): ar/cfr are used below after rcu_read_unlock() —
	 * presumably the pdev's ar outlives the event handler; confirm.
	 */
	rcu_read_unlock();

	/* A peer in powersave cannot be captured */
	if (WMI_CFR_CAPTURE_STATUS_PEER_PS & params->status) {
		ath11k_warn(ab, "CFR capture failed as peer %pM is in powersave",
			    params->peer_mac_addr);
		return -EINVAL;
	}

	/* Firmware reported the capture itself failed */
	if (!(WMI_CFR_PEER_CAPTURE_STATUS & params->status)) {
		ath11k_warn(ab, "CFR capture failed for the peer : %pM",
			    params->peer_mac_addr);
		cfr->tx_peer_status_cfr_fail++;
		return -EINVAL;
	}

	/* The QoS Null frame that triggers the capture must have been acked */
	tx_status = FIELD_GET(WMI_CFR_FRAME_TX_STATUS, params->status);
	if (tx_status != WMI_FRAME_TX_STATUS_OK) {
		ath11k_warn(ab, "WMI tx status %d for the peer %pM",
			    tx_status, params->peer_mac_addr);
		cfr->tx_evt_status_cfr_fail++;
		return -EINVAL;
	}

	/* Reassemble the 64-bit DMA address split across the two
	 * correlation-info words.
	 */
	buf_addr = (((u64)FIELD_GET(WMI_CFR_CORRELATION_INFO2_BUF_ADDR_HIGH,
				    params->correlation_info_2)) << 32) |
		   params->correlation_info_1;

	spin_lock_bh(&cfr->lut_lock);

	if (!cfr->lut) {
		spin_unlock_bh(&cfr->lut_lock);
		return -EINVAL;
	}

	/* Find the LUT entry whose ring buffer has this DMA address */
	for (i = 0; i < cfr->lut_num; i++) {
		struct ath11k_look_up_table *temp = &cfr->lut[i];

		if (temp->dbr_address == buf_addr) {
			lut = &cfr->lut[i];
			break;
		}
	}

	if (!lut) {
		spin_unlock_bh(&cfr->lut_lock);
		ath11k_warn(ab, "lut failure to process tx event\n");
		cfr->tx_dbr_lookup_fail++;
		return -EINVAL;
	}

	lut->tx_ppdu_id = FIELD_GET(WMI_CFR_CORRELATION_INFO2_PPDU_ID,
				    params->correlation_info_2);
	lut->txrx_tstamp = jiffies;

	header = &lut->header;
	header->start_magic_num = ATH11K_CFR_START_MAGIC;
	header->vendorid = VENDOR_QCA;
	header->platform_type = PLATFORM_TYPE_ARM;

	ath11k_cfr_fill_hdr_info(ar, header, params);

	status = ath11k_cfr_correlate_and_relay(ar, lut,
						ATH11K_CORRELATE_TX_EVENT);
	if (status == ATH11K_CORRELATE_STATUS_RELEASE) {
		ath11k_dbg(ab, ATH11K_DBG_CFR,
			   "Releasing CFR data to user space");
		ath11k_cfr_rfs_write(ar, &lut->header,
				     sizeof(struct ath11k_csi_cfr_header),
				     lut->data, lut->data_len,
				     &end_magic, sizeof(u32));
		/* Grab the ring element before the entry is zeroed so the
		 * buffer can be handed back to the ring.
		 */
		buff = lut->buff;
		ath11k_cfr_release_lut_entry(lut);

		ath11k_dbring_bufs_replenish(ar, &cfr->rx_ring, buff,
					     WMI_DIRECT_BUF_CFR);
	} else if (status == ATH11K_CORRELATE_STATUS_HOLD) {
		ath11k_dbg(ab, ATH11K_DBG_CFR,
			   "dbr event is not yet received holding buf\n");
	}

	spin_unlock_bh(&cfr->lut_lock);

	return 0;
}
408
409 /* Helper function to check whether the given peer mac address
410 * is in unassociated peer pool or not.
411 */
ath11k_cfr_peer_is_in_cfr_unassoc_pool(struct ath11k * ar,const u8 * peer_mac)412 bool ath11k_cfr_peer_is_in_cfr_unassoc_pool(struct ath11k *ar, const u8 *peer_mac)
413 {
414 struct ath11k_cfr *cfr = &ar->cfr;
415 struct cfr_unassoc_pool_entry *entry;
416 int i;
417
418 if (!ar->cfr_enabled)
419 return false;
420
421 spin_lock_bh(&cfr->lock);
422 for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
423 entry = &cfr->unassoc_pool[i];
424 if (!entry->is_valid)
425 continue;
426
427 if (ether_addr_equal(peer_mac, entry->peer_mac)) {
428 spin_unlock_bh(&cfr->lock);
429 return true;
430 }
431 }
432
433 spin_unlock_bh(&cfr->lock);
434
435 return false;
436 }
437
ath11k_cfr_update_unassoc_pool_entry(struct ath11k * ar,const u8 * peer_mac)438 void ath11k_cfr_update_unassoc_pool_entry(struct ath11k *ar,
439 const u8 *peer_mac)
440 {
441 struct ath11k_cfr *cfr = &ar->cfr;
442 struct cfr_unassoc_pool_entry *entry;
443 int i;
444
445 spin_lock_bh(&cfr->lock);
446 for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
447 entry = &cfr->unassoc_pool[i];
448 if (!entry->is_valid)
449 continue;
450
451 if (ether_addr_equal(peer_mac, entry->peer_mac) &&
452 entry->period == 0) {
453 memset(entry->peer_mac, 0, ETH_ALEN);
454 entry->is_valid = false;
455 cfr->cfr_enabled_peer_cnt--;
456 break;
457 }
458 }
459
460 spin_unlock_bh(&cfr->lock);
461 }
462
ath11k_cfr_decrement_peer_count(struct ath11k * ar,struct ath11k_sta * arsta)463 void ath11k_cfr_decrement_peer_count(struct ath11k *ar,
464 struct ath11k_sta *arsta)
465 {
466 struct ath11k_cfr *cfr = &ar->cfr;
467
468 spin_lock_bh(&cfr->lock);
469
470 if (arsta->cfr_capture.cfr_enable)
471 cfr->cfr_enabled_peer_cnt--;
472
473 spin_unlock_bh(&cfr->lock);
474 }
475
476 static enum ath11k_wmi_cfr_capture_bw
ath11k_cfr_bw_to_fw_cfr_bw(enum ath11k_cfr_capture_bw bw)477 ath11k_cfr_bw_to_fw_cfr_bw(enum ath11k_cfr_capture_bw bw)
478 {
479 switch (bw) {
480 case ATH11K_CFR_CAPTURE_BW_20:
481 return WMI_PEER_CFR_CAPTURE_BW_20;
482 case ATH11K_CFR_CAPTURE_BW_40:
483 return WMI_PEER_CFR_CAPTURE_BW_40;
484 case ATH11K_CFR_CAPTURE_BW_80:
485 return WMI_PEER_CFR_CAPTURE_BW_80;
486 default:
487 return WMI_PEER_CFR_CAPTURE_BW_MAX;
488 }
489 }
490
491 static enum ath11k_wmi_cfr_capture_method
ath11k_cfr_method_to_fw_cfr_method(enum ath11k_cfr_capture_method method)492 ath11k_cfr_method_to_fw_cfr_method(enum ath11k_cfr_capture_method method)
493 {
494 switch (method) {
495 case ATH11K_CFR_CAPTURE_METHOD_NULL_FRAME:
496 return WMI_CFR_CAPTURE_METHOD_NULL_FRAME;
497 case ATH11K_CFR_CAPTURE_METHOD_NULL_FRAME_WITH_PHASE:
498 return WMI_CFR_CAPTURE_METHOD_NULL_FRAME_WITH_PHASE;
499 case ATH11K_CFR_CAPTURE_METHOD_PROBE_RESP:
500 return WMI_CFR_CAPTURE_METHOD_PROBE_RESP;
501 default:
502 return WMI_CFR_CAPTURE_METHOD_MAX;
503 }
504 }
505
/* Configure per-peer CFR capture in firmware and mirror the accepted
 * settings into arsta->cfr_capture, keeping the enabled-peer count in
 * sync. Returns 0 on success or when nothing needs to change, negative
 * errno otherwise.
 */
int ath11k_cfr_send_peer_cfr_capture_cmd(struct ath11k *ar,
					 struct ath11k_sta *arsta,
					 struct ath11k_per_peer_cfr_capture *params,
					 const u8 *peer_mac)
{
	struct ath11k_cfr *cfr = &ar->cfr;
	struct wmi_peer_cfr_capture_conf_arg arg;
	enum ath11k_wmi_cfr_capture_bw bw;
	enum ath11k_wmi_cfr_capture_method method;
	int ret = 0;

	/* Refuse to enable yet another peer once the pool is full.
	 * NOTE(review): cfr_enabled_peer_cnt is read here without
	 * cfr->lock although it is updated under the lock below —
	 * confirm this race is acceptable.
	 */
	if (cfr->cfr_enabled_peer_cnt >= ATH11K_MAX_CFR_ENABLED_CLIENTS &&
	    !arsta->cfr_capture.cfr_enable) {
		ath11k_err(ar->ab, "CFR enable peer threshold reached %u\n",
			   cfr->cfr_enabled_peer_cnt);
		return -ENOSPC;
	}

	/* Requested configuration already active: nothing to do */
	if (params->cfr_enable == arsta->cfr_capture.cfr_enable &&
	    params->cfr_period == arsta->cfr_capture.cfr_period &&
	    params->cfr_method == arsta->cfr_capture.cfr_method &&
	    params->cfr_bw == arsta->cfr_capture.cfr_bw)
		return ret;

	/* Disabling an already-disabled peer needs no firmware command */
	if (!params->cfr_enable && !arsta->cfr_capture.cfr_enable)
		return ret;

	bw = ath11k_cfr_bw_to_fw_cfr_bw(params->cfr_bw);
	if (bw >= WMI_PEER_CFR_CAPTURE_BW_MAX) {
		ath11k_warn(ar->ab, "FW doesn't support configured bw %d\n",
			    params->cfr_bw);
		return -EINVAL;
	}

	method = ath11k_cfr_method_to_fw_cfr_method(params->cfr_method);
	if (method >= WMI_CFR_CAPTURE_METHOD_MAX) {
		ath11k_warn(ar->ab, "FW doesn't support configured method %d\n",
			    params->cfr_method);
		return -EINVAL;
	}

	arg.request = params->cfr_enable;
	arg.periodicity = params->cfr_period;
	arg.bw = bw;
	arg.method = method;

	ret = ath11k_wmi_peer_set_cfr_capture_conf(ar, arsta->arvif->vdev_id,
						   peer_mac, &arg);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send cfr capture info: vdev_id %u peer %pM: %d\n",
			    arsta->arvif->vdev_id, peer_mac, ret);
		return ret;
	}

	/* Firmware accepted the change; adjust the enabled-peer count.
	 * The disable branch can only be reached when the peer was
	 * enabled (checked above), so the decrement cannot underflow.
	 */
	spin_lock_bh(&cfr->lock);

	if (params->cfr_enable &&
	    params->cfr_enable != arsta->cfr_capture.cfr_enable)
		cfr->cfr_enabled_peer_cnt++;
	else if (!params->cfr_enable)
		cfr->cfr_enabled_peer_cnt--;

	spin_unlock_bh(&cfr->lock);

	/* Cache the accepted settings for future no-op detection */
	arsta->cfr_capture.cfr_enable = params->cfr_enable;
	arsta->cfr_capture.cfr_period = params->cfr_period;
	arsta->cfr_capture.cfr_method = params->cfr_method;
	arsta->cfr_capture.cfr_bw = params->cfr_bw;

	return ret;
}
578
/* Add, update or remove a peer in the unassociated-peer CFR pool.
 * Disable requests remove a matching entry; enable requests update an
 * existing entry's period or claim the first free slot.
 *
 * Fix: the disable loop now skips invalid (free) slots, matching
 * ath11k_cfr_update_unassoc_pool_entry(). Without the check, a request
 * carrying an all-zero MAC matches a cleared slot's zeroed address and
 * wrongly decrements cfr_enabled_peer_cnt.
 */
void ath11k_cfr_update_unassoc_pool(struct ath11k *ar,
				    struct ath11k_per_peer_cfr_capture *params,
				    u8 *peer_mac)
{
	struct ath11k_cfr *cfr = &ar->cfr;
	struct cfr_unassoc_pool_entry *entry;
	int available_idx = -1;
	int i;

	guard(spinlock_bh)(&cfr->lock);

	if (!params->cfr_enable) {
		for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
			entry = &cfr->unassoc_pool[i];
			/* Free slots hold a zeroed MAC; never match them */
			if (!entry->is_valid)
				continue;

			if (ether_addr_equal(peer_mac, entry->peer_mac)) {
				memset(entry->peer_mac, 0, ETH_ALEN);
				entry->is_valid = false;
				cfr->cfr_enabled_peer_cnt--;
				break;
			}
		}
		return;
	}

	if (cfr->cfr_enabled_peer_cnt >= ATH11K_MAX_CFR_ENABLED_CLIENTS) {
		ath11k_info(ar->ab, "Max cfr peer threshold reached\n");
		return;
	}

	for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
		entry = &cfr->unassoc_pool[i];

		if (ether_addr_equal(peer_mac, entry->peer_mac)) {
			ath11k_info(ar->ab,
				    "peer entry already present updating params\n");
			entry->period = params->cfr_period;
			/* Existing entry updated; do not claim a free slot */
			available_idx = -1;
			break;
		}

		/* Remember the first free slot in case the peer is new */
		if (available_idx < 0 && !entry->is_valid)
			available_idx = i;
	}

	if (available_idx >= 0) {
		entry = &cfr->unassoc_pool[available_idx];
		ether_addr_copy(entry->peer_mac, peer_mac);
		entry->period = params->cfr_period;
		entry->is_valid = true;
		cfr->cfr_enabled_peer_cnt++;
	}
}
631
ath11k_read_file_enable_cfr(struct file * file,char __user * user_buf,size_t count,loff_t * ppos)632 static ssize_t ath11k_read_file_enable_cfr(struct file *file,
633 char __user *user_buf,
634 size_t count, loff_t *ppos)
635 {
636 struct ath11k *ar = file->private_data;
637 char buf[32] = {};
638 size_t len;
639
640 mutex_lock(&ar->conf_mutex);
641 len = scnprintf(buf, sizeof(buf), "%d\n", ar->cfr_enabled);
642 mutex_unlock(&ar->conf_mutex);
643
644 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
645 }
646
ath11k_write_file_enable_cfr(struct file * file,const char __user * ubuf,size_t count,loff_t * ppos)647 static ssize_t ath11k_write_file_enable_cfr(struct file *file,
648 const char __user *ubuf,
649 size_t count, loff_t *ppos)
650 {
651 struct ath11k *ar = file->private_data;
652 u32 enable_cfr;
653 int ret;
654
655 if (kstrtouint_from_user(ubuf, count, 0, &enable_cfr))
656 return -EINVAL;
657
658 guard(mutex)(&ar->conf_mutex);
659
660 if (ar->state != ATH11K_STATE_ON)
661 return -ENETDOWN;
662
663 if (enable_cfr > 1)
664 return -EINVAL;
665
666 if (ar->cfr_enabled == enable_cfr)
667 return count;
668
669 ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PER_PEER_CFR_ENABLE,
670 enable_cfr, ar->pdev->pdev_id);
671 if (ret) {
672 ath11k_warn(ar->ab,
673 "Failed to enable/disable per peer cfr %d\n", ret);
674 return ret;
675 }
676
677 ar->cfr_enabled = enable_cfr;
678
679 return count;
680 }
681
/* debugfs "enable_cfr": pdev-wide on/off switch for CFR capture */
static const struct file_operations fops_enable_cfr = {
	.read = ath11k_read_file_enable_cfr,
	.write = ath11k_write_file_enable_cfr,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
689
ath11k_write_file_cfr_unassoc(struct file * file,const char __user * ubuf,size_t count,loff_t * ppos)690 static ssize_t ath11k_write_file_cfr_unassoc(struct file *file,
691 const char __user *ubuf,
692 size_t count, loff_t *ppos)
693 {
694 struct ath11k *ar = file->private_data;
695 struct ath11k_cfr *cfr = &ar->cfr;
696 struct cfr_unassoc_pool_entry *entry;
697 char buf[64] = {};
698 u8 peer_mac[6];
699 u32 cfr_capture_enable;
700 u32 cfr_capture_period;
701 int available_idx = -1;
702 int ret, i;
703
704 simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
705
706 guard(mutex)(&ar->conf_mutex);
707 guard(spinlock_bh)(&cfr->lock);
708
709 if (ar->state != ATH11K_STATE_ON)
710 return -ENETDOWN;
711
712 if (!ar->cfr_enabled) {
713 ath11k_err(ar->ab, "CFR is not enabled on this pdev %d\n",
714 ar->pdev_idx);
715 return -EINVAL;
716 }
717
718 ret = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %u %u",
719 &peer_mac[0], &peer_mac[1], &peer_mac[2], &peer_mac[3],
720 &peer_mac[4], &peer_mac[5], &cfr_capture_enable,
721 &cfr_capture_period);
722
723 if (ret < 1)
724 return -EINVAL;
725
726 if (cfr_capture_enable && ret != 8)
727 return -EINVAL;
728
729 if (!cfr_capture_enable) {
730 for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
731 entry = &cfr->unassoc_pool[i];
732 if (ether_addr_equal(peer_mac, entry->peer_mac)) {
733 memset(entry->peer_mac, 0, ETH_ALEN);
734 entry->is_valid = false;
735 cfr->cfr_enabled_peer_cnt--;
736 }
737 }
738
739 return count;
740 }
741
742 if (cfr->cfr_enabled_peer_cnt >= ATH11K_MAX_CFR_ENABLED_CLIENTS) {
743 ath11k_info(ar->ab, "Max cfr peer threshold reached\n");
744 return count;
745 }
746
747 for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
748 entry = &cfr->unassoc_pool[i];
749
750 if (available_idx < 0 && !entry->is_valid)
751 available_idx = i;
752
753 if (ether_addr_equal(peer_mac, entry->peer_mac)) {
754 ath11k_info(ar->ab,
755 "peer entry already present updating params\n");
756 entry->period = cfr_capture_period;
757 return count;
758 }
759 }
760
761 if (available_idx >= 0) {
762 entry = &cfr->unassoc_pool[available_idx];
763 ether_addr_copy(entry->peer_mac, peer_mac);
764 entry->period = cfr_capture_period;
765 entry->is_valid = true;
766 cfr->cfr_enabled_peer_cnt++;
767 }
768
769 return count;
770 }
771
ath11k_read_file_cfr_unassoc(struct file * file,char __user * ubuf,size_t count,loff_t * ppos)772 static ssize_t ath11k_read_file_cfr_unassoc(struct file *file,
773 char __user *ubuf,
774 size_t count, loff_t *ppos)
775 {
776 struct ath11k *ar = file->private_data;
777 struct ath11k_cfr *cfr = &ar->cfr;
778 struct cfr_unassoc_pool_entry *entry;
779 char buf[512] = {};
780 int len = 0, i;
781
782 spin_lock_bh(&cfr->lock);
783
784 for (i = 0; i < ATH11K_MAX_CFR_ENABLED_CLIENTS; i++) {
785 entry = &cfr->unassoc_pool[i];
786 if (entry->is_valid)
787 len += scnprintf(buf + len, sizeof(buf) - len,
788 "peer: %pM period: %u\n",
789 entry->peer_mac, entry->period);
790 }
791
792 spin_unlock_bh(&cfr->lock);
793
794 return simple_read_from_buffer(ubuf, count, ppos, buf, len);
795 }
796
/* debugfs "cfr_unassoc": configure CFR for not-yet-associated peers */
static const struct file_operations fops_configure_cfr_unassoc = {
	.write = ath11k_write_file_cfr_unassoc,
	.read = ath11k_read_file_cfr_unassoc,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};
804
ath11k_cfr_debug_unregister(struct ath11k * ar)805 static void ath11k_cfr_debug_unregister(struct ath11k *ar)
806 {
807 debugfs_remove(ar->cfr.enable_cfr);
808 ar->cfr.enable_cfr = NULL;
809 debugfs_remove(ar->cfr.cfr_unassoc);
810 ar->cfr.cfr_unassoc = NULL;
811
812 relay_close(ar->cfr.rfs_cfr_capture);
813 ar->cfr.rfs_cfr_capture = NULL;
814 }
815
/* relay callback: create the debugfs file backing a relay buffer.
 * Marked global so a single buffer is shared rather than per-cpu files.
 */
static struct dentry *ath11k_cfr_create_buf_file_handler(const char *filename,
							 struct dentry *parent,
							 umode_t mode,
							 struct rchan_buf *buf,
							 int *is_global)
{
	*is_global = 1;

	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}
829
/* relay callback: remove the debugfs file backing a relay buffer */
static int ath11k_cfr_remove_buf_file_handler(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}
836
/* relay channel callbacks for the "cfr_capture" stream */
static const struct rchan_callbacks rfs_cfr_capture_cb = {
	.create_buf_file = ath11k_cfr_create_buf_file_handler,
	.remove_buf_file = ath11k_cfr_remove_buf_file_handler,
};
841
ath11k_cfr_debug_register(struct ath11k * ar)842 static void ath11k_cfr_debug_register(struct ath11k *ar)
843 {
844 ar->cfr.rfs_cfr_capture = relay_open("cfr_capture",
845 ar->debug.debugfs_pdev,
846 ar->ab->hw_params.cfr_stream_buf_size,
847 ar->ab->hw_params.cfr_num_stream_bufs,
848 &rfs_cfr_capture_cb, NULL);
849
850 ar->cfr.enable_cfr = debugfs_create_file("enable_cfr", 0600,
851 ar->debug.debugfs_pdev, ar,
852 &fops_enable_cfr);
853
854 ar->cfr.cfr_unassoc = debugfs_create_file("cfr_unassoc", 0600,
855 ar->debug.debugfs_pdev, ar,
856 &fops_configure_cfr_unassoc);
857 }
858
/* Record the DMA address of the ring buffer backing LUT slot @buf_id,
 * used later to match TX capture events by address.
 */
void ath11k_cfr_lut_update_paddr(struct ath11k *ar, dma_addr_t paddr,
				 u32 buf_id)
{
	struct ath11k_cfr *cfr = &ar->cfr;

	if (!cfr->lut)
		return;

	cfr->lut[buf_id].dbr_address = paddr;
}
867
ath11k_cfr_update_phymode(struct ath11k * ar,enum wmi_phy_mode phymode)868 void ath11k_cfr_update_phymode(struct ath11k *ar, enum wmi_phy_mode phymode)
869 {
870 struct ath11k_cfr *cfr = &ar->cfr;
871
872 cfr->phymode = phymode;
873 }
874
ath11k_cfr_ring_free(struct ath11k * ar)875 static void ath11k_cfr_ring_free(struct ath11k *ar)
876 {
877 struct ath11k_cfr *cfr = &ar->cfr;
878
879 ath11k_dbring_buf_cleanup(ar, &cfr->rx_ring);
880 ath11k_dbring_srng_cleanup(ar, &cfr->rx_ring);
881 }
882
ath11k_cfr_ring_alloc(struct ath11k * ar,struct ath11k_dbring_cap * db_cap)883 static int ath11k_cfr_ring_alloc(struct ath11k *ar,
884 struct ath11k_dbring_cap *db_cap)
885 {
886 struct ath11k_cfr *cfr = &ar->cfr;
887 int ret;
888
889 ret = ath11k_dbring_srng_setup(ar, &cfr->rx_ring,
890 ATH11K_CFR_NUM_RING_ENTRIES,
891 db_cap->min_elem);
892 if (ret) {
893 ath11k_warn(ar->ab, "failed to setup db ring: %d\n", ret);
894 return ret;
895 }
896
897 ath11k_dbring_set_cfg(ar, &cfr->rx_ring,
898 ATH11K_CFR_NUM_RESP_PER_EVENT,
899 ATH11K_CFR_EVENT_TIMEOUT_MS,
900 ath11k_cfr_process_data);
901
902 ret = ath11k_dbring_buf_setup(ar, &cfr->rx_ring, db_cap);
903 if (ret) {
904 ath11k_warn(ar->ab, "failed to setup db ring buffer: %d\n", ret);
905 goto srng_cleanup;
906 }
907
908 ret = ath11k_dbring_wmi_cfg_setup(ar, &cfr->rx_ring, WMI_DIRECT_BUF_CFR);
909 if (ret) {
910 ath11k_warn(ar->ab, "failed to setup db ring cfg: %d\n", ret);
911 goto buffer_cleanup;
912 }
913
914 return 0;
915
916 buffer_cleanup:
917 ath11k_dbring_buf_cleanup(ar, &cfr->rx_ring);
918 srng_cleanup:
919 ath11k_dbring_srng_cleanup(ar, &cfr->rx_ring);
920 return ret;
921 }
922
ath11k_cfr_deinit(struct ath11k_base * ab)923 void ath11k_cfr_deinit(struct ath11k_base *ab)
924 {
925 struct ath11k_cfr *cfr;
926 struct ath11k *ar;
927 int i;
928
929 if (!test_bit(WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT, ab->wmi_ab.svc_map) ||
930 !ab->hw_params.cfr_support)
931 return;
932
933 for (i = 0; i < ab->num_radios; i++) {
934 ar = ab->pdevs[i].ar;
935 cfr = &ar->cfr;
936
937 if (!cfr->enabled)
938 continue;
939
940 ath11k_cfr_debug_unregister(ar);
941 ath11k_cfr_ring_free(ar);
942
943 spin_lock_bh(&cfr->lut_lock);
944 kfree(cfr->lut);
945 cfr->lut = NULL;
946 cfr->enabled = false;
947 spin_unlock_bh(&cfr->lut_lock);
948 }
949 }
950
/* Initialize CFR capture on every radio that advertises direct-buffer
 * CFR capability: locks, lookup table, the direct-buffer ring and the
 * debugfs/relay interface. On failure, fully initialized earlier
 * radios are unwound. Returns 0 when CFR is unsupported.
 */
int ath11k_cfr_init(struct ath11k_base *ab)
{
	struct ath11k_dbring_cap db_cap;
	struct ath11k_cfr *cfr;
	u32 num_lut_entries;
	struct ath11k *ar;
	int i, ret;

	/* CFR needs both the firmware service and hw support */
	if (!test_bit(WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT, ab->wmi_ab.svc_map) ||
	    !ab->hw_params.cfr_support)
		return 0;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		cfr = &ar->cfr;

		/* Radios without direct-buffer CFR capability are simply
		 * skipped; their cfr->enabled stays false.
		 */
		ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
					    WMI_DIRECT_BUF_CFR, &db_cap);
		if (ret)
			continue;

		idr_init(&cfr->rx_ring.bufs_idr);
		spin_lock_init(&cfr->rx_ring.idr_lock);
		spin_lock_init(&cfr->lock);
		spin_lock_init(&cfr->lut_lock);

		/* One LUT entry per ring element, capped at the maximum */
		num_lut_entries = min_t(u32, CFR_MAX_LUT_ENTRIES, db_cap.min_elem);
		cfr->lut = kzalloc_objs(*cfr->lut, num_lut_entries);
		if (!cfr->lut) {
			ret = -ENOMEM;
			goto err;
		}

		ret = ath11k_cfr_ring_alloc(ar, &db_cap);
		if (ret) {
			ath11k_warn(ab, "failed to init cfr ring for pdev %d: %d\n",
				    i, ret);
			spin_lock_bh(&cfr->lut_lock);
			kfree(cfr->lut);
			cfr->lut = NULL;
			cfr->enabled = false;
			spin_unlock_bh(&cfr->lut_lock);
			goto err;
		}

		cfr->lut_num = num_lut_entries;
		cfr->enabled = true;

		ath11k_cfr_debug_register(ar);
	}

	return 0;

err:
	/* Unwind every earlier radio that was fully set up; radios
	 * skipped above (or the one that just failed) have
	 * cfr->enabled == false and are ignored.
	 */
	for (i = i - 1; i >= 0; i--) {
		ar = ab->pdevs[i].ar;
		cfr = &ar->cfr;

		if (!cfr->enabled)
			continue;

		ath11k_cfr_debug_unregister(ar);
		ath11k_cfr_ring_free(ar);

		spin_lock_bh(&cfr->lut_lock);
		kfree(cfr->lut);
		cfr->lut = NULL;
		cfr->enabled = false;
		spin_unlock_bh(&cfr->lut_lock);
	}
	return ret;
}
1023