// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hif.h"
#include "hal.h"
#include "debug.h"
#include "peer.h"
#include "dp_cmn.h"

enum ath12k_dp_desc_type {
	ATH12K_DP_TX_DESC,
	ATH12K_DP_RX_DESC,
};

void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp_link_peer *peer;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&dp->dp_lock);
	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
	if (!peer || !peer->dp_peer) {
		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&dp->dp_lock);
		return;
	}

	if (!peer->primary_link) {
		spin_unlock_bh(&dp->dp_lock);
		return;
	}

	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
	crypto_free_shash(peer->dp_peer->tfm_mmic);
	peer->dp_peer->dp_setup_done = false;
	spin_unlock_bh(&dp->dp_lock);
}

int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp_link_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
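	/* Example: for mac_id 0 this selects REO ring 1; the value passed
	 * below carries the ring id shifted up by one bit, OR'd with the
	 * DP_RX_HASH_ENABLE flag.
	 */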
	ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
						  HAL_PN_TYPE_NONE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx defrag context\n");
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&dp->dp_lock);
		return -ENOENT;
	}

	for (tid--; tid >= 0; tid--)
		ath12k_dp_arch_rx_peer_tid_delete(dp, peer, tid);

	spin_unlock_bh(&dp->dp_lock);

	return ret;
}

void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
			  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	const u8 *grp_mask;
	int i;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			map = ab->hal.tcl_to_wbm_rbm_map;
			for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
				if (ring_num == map[i].wbm_ring_num) {
					ring_num = i;
					break;
				}
			}

			grp_mask = &ab->hw_params->ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params->ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params->ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params->ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		grp_mask = &ab->hw_params->ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
		break;
	case HAL_TX_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
					     &msi_data_count, &msi_data_start,
					     &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
213 "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
				+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = {};
	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
	int ret;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
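	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so the ring base
	 * can be aligned with PTR_ALIGN() below without overrunning the
	 * buffer.
	 */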
	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
						   &ring->paddr_unaligned,
						   GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 1;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_TX_MONITOR_DST:
		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
					 struct ath12k_link_vif *arvif,
					 struct ath12k_dp *dp)
{
	int bank_id = DP_INVALID_BANK_ID;
	int i;
	u32 bank_config;
	bool configure_register = false;

	/* convert vdev params into hal_tx_bank_config */
	bank_config = ath12k_dp_arch_tx_get_vdev_bank_config(dp, arvif);

	spin_lock_bh(&dp->tx_bank_lock);
	/* TODO: implement using idr kernel framework */
	for (i = 0; i < dp->num_bank_profiles; i++) {
		if (dp->bank_profiles[i].is_configured &&
		    (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
			bank_id = i;
			goto inc_ref_and_return;
		}
		if (!dp->bank_profiles[i].is_configured ||
		    !dp->bank_profiles[i].num_users) {
			bank_id = i;
			goto configure_and_return;
		}
	}

	if (bank_id == DP_INVALID_BANK_ID) {
		spin_unlock_bh(&dp->tx_bank_lock);
		ath12k_err(ab, "unable to find TX bank!");
		return bank_id;
	}

configure_and_return:
	dp->bank_profiles[bank_id].is_configured = true;
	dp->bank_profiles[bank_id].bank_config = bank_config;
	configure_register = true;
inc_ref_and_return:
	dp->bank_profiles[bank_id].num_users++;
	spin_unlock_bh(&dp->tx_bank_lock);

	if (configure_register)
		ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
		   bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
		   dp->bank_profiles[bank_id].num_users);

	return bank_id;
}

void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
{
	spin_lock_bh(&dp->tx_bank_lock);
	dp->bank_profiles[bank_id].num_users--;
	spin_unlock_bh(&dp->tx_bank_lock);
}

static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	kfree(dp->bank_profiles);
	dp->bank_profiles = NULL;
}

static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
	int i;

	dp->num_bank_profiles = num_tcl_banks;
	dp->bank_profiles = kmalloc_objs(struct ath12k_dp_tx_bank_profile,
					 num_tcl_banks);
	if (!dp->bank_profiles)
		return -ENOMEM;

	spin_lock_init(&dp->tx_bank_lock);

	for (i = 0; i < num_tcl_banks; i++) {
		dp->bank_profiles[i].is_configured = false;
		dp->bank_profiles[i].num_users = 0;
	}

	return 0;
}

static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
	}
	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}

static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	struct hal_srng *srng;
	int i, ret, tx_comp_ring_num;
	u32 ring_hash_map;

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		map = ab->hal.tcl_to_wbm_rbm_map;
		tx_comp_ring_num = map[i].wbm_ring_num;

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
					   DP_TX_COMP_RING_SIZE(ab));
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    tx_comp_ring_num, ret);
			goto err;
		}
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
				   DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath12k_hal_reo_init_cmd_ring(ab, srng);

	ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash-based routing of RX packets is enabled, 32 entries are
	 * configured to map hash values to rings. Each hash entry uses four
	 * bits to select a particular ring. The ring mapping is
	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW, 7:SW5,
	 * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
	 */
	ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
			HAL_HASH_ROUTING_RING_SW2 << 4 |
			HAL_HASH_ROUTING_RING_SW3 << 8 |
			HAL_HASH_ROUTING_RING_SW4 << 12 |
			HAL_HASH_ROUTING_RING_SW1 << 16 |
			HAL_HASH_ROUTING_RING_SW2 << 20 |
			HAL_HASH_ROUTING_RING_SW3 << 24 |
			HAL_HASH_ROUTING_RING_SW4 << 28;
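	/* The eight nibbles above cover hash values 0-7, cycling through
	 * SW1-SW4 twice so hashed RX traffic is spread round-robin across
	 * the four SW rings.
	 */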

	ath12k_hal_reo_hw_setup(ab, ring_hash_map);

	return 0;

err:
	ath12k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset, cookie;
	enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

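	/* Publish every link descriptor's DMA address into the scatter
	 * buffers; each cookie encodes the owning bank index and a per-bank
	 * entry number so the descriptor can be looked up later.
	 */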
	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			ath12k_hal_set_link_desc_addr(dp->hal, scatter_buf, cookie,
						      paddr, rbm);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath12k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
				dma_alloc_coherent(ab->dev, desc_sz,
						   &desc_bank[i].paddr_unaligned,
						   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath12k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath12k_dp_srng_cleanup(ab, ring);
		ath12k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

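	/* Round a non-power-of-two total up to the next power of two,
	 * e.g. fls(3000) = 12, so 3000 becomes 1 << 12 = 4096.
	 */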
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	struct hal_wbm_link_desc *desc;
	u32 paddr;
	int i, ret;
	u32 cookie;
	enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Set up the scatter desc list when the total memory requirement
	 * exceeds the single-bank allocation threshold
	 */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath12k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			ath12k_hal_set_link_desc_addr(dp->hal, desc, cookie, paddr,
						      rbm);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath12k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k *ar;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		rcu_assign_pointer(dp->dp_pdevs[ar->pdev_idx], NULL);
	}

	synchronize_rcu();

	for (i = 0; i < ab->num_radios; i++)
		ath12k_dp_rx_pdev_free(ab, i);
}

void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;

	dp->mac_id = ar->pdev_idx;
	atomic_set(&dp->num_tx_pending, 0);
	init_waitqueue_head(&dp->tx_empty_waitq);
	/* TODO: Add any RXDMA setup required per pdev */
}

int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_pdev_dp *dp_pdev;
	struct ath12k *ar;
	int ret;
	int i;

	ret = ath12k_dp_rx_htt_setup(ab);
	if (ret)
		goto out;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;

		dp_pdev = &ar->dp;

		dp_pdev->hw = ar->ah->hw;
		dp_pdev->dp = dp;
		dp_pdev->hw_link_id = ar->hw_link_id;
		dp_pdev->dp_hw = &ar->ah->dp_hw;

		ret = ath12k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath12k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
			goto err;
		}
	}

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		rcu_assign_pointer(dp->dp_pdevs[ar->pdev_idx], &ar->dp);
	}

	return 0;
err:
	ath12k_dp_pdev_free(ab);
out:
	return ret;
}

static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
	u8 link_id = arvif->link_id;
	struct ath12k_vif *ahvif = arvif->ahvif;
	struct ath12k_dp_link_vif *dp_link_vif;

	dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, link_id);

	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		dp_link_vif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
		dp_link_vif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		dp_link_vif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		dp_link_vif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_vif *ahvif = arvif->ahvif;
	u8 link_id = arvif->link_id;
	int bank_id;
	struct ath12k_dp_link_vif *dp_link_vif;

	dp_link_vif = ath12k_dp_vif_to_dp_link_vif(&ahvif->dp_vif, link_id);

	dp_link_vif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
				     u32_encode_bits(arvif->vdev_id,
						     HTT_TCL_META_DATA_VDEV_ID) |
				     u32_encode_bits(ar->pdev->pdev_id,
						     HTT_TCL_META_DATA_PDEV_ID);

	/* set HTT extension valid bit to 0 by default */
	dp_link_vif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath12k_dp_update_vdev_search(arvif);
	dp_link_vif->vdev_id_check_en = true;
	bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, ath12k_ab_to_dp(ab));
	dp_link_vif->bank_id = bank_id;

	/* TODO: error path for bank id failure */
	if (bank_id == DP_INVALID_BANK_ID) {
		ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
		return;
	}
}

static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_skb_cb *skb_cb;
	struct sk_buff *skb;
	struct ath12k *ar;
	int i, j;
	u32 pool_id, tx_spt_page;

	if (!dp->spt_info)
		return;

	/* RX Descriptor cleanup */
	spin_lock_bh(&dp->rx_desc_lock);

	if (dp->rxbaddr) {
		for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES(ab); i++) {
			if (!dp->rxbaddr[i])
				continue;

			desc_info = dp->rxbaddr[i];

			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
				if (!desc_info[j].in_use) {
					list_del(&desc_info[j].list);
					continue;
				}

				skb = desc_info[j].skb;
				if (!skb)
					continue;

				dma_unmap_single(ab->dev,
						 ATH12K_SKB_RXCB(skb)->paddr,
						 skb->len + skb_tailroom(skb),
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
			}

			kfree(dp->rxbaddr[i]);
			dp->rxbaddr[i] = NULL;
		}

		kfree(dp->rxbaddr);
		dp->rxbaddr = NULL;
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	/* TX Descriptor cleanup */
	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		spin_lock_bh(&dp->tx_desc_lock[i]);

		list_for_each_entry_safe(tx_desc_info, tmp1,
					 &dp->tx_desc_used_list[i], list) {
			list_del(&tx_desc_info->list);
			skb = tx_desc_info->skb;

			if (!skb)
				continue;

			skb_cb = ATH12K_SKB_CB(skb);
			if (skb_cb->paddr_ext_desc) {
				dma_unmap_single(ab->dev,
						 skb_cb->paddr_ext_desc,
						 tx_desc_info->skb_ext_desc->len,
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(tx_desc_info->skb_ext_desc);
			}

			/* if we are unregistering, hw would've been destroyed and
			 * ar is no longer valid.
			 */
			if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
				ar = skb_cb->ar;

				if (atomic_dec_and_test(&ar->dp.num_tx_pending))
					wake_up(&ar->dp.tx_empty_waitq);
			}

			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
		}

		spin_unlock_bh(&dp->tx_desc_lock[i]);
	}

	if (dp->txbaddr) {
		for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
			spin_lock_bh(&dp->tx_desc_lock[pool_id]);

			for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
				tx_spt_page = i + pool_id *
					      ATH12K_TX_SPT_PAGES_PER_POOL(ab);
				if (!dp->txbaddr[tx_spt_page])
					continue;

				kfree(dp->txbaddr[tx_spt_page]);
				dp->txbaddr[tx_spt_page] = NULL;
			}

			spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
		}

		kfree(dp->txbaddr);
		dp->txbaddr = NULL;
	}

	/* free SPT pages */
	for (i = 0; i < dp->num_spt_pages; i++) {
		if (!dp->spt_info[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
				  dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
		dp->spt_info[i].vaddr = NULL;
	}

	kfree(dp->spt_info);
	dp->spt_info = NULL;
}

static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (dp->reoq_lut.vaddr_unaligned) {
		ath12k_hal_write_reoq_lut_addr(ab, 0);
		dma_free_coherent(ab->dev, dp->reoq_lut.size,
				  dp->reoq_lut.vaddr_unaligned,
				  dp->reoq_lut.paddr_unaligned);
		dp->reoq_lut.vaddr_unaligned = NULL;
	}

	if (dp->ml_reoq_lut.vaddr_unaligned) {
		ath12k_hal_write_ml_reoq_lut_addr(ab, 0);
		dma_free_coherent(ab->dev, dp->ml_reoq_lut.size,
				  dp->ml_reoq_lut.vaddr_unaligned,
				  dp->ml_reoq_lut.paddr_unaligned);
		dp->ml_reoq_lut.vaddr_unaligned = NULL;
	}
}

static void ath12k_dp_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int i;

	ath12k_dp_link_peer_rhash_tbl_destroy(dp);

	if (!dp->ab)
		return;

	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath12k_dp_cc_cleanup(ab);
	ath12k_dp_reoq_lut_cleanup(ab);
	ath12k_dp_deinit_bank_profiles(ab);
	ath12k_dp_srng_common_cleanup(ab);

	ath12k_dp_rx_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		kfree(dp->tx_ring[i].tx_status);
		dp->tx_ring[i].tx_status = NULL;
	}

	ath12k_dp_rx_free(ab);
	/* Deinit any SOC level resource */
}

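/* Cookie layout: the PPT (primary page table) index occupies the bits
 * above ATH12K_CC_PPT_SHIFT and the SPT (secondary page table) index the
 * bits below it, e.g. ppt_idx 2 with spt_idx 5 encodes as (2 << shift) | 5.
 */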
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
{
	return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}

static void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_dp *dp,
					    u16 ppt_idx, u16 spt_idx)
{
	return dp->spt_info[ppt_idx].vaddr + spt_idx;
}

struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_dp *dp,
						  u32 cookie)
{
	struct ath12k_rx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET(dp->ab);
	end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES(dp->ab);

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	ppt_idx = ppt_idx - dp->rx_ppt_base;
	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}
EXPORT_SYMBOL(ath12k_dp_get_rx_desc);

struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_dp *dp,
						  u32 cookie)
{
	struct ath12k_tx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
	end_ppt_idx = start_ppt_idx +
		      (ATH12K_TX_SPT_PAGES_PER_POOL(dp->ab) * ATH12K_HW_MAX_QUEUES);

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}
EXPORT_SYMBOL(ath12k_dp_get_tx_desc);

static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
	u32 num_rx_spt_pages = ATH12K_NUM_RX_SPT_PAGES(ab);
	u32 i, j, pool_id, tx_spt_page;
	u32 ppt_idx, cookie_ppt_idx;

	spin_lock_bh(&dp->rx_desc_lock);

	dp->rxbaddr = kzalloc_objs(struct ath12k_rx_desc_info *,
				   num_rx_spt_pages, GFP_ATOMIC);
	if (!dp->rxbaddr) {
		spin_unlock_bh(&dp->rx_desc_lock);
		return -ENOMEM;
	}

	/* The first ATH12K_NUM_RX_SPT_PAGES(ab) allocated SPT pages are
	 * used for RX
	 */
	for (i = 0; i < num_rx_spt_pages; i++) {
		rx_descs = kzalloc_objs(*rx_descs, ATH12K_MAX_SPT_ENTRIES,
					GFP_ATOMIC);
		if (!rx_descs) {
			spin_unlock_bh(&dp->rx_desc_lock);
			return -ENOMEM;
		}

		ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET(ab) + i;
		cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
		dp->rxbaddr[i] = &rx_descs[0];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
			rx_descs[j].device_id = ab->device_id;
			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);

			/* Update descriptor VA in SPT */
			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, j);
			*rx_desc_addr = &rx_descs[j];
		}
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	dp->txbaddr = kzalloc_objs(struct ath12k_tx_desc_info *,
				   ATH12K_NUM_TX_SPT_PAGES(ab), GFP_ATOMIC);
	if (!dp->txbaddr)
		return -ENOMEM;

	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL(ab); i++) {
			tx_descs = kzalloc_objs(*tx_descs,
						ATH12K_MAX_SPT_ENTRIES,
						GFP_ATOMIC);
			if (!tx_descs) {
				spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
				/* Caller takes care of TX pending and RX desc cleanup */
				return -ENOMEM;
			}

			tx_spt_page = i + pool_id *
				      ATH12K_TX_SPT_PAGES_PER_POOL(ab);
			ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;

			dp->txbaddr[tx_spt_page] = &tx_descs[0];

			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
				tx_descs[j].pool_id = pool_id;
				list_add_tail(&tx_descs[j].list,
					      &dp->tx_desc_free_list[pool_id]);

				/* Update descriptor VA in SPT */
				tx_desc_addr =
					ath12k_dp_cc_get_desc_addr_ptr(dp, ppt_idx, j);
				*tx_desc_addr = &tx_descs[j];
			}
		}
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
	}
	return 0;
}

static int ath12k_dp_cmem_init(struct ath12k_base *ab,
			       struct ath12k_dp *dp,
			       enum ath12k_dp_desc_type type)
{
	u32 cmem_base;
	int i, start, end;

	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;

	switch (type) {
	case ATH12K_DP_TX_DESC:
		start = ATH12K_TX_SPT_PAGE_OFFSET;
		end = start + ATH12K_NUM_TX_SPT_PAGES(ab);
		break;
	case ATH12K_DP_RX_DESC:
		cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
		start = ATH12K_RX_SPT_PAGE_OFFSET(ab);
		end = start + ATH12K_NUM_RX_SPT_PAGES(ab);
		break;
	default:
		ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
		return -EINVAL;
	}

	/* Write each SPT page's physical address into the PPT in CMEM */
	for (i = start; i < end; i++)
		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);

	return 0;
}

void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	int i;

	for (i = 0; i < ag->num_devices; i++) {
		if (ag->ab[i] == ab)
			continue;

		ath12k_dp_cmem_init(ab, ath12k_ab_to_dp(ag->ab[i]), ATH12K_DP_RX_DESC);
	}
}

static u32 ath12k_dp_get_num_spt_pages(struct ath12k_base *ab)
{
	return ATH12K_NUM_RX_SPT_PAGES(ab) + ATH12K_NUM_TX_SPT_PAGES(ab);
}

static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int i, ret = 0;

	INIT_LIST_HEAD(&dp->rx_desc_free_list);
	spin_lock_init(&dp->rx_desc_lock);

	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
		INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
		spin_lock_init(&dp->tx_desc_lock[i]);
	}

	dp->num_spt_pages = ath12k_dp_get_num_spt_pages(ab);
	if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
		dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;

	dp->spt_info = kzalloc_objs(struct ath12k_spt_info, dp->num_spt_pages);
	if (!dp->spt_info) {
		ath12k_warn(ab, "SPT page allocation failure");
		return -ENOMEM;
	}

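	/* Offset this device's RX PPT window by its device id so that RX
	 * cookies stay unique across partner devices in a hardware group
	 * (see ath12k_dp_partner_cc_init()).
	 */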
	dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES(ab);

	for (i = 0; i < dp->num_spt_pages; i++) {
		dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
							   ATH12K_PAGE_SIZE,
							   &dp->spt_info[i].paddr,
							   GFP_KERNEL);
		if (!dp->spt_info[i].vaddr) {
			ret = -ENOMEM;
			goto free;
		}

		if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
			ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
			ret = -EINVAL;
			goto free;
		}
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cc_desc_init(ab);
	if (ret) {
		ath12k_warn(ab, "HW CC desc init failed %d", ret);
		goto free;
	}

	return 0;
free:
	ath12k_dp_cc_cleanup(ab);
	return ret;
}

static int ath12k_dp_alloc_reoq_lut(struct ath12k_base *ab,
				    struct ath12k_reo_q_addr_lut *lut)
{
	lut->size = DP_REOQ_LUT_SIZE + HAL_REO_QLUT_ADDR_ALIGN - 1;
	lut->vaddr_unaligned = dma_alloc_coherent(ab->dev, lut->size,
						  &lut->paddr_unaligned,
						  GFP_KERNEL | __GFP_ZERO);
	if (!lut->vaddr_unaligned)
		return -ENOMEM;

	lut->vaddr = PTR_ALIGN(lut->vaddr_unaligned, HAL_REO_QLUT_ADDR_ALIGN);
	lut->paddr = lut->paddr_unaligned +
		     ((unsigned long)lut->vaddr - (unsigned long)lut->vaddr_unaligned);
	return 0;
}

static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int ret;

	if (!ab->hw_params->reoq_lut_support)
		return 0;

	ret = ath12k_dp_alloc_reoq_lut(ab, &dp->reoq_lut);
	if (ret) {
		ath12k_warn(ab, "failed to allocate memory for reoq table");
		return ret;
	}

	ret = ath12k_dp_alloc_reoq_lut(ab, &dp->ml_reoq_lut);
	if (ret) {
		ath12k_warn(ab, "failed to allocate memory for ML reoq table");
		dma_free_coherent(ab->dev, dp->reoq_lut.size,
				  dp->reoq_lut.vaddr_unaligned,
				  dp->reoq_lut.paddr_unaligned);
		dp->reoq_lut.vaddr_unaligned = NULL;
		return ret;
	}

	/* The register holds bits [39:8] of the LUT base address, so the
	 * LUT must be allocated with the low eight bits of its physical
	 * address zero. The current design supports physical addresses up
	 * to 4 GB, hence the value fits in a 32-bit register.
	 */

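	/* Program bits [39:8] of each LUT base address, i.e. paddr >> 8 */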
	ath12k_hal_write_reoq_lut_addr(ab, dp->reoq_lut.paddr >> 8);
	ath12k_hal_write_ml_reoq_lut_addr(ab, dp->ml_reoq_lut.paddr >> 8);
	ath12k_hal_reoq_lut_addr_read_enable(ab);
	ath12k_hal_reoq_lut_set_max_peerid(ab);

	return 0;
}

static int ath12k_dp_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp = ath12k_ab_to_dp(ab);
	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->reo_cmd_update_rx_queue_list);
	spin_lock_init(&dp->reo_cmd_lock);
	spin_lock_init(&dp->reo_rxq_flush_lock);

	spin_lock_init(&dp->dp_lock);
	INIT_LIST_HEAD(&dp->peers);

	mutex_init(&dp->link_peer_rhash_tbl_lock);

	dp->reo_cmd_cache_flush_count = 0;
	dp->idle_link_rbm =
		ath12k_hal_get_idle_link_rbm(&ab->hal, ab->device_id);

	ret = ath12k_dp_link_peer_rhash_tbl_init(dp);
	if (ret) {
		ath12k_warn(ab, "failed to init link_peer rhash table: %d\n", ret);
		return ret;
	}

	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		goto rhash_destroy;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
		goto rhash_destroy;
	}

	ret = ath12k_dp_cc_init(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
		goto fail_link_desc_cleanup;
	}

	ret = ath12k_dp_init_bank_profiles(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
		goto fail_hw_cc_cleanup;
	}

	ret = ath12k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_dp_bank_profiles_cleanup;

	size = ab->hal.hal_wbm_release_ring_tx_size *
	       DP_TX_COMP_RING_SIZE(ab);

	ret = ath12k_dp_reoq_lut_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
		goto fail_cmn_srng_cleanup;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE(ab) - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			/* FIXME: The allocated tx status is not freed
			 * properly here
			 */
			goto fail_cmn_reoq_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath12k_hal_tx_set_dscp_tid_map(ab, i);

	ret = ath12k_dp_rx_alloc(ab);
	if (ret)
		goto fail_dp_rx_free;

	/* Init any SOC level resource for DP */

	return 0;

fail_dp_rx_free:
	ath12k_dp_rx_free(ab);

fail_cmn_reoq_cleanup:
	ath12k_dp_reoq_lut_cleanup(ab);

fail_cmn_srng_cleanup:
	ath12k_dp_srng_common_cleanup(ab);

fail_dp_bank_profiles_cleanup:
	ath12k_dp_deinit_bank_profiles(ab);

fail_hw_cc_cleanup:
	ath12k_dp_cc_cleanup(ab);

fail_link_desc_cleanup:
	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

rhash_destroy:
	ath12k_dp_link_peer_rhash_tbl_destroy(dp);

	return ret;
}

void ath12k_dp_cmn_device_deinit(struct ath12k_dp *dp)
{
	ath12k_dp_cleanup(dp->ab);
}

int ath12k_dp_cmn_device_init(struct ath12k_dp *dp)
{
	int ret;

	ret = ath12k_dp_setup(dp->ab);
	if (ret)
		return ret;

	return 0;
}

void ath12k_dp_cmn_hw_group_unassign(struct ath12k_dp *dp,
				     struct ath12k_hw_group *ag)
{
	struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;

	lockdep_assert_held(&ag->mutex);

	dp_hw_grp->dp[dp->device_id] = NULL;

	dp->ag = NULL;
	dp->device_id = ATH12K_INVALID_DEVICE_ID;
}

void ath12k_dp_cmn_hw_group_assign(struct ath12k_dp *dp,
				   struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_hw_group *dp_hw_grp = &ag->dp_hw_grp;

	dp->ag = ag;
	dp->device_id = ab->device_id;
	dp_hw_grp->dp[dp->device_id] = dp;
}