// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "core.h"
#include "debug.h"

static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
					struct ath12k_dbring *ring,
					struct ath12k_dbring_element *buff,
					gfp_t gfp)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
		 u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);

	ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath12k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath12k_hal_srng_access_end(ab, srng);
	return ret;
}

static int ath12k_dbring_fill_bufs(struct ath12k *ar,
				   struct ath12k_dbring *ring,
				   gfp_t gfp)
{
	struct ath12k_dbring_element *buff;
	struct hal_srng *srng;
	struct ath12k_base *ab = ar->ab;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = sizeof(*buff) + ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(size, gfp);
		if (!buff)
			break;

		ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp);
		if (ret) {
			ath12k_warn(ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}

int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
				struct ath12k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
	arg.module_id = id;
	arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
	arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
	arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
	arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
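	/* Like the head index above, the tail index address refers to the
	 * refill SRNG's tail pointer (fetched in ath12k_dbring_buf_setup),
	 * letting firmware track the ring's producer/consumer state.
	 */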
	arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	arg.num_elems = ring->bufs_max;
	arg.buf_size = ring->buf_sz;
	arg.num_resp_per_event = ring->num_resp_per_event;
	arg.event_timeout_ms = ring->event_timeout_ms;

	ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg);
	if (ret) {
		ath12k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}

int ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath12k *,
					 struct ath12k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}

int ath12k_dbring_buf_setup(struct ath12k *ar,
			    struct ath12k_dbring *ring,
			    struct ath12k_dbring_cap *db_cap)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
			 ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL);

	return ret;
}

int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath12k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}

int ath12k_dbring_get_cap(struct ath12k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath12k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}

int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
				       struct ath12k_dbring_buf_release_event *ev)
{
	struct ath12k_dbring *ring = NULL;
	struct hal_srng *srng;
	struct ath12k *ar;
	struct ath12k_dbring_element *buff;
	struct ath12k_dbring_data handler_data;
	struct ath12k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = le32_to_cpu(ev->fixed.pdev_id);

	if (pdev_idx >= ab->num_radios) {
		ath12k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath12k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
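	/* pdevs_active is RCU protected; drop the event if this pdev went
	 * inactive before the release entries could be processed.
	 */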
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		break;
	default:
		ring = NULL;
		ath12k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry);
	size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		memset(buff, 0, size);
		ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}

void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
{
	ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
{
	struct ath12k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}