// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 */

#include "core.h"
#include "debug.h"

static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}

static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
		if (!buff)
			break;

		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}
		ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}
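
/*
 * Pass the direct buffer ring configuration to firmware over WMI: the refill
 * SRNG base address, head/tail index addresses, element count and buffer
 * size set up in ath11k_dbring_buf_setup(), plus the per-event response
 * count and timeout set in ath11k_dbring_set_cfg().
 */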
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
	param.module_id = id;
	param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems = ring->bufs_max;
	param.buf_size = ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms = ring->event_timeout_ms;

	ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}

int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}

int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

	ret = ath11k_dbring_fill_bufs(ar, ring);

	return ret;
}

int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}

int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}
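
/*
 * Handle a WMI DMA buffer release event from firmware: look up each released
 * buffer by the buf_id carried in its cookie, unmap it, pass the aligned
 * payload to the registered handler (if any), then clear and replenish the
 * buffer back to the refill ring. Only the spectral module
 * (WMI_DIRECT_BUF_SPECTRAL) is supported here.
 */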
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		buff->paddr = 0;
		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}

void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}