/*
 * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

struct desc_alloc_info {
	dma_addr_t pa;
	void *va;
};

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}

/**
 * Allocate the physical ring (p-ring) and the required
 * number of descriptors of the required size.
 * Initialize the descriptors as required by pmc dma.
 * Each descriptor buffer's dwords are initialized to hold the
 * dword's serial number in the LSW and the reserved value
 * PCM_DATA_INVALID_DW_VAL in the MSW.
 */
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate descriptors info list in pmc context */
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate pring buffer and descriptors.
	 * vring->va should be aligned on its size rounded up to power of 2
	 * This is granted by the dma_alloc_coherent.
	 */
	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* initially, all descriptors are SW owned
	 * For Tx, Rx, and PMC, the ownership bit is at the same location,
	 * thus we can use any
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
				descriptor_size,
				&pmc->descriptors[i].pa,
				GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d", i);
			goto release_pmc_skbs;
		}

		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0; /* 0 = HW_OWNED */
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	/* check the index before dereferencing, to avoid reading past the
	 * end of the descriptors array when all entries were allocated
	 */
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
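
/* Illustrative usage only (not part of the driver): a debugfs "alloc" hook
 * would typically parse the ring geometry from user input and call
 * wil_pmc_alloc(); the concrete values below are hypothetical examples.
 *
 *	wil_pmc_alloc(wil, 256, 2048);
 *	if (wil_pmc_last_cmd_status(wil))
 *		wil_err(wil, "pmc alloc failed\n");
 */
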
/**
 * Traverse the p-ring and release all buffers.
 * At the end, release the p-ring memory as well.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
				 sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		/* check the index before dereferencing, to avoid reading
		 * past the end of the descriptors array when all entries
		 * were allocated
		 */
		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va;
		     i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}
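
/* Note on send_pmc_cmd (illustrative caller convention, inferred from the
 * branch above): pass a non-zero value on a normal release so the
 * WMI_PMC_RELEASE command reaches the firmware before host memory is
 * reclaimed; pass 0 when the firmware is not reachable (e.g. on a reset
 * path), in which case only the host-side allocations are freed:
 *
 *	wil_pmc_free(wil, 0);	// fw down - skip the WMI command
 */
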
/**
 * Status of the last operation requested via debugfs: alloc/free/read.
 * Returns 0 on success or a negative errno.
 */
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}

/**
 * Read from the requested position up to the end of the current
 * descriptor; the descriptor size is the one configured during the
 * alloc request.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	ssize_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* on success, advance the position and return the copied byte
	 * count; on error, leave the position untouched
	 */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	if (retval > 0)
		*f_pos += retval;
out:
	mutex_unlock(&pmc->lock);

	return retval;
}
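
/* Worked example of the position math above (illustrative): with
 * descriptor_size = 2048 and *f_pos = 5000, do_div() leaves idx = 2 and
 * offset = 5000 - 2 * 2048 = 904, so up to 2048 - 904 = 1144 bytes are
 * served from descriptors[2].va. A single read never crosses a descriptor
 * boundary; userspace loops until the requested amount is read or EOF
 * (a zero return with last_cmd_status == -ERANGE) is reached.
 */
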
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	switch (whence) {
	case SEEK_SET:
		newpos = off;
		break;

	case SEEK_CUR:
		newpos = filp->f_pos + off;
		break;

	case SEEK_END:
		newpos = pmc_size;
		break;

	default: /* can't happen */
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}
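
/* Usage sketch (illustrative, not part of this file): the debugfs side is
 * expected to expose the collected PMC data through a file_operations that
 * plugs in the two handlers above, e.g.:
 *
 *	static const struct file_operations fops_pmcdata = {
 *		.open	= simple_open,
 *		.read	= wil_pmc_read,
 *		.llseek	= wil_pmc_llseek,
 *	};
 *
 * registered via debugfs_create_file("pmcdata", 0444, dbg_dir, wil,
 * &fops_pmcdata); the file name, mode, and directory are assumptions.
 */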