1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
6 */
7
8 #include <linux/export.h>
9 #include "core.h"
10 #include "pcic.h"
11 #include "debug.h"
12
13 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
14 "bhi",
15 "mhi-er0",
16 "mhi-er1",
17 "ce0",
18 "ce1",
19 "ce2",
20 "ce3",
21 "ce4",
22 "ce5",
23 "ce6",
24 "ce7",
25 "ce8",
26 "ce9",
27 "ce10",
28 "ce11",
29 "host2wbm-desc-feed",
30 "host2reo-re-injection",
31 "host2reo-command",
32 "host2rxdma-monitor-ring3",
33 "host2rxdma-monitor-ring2",
34 "host2rxdma-monitor-ring1",
35 "reo2ost-exception",
36 "wbm2host-rx-release",
37 "reo2host-status",
38 "reo2host-destination-ring4",
39 "reo2host-destination-ring3",
40 "reo2host-destination-ring2",
41 "reo2host-destination-ring1",
42 "rxdma2host-monitor-destination-mac3",
43 "rxdma2host-monitor-destination-mac2",
44 "rxdma2host-monitor-destination-mac1",
45 "ppdu-end-interrupts-mac3",
46 "ppdu-end-interrupts-mac2",
47 "ppdu-end-interrupts-mac1",
48 "rxdma2host-monitor-status-ring-mac3",
49 "rxdma2host-monitor-status-ring-mac2",
50 "rxdma2host-monitor-status-ring-mac1",
51 "host2rxdma-host-buf-ring-mac3",
52 "host2rxdma-host-buf-ring-mac2",
53 "host2rxdma-host-buf-ring-mac1",
54 "rxdma2host-destination-ring-mac3",
55 "rxdma2host-destination-ring-mac2",
56 "rxdma2host-destination-ring-mac1",
57 "host2tcl-input-ring4",
58 "host2tcl-input-ring3",
59 "host2tcl-input-ring2",
60 "host2tcl-input-ring1",
61 "wbm2host-tx-completions-ring3",
62 "wbm2host-tx-completions-ring2",
63 "wbm2host-tx-completions-ring1",
64 "tcl2host-status-ring",
65 };
66
/* Per-chip MSI vector layout: how each supported hw revision's total MSI
 * vectors are partitioned among the "MHI", "CE", "WAKE" and "DP" users.
 * The matching entry is looked up by hw_rev in
 * ath11k_pcic_init_msi_config() and queried via
 * ath11k_pcic_get_user_msi_assignment().
 */
static const struct ath11k_msi_config ath11k_msi_config[] = {
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6390_HW20,
	},
	{
		/* QCN9074 exposes fewer vectors and has no WAKE user */
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
		.hw_rev = ATH11K_HW_QCN9074_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW20,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW21,
	},
	{
		/* WCN6750 has neither an MHI nor a WAKE user */
		.total_vectors = 28,
		.total_users = 2,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
		},
		.hw_rev = ATH11K_HW_WCN6750_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA2066_HW21,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6698AQ_HW21,
	},
};
143
ath11k_pcic_init_msi_config(struct ath11k_base * ab)144 int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
145 {
146 const struct ath11k_msi_config *msi_config;
147 int i;
148
149 for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
150 msi_config = &ath11k_msi_config[i];
151
152 if (msi_config->hw_rev == ab->hw_rev)
153 break;
154 }
155
156 if (i == ARRAY_SIZE(ath11k_msi_config)) {
157 ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
158 ab->hw_rev);
159 return -EINVAL;
160 }
161
162 ab->pci.msi.config = msi_config;
163 return 0;
164 }
165 EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
166
/* Raw register write: offsets below the window region map directly into
 * the BAR; anything at or above ATH11K_PCI_WINDOW_START goes through the
 * bus-specific window op.
 */
static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	if (offset >= ATH11K_PCI_WINDOW_START) {
		ab->pci.ops->window_write32(ab, offset, value);
		return;
	}

	iowrite32(value, ab->mem + offset);
}
174
/* Write a register, waking the device first when the offset lies beyond
 * the always-accessible range (BAR + 4K - 32) and the device has
 * completed init.  The wakeup reference is released only if taking it
 * succeeded.
 */
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	bool needs_wakeup;
	int wake_ret = 0;

	needs_wakeup = offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF &&
		       test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	if (needs_wakeup && ab->pci.ops->wakeup)
		wake_ret = ab->pci.ops->wakeup(ab);

	__ath11k_pcic_write32(ab, offset, value);

	if (needs_wakeup && !wake_ret && ab->pci.ops->release)
		ab->pci.ops->release(ab);
}
EXPORT_SYMBOL(ath11k_pcic_write32);
194
/* Raw register read: direct BAR access below the window region,
 * bus-specific window op otherwise.
 */
static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
	if (offset < ATH11K_PCI_WINDOW_START)
		return ioread32(ab->mem + offset);

	return ab->pci.ops->window_read32(ab, offset);
}
206
/* Read a register, waking the device first when the offset lies beyond
 * the always-accessible range (BAR + 4K - 32) and the device has
 * completed init.  The wakeup reference is released only if taking it
 * succeeded.
 */
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
	bool needs_wakeup;
	int wake_ret = 0;
	u32 val;

	needs_wakeup = offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF &&
		       test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	if (needs_wakeup && ab->pci.ops->wakeup)
		wake_ret = ab->pci.ops->wakeup(ab);

	val = __ath11k_pcic_read32(ab, offset);

	if (needs_wakeup && !wake_ret && ab->pci.ops->release)
		ab->pci.ops->release(ab);

	return val;
}
EXPORT_SYMBOL(ath11k_pcic_read32);
229
/* Read the register range [start, end] (inclusive, in 4-byte steps)
 * into buf.  If the range reaches past the always-accessible region the
 * device may have to be woken up first; a wakeup failure is only warned
 * about so that partially valid data (e.g. firmware crash clues) can
 * still be collected.
 *
 * Always returns 0; buf contents may be invalid if wakeup failed.
 */
int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
{
	int ret = 0;
	bool wakeup_required;
	u32 *data = buf;
	u32 i;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
			  end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (wakeup_required && ab->pci.ops->wakeup) {
		ret = ab->pci.ops->wakeup(ab);
		if (ret) {
			ath11k_warn(ab,
				    "wakeup failed, data may be invalid: %d",
				    ret);
			/* Even though wakeup() failed, continue processing rather
			 * than returning because some parts of the data may still
			 * be valid and useful in some cases, e.g. could give us
			 * some clues on firmware crash.
			 * Mislead due to invalid data could be avoided because we
			 * are aware of the wakeup failure.
			 */
		}
	}

	/* "i <= end" rather than "i < end + 1" so that end == U32_MAX
	 * cannot wrap the bound to zero
	 */
	for (i = start; i <= end; i += 4)
		*data++ = __ath11k_pcic_read32(ab, i);

	/* release the wakeup reference only if taking it succeeded, as in
	 * ath11k_pcic_read32()/ath11k_pcic_write32(); releasing after a
	 * failed wakeup would unbalance the reference
	 */
	if (wakeup_required && !ret && ab->pci.ops->release)
		ab->pci.ops->release(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_read);
267
/* Return the cached MSI target address (low and high 32 bits) from
 * ab->pci.msi.
 */
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				 u32 *msi_addr_hi)
{
	*msi_addr_lo = ab->pci.msi.addr_lo;
	*msi_addr_hi = ab->pci.msi.addr_hi;
}
EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
275
/* Look up the MSI vector assignment of the named user ("MHI", "CE",
 * "WAKE" or "DP") in the active MSI config.
 *
 * On success fills num_vectors, base_vector and user_base_data
 * (base_vector offset by the endpoint's base MSI data) and returns 0;
 * returns -EINVAL if the user is not present in the config.
 */
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
					int *num_vectors, u32 *user_base_data,
					u32 *base_vector)
{
	const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
	const struct ath11k_msi_user *user;
	int i;

	for (i = 0; i < msi_config->total_users; i++) {
		user = &msi_config->users[i];

		if (strcmp(user->name, user_name) != 0)
			continue;

		*num_vectors = user->num_vectors;
		*base_vector = user->base_vector;
		*user_base_data = *base_vector + ab->pci.msi.ep_base_data;

		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
			   user_name, *num_vectors, *user_base_data,
			   *base_vector);

		return 0;
	}

	ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
303
/* Map a CE id to its MSI data index.  MSI vectors are assigned only to
 * CEs whose interrupts are enabled, so the index is the count of
 * interrupt-capable CEs encountered before ce_id.
 */
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
{
	u32 ce, count = 0;

	for (ce = 0; ce < ab->hw_params.ce_count; ce++) {
		if (ath11k_ce_get_attr_flags(ab, ce) & CE_ATTR_DIS_INTR)
			continue;

		if (ce == ce_id)
			break;

		count++;
	}

	*msi_idx = count;
}
EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
320
ath11k_pcic_free_ext_irq(struct ath11k_base * ab)321 static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
322 {
323 int i, j;
324
325 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
326 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
327
328 for (j = 0; j < irq_grp->num_irq; j++)
329 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
330
331 netif_napi_del(&irq_grp->napi);
332 free_netdev(irq_grp->napi_ndev);
333 }
334 }
335
ath11k_pcic_free_irq(struct ath11k_base * ab)336 void ath11k_pcic_free_irq(struct ath11k_base *ab)
337 {
338 int i, irq_idx;
339
340 for (i = 0; i < ab->hw_params.ce_count; i++) {
341 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
342 continue;
343 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
344 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
345 }
346
347 ath11k_pcic_free_ext_irq(ab);
348 }
349 EXPORT_SYMBOL(ath11k_pcic_free_irq);
350
/* Unmask a single CE's IRQ.  With only one MSI vector the shared IRQ is
 * enabled/disabled uniformly elsewhere, so there is nothing per-CE to do.
 */
static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	enable_irq(ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ce_id]);
}
364
/* Mask a single CE's IRQ without waiting for running handlers.  With
 * only one MSI vector the shared IRQ is managed uniformly elsewhere, so
 * there is nothing per-CE to do.
 */
static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	disable_irq_nosync(ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ce_id]);
}
378
ath11k_pcic_ce_irqs_disable(struct ath11k_base * ab)379 static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
380 {
381 int i;
382
383 clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
384
385 for (i = 0; i < ab->hw_params.ce_count; i++) {
386 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
387 continue;
388 ath11k_pcic_ce_irq_disable(ab, i);
389 }
390 }
391
ath11k_pcic_sync_ce_irqs(struct ath11k_base * ab)392 static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
393 {
394 int i;
395 int irq_idx;
396
397 for (i = 0; i < ab->hw_params.ce_count; i++) {
398 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
399 continue;
400
401 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
402 synchronize_irq(ab->irq_num[irq_idx]);
403 }
404 }
405
ath11k_pcic_ce_tasklet(struct tasklet_struct * t)406 static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
407 {
408 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
409 int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
410
411 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
412
413 enable_irq(ce_pipe->ab->irq_num[irq_idx]);
414 }
415
ath11k_pcic_ce_interrupt_handler(int irq,void * arg)416 static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
417 {
418 struct ath11k_ce_pipe *ce_pipe = arg;
419 struct ath11k_base *ab = ce_pipe->ab;
420 int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
421
422 if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
423 return IRQ_HANDLED;
424
425 /* last interrupt received for this CE */
426 ce_pipe->timestamp = jiffies;
427
428 disable_irq_nosync(ab->irq_num[irq_idx]);
429
430 tasklet_schedule(&ce_pipe->intr_tq);
431
432 return IRQ_HANDLED;
433 }
434
ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp * irq_grp)435 static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
436 {
437 struct ath11k_base *ab = irq_grp->ab;
438 int i;
439
440 /* In case of one MSI vector, we handle irq enable/disable
441 * in a uniform way since we only have one irq
442 */
443 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
444 return;
445
446 for (i = 0; i < irq_grp->num_irq; i++)
447 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
448 }
449
__ath11k_pcic_ext_irq_disable(struct ath11k_base * ab)450 static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
451 {
452 int i;
453
454 clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
455
456 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
457 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
458
459 ath11k_pcic_ext_grp_disable(irq_grp);
460
461 if (irq_grp->napi_enabled) {
462 napi_synchronize(&irq_grp->napi);
463 napi_disable(&irq_grp->napi);
464 irq_grp->napi_enabled = false;
465 }
466 }
467 }
468
ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp * irq_grp)469 static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
470 {
471 struct ath11k_base *ab = irq_grp->ab;
472 int i;
473
474 /* In case of one MSI vector, we handle irq enable/disable in a
475 * uniform way since we only have one irq
476 */
477 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
478 return;
479
480 for (i = 0; i < irq_grp->num_irq; i++)
481 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
482 }
483
ath11k_pcic_ext_irq_enable(struct ath11k_base * ab)484 void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
485 {
486 int i;
487
488 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
489 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
490
491 if (!irq_grp->napi_enabled) {
492 napi_enable(&irq_grp->napi);
493 irq_grp->napi_enabled = true;
494 }
495 ath11k_pcic_ext_grp_enable(irq_grp);
496 }
497
498 set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
499 }
500 EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
501
ath11k_pcic_sync_ext_irqs(struct ath11k_base * ab)502 static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
503 {
504 int i, j, irq_idx;
505
506 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
507 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
508
509 for (j = 0; j < irq_grp->num_irq; j++) {
510 irq_idx = irq_grp->irqs[j];
511 synchronize_irq(ab->irq_num[irq_idx]);
512 }
513 }
514 }
515
/* Disable all DP ext group interrupts and wait for any handlers that
 * were already running to finish.
 */
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
522
ath11k_pcic_ext_grp_napi_poll(struct napi_struct * napi,int budget)523 static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
524 {
525 struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
526 struct ath11k_ext_irq_grp,
527 napi);
528 struct ath11k_base *ab = irq_grp->ab;
529 int work_done;
530 int i;
531
532 work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
533 if (work_done < budget) {
534 napi_complete_done(napi, work_done);
535 for (i = 0; i < irq_grp->num_irq; i++)
536 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
537 }
538
539 if (work_done > budget)
540 work_done = budget;
541
542 return work_done;
543 }
544
ath11k_pcic_ext_interrupt_handler(int irq,void * arg)545 static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
546 {
547 struct ath11k_ext_irq_grp *irq_grp = arg;
548 struct ath11k_base *ab = irq_grp->ab;
549 int i;
550
551 if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
552 return IRQ_HANDLED;
553
554 ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);
555
556 /* last interrupt received for this group */
557 irq_grp->timestamp = jiffies;
558
559 for (i = 0; i < irq_grp->num_irq; i++)
560 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
561
562 napi_schedule(&irq_grp->napi);
563
564 return IRQ_HANDLED;
565 }
566
/* Translate an MSI vector number into a Linux IRQ number via the
 * bus-specific get_msi_irq op (mandatory, enforced by
 * ath11k_pcic_register_pci_ops()).
 */
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.ops->get_msi_irq(ab, vector);
}
572
ath11k_pcic_ext_irq_config(struct ath11k_base * ab)573 static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
574 {
575 int i, j, n, ret, num_vectors = 0;
576 u32 user_base_data = 0, base_vector = 0;
577 struct ath11k_ext_irq_grp *irq_grp;
578 unsigned long irq_flags;
579
580 ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
581 &user_base_data,
582 &base_vector);
583 if (ret < 0)
584 return ret;
585
586 irq_flags = IRQF_SHARED;
587 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
588 irq_flags |= IRQF_NOBALANCING;
589
590 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
591 irq_grp = &ab->ext_irq_grp[i];
592 u32 num_irq = 0;
593
594 irq_grp->ab = ab;
595 irq_grp->grp_id = i;
596 irq_grp->napi_ndev = alloc_netdev_dummy(0);
597 if (!irq_grp->napi_ndev) {
598 ret = -ENOMEM;
599 goto fail_allocate;
600 }
601
602 netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
603 ath11k_pcic_ext_grp_napi_poll);
604
605 if (ab->hw_params.ring_mask->tx[i] ||
606 ab->hw_params.ring_mask->rx[i] ||
607 ab->hw_params.ring_mask->rx_err[i] ||
608 ab->hw_params.ring_mask->rx_wbm_rel[i] ||
609 ab->hw_params.ring_mask->reo_status[i] ||
610 ab->hw_params.ring_mask->rxdma2host[i] ||
611 ab->hw_params.ring_mask->host2rxdma[i] ||
612 ab->hw_params.ring_mask->rx_mon_status[i]) {
613 num_irq = 1;
614 }
615
616 irq_grp->num_irq = num_irq;
617 irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
618
619 for (j = 0; j < irq_grp->num_irq; j++) {
620 int irq_idx = irq_grp->irqs[j];
621 int vector = (i % num_vectors) + base_vector;
622 int irq = ath11k_pcic_get_msi_irq(ab, vector);
623
624 if (irq < 0) {
625 ret = irq;
626 goto fail_irq;
627 }
628
629 ab->irq_num[irq_idx] = irq;
630
631 ath11k_dbg(ab, ATH11K_DBG_PCI,
632 "irq %d group %d\n", irq, i);
633
634 irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
635 ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
636 irq_flags, "DP_EXT_IRQ", irq_grp);
637 if (ret) {
638 ath11k_err(ab, "failed request irq %d: %d\n",
639 vector, ret);
640 for (n = 0; n <= i; n++) {
641 irq_grp = &ab->ext_irq_grp[n];
642 free_netdev(irq_grp->napi_ndev);
643 }
644 return ret;
645 }
646 }
647 ath11k_pcic_ext_grp_disable(irq_grp);
648 }
649
650 return 0;
651 fail_irq:
652 /* i ->napi_ndev was properly allocated. Free it also */
653 i += 1;
654 fail_allocate:
655 for (n = 0; n < i; n++) {
656 irq_grp = &ab->ext_irq_grp[n];
657 free_netdev(irq_grp->napi_ndev);
658 }
659 return ret;
660 }
661
ath11k_pcic_config_irq(struct ath11k_base * ab)662 int ath11k_pcic_config_irq(struct ath11k_base *ab)
663 {
664 struct ath11k_ce_pipe *ce_pipe;
665 u32 msi_data_start;
666 u32 msi_data_count, msi_data_idx;
667 u32 msi_irq_start;
668 unsigned int msi_data;
669 int irq, i, ret, irq_idx;
670 unsigned long irq_flags;
671
672 ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
673 &msi_data_start, &msi_irq_start);
674 if (ret)
675 return ret;
676
677 irq_flags = IRQF_SHARED;
678 if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
679 irq_flags |= IRQF_NOBALANCING;
680
681 /* Configure CE irqs */
682 for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
683 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
684 continue;
685
686 msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
687 irq = ath11k_pcic_get_msi_irq(ab, msi_data);
688 if (irq < 0)
689 return irq;
690
691 ce_pipe = &ab->ce.ce_pipe[i];
692
693 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
694
695 tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
696
697 ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
698 irq_flags, irq_name[irq_idx], ce_pipe);
699 if (ret) {
700 ath11k_err(ab, "failed to request irq %d: %d\n",
701 irq_idx, ret);
702 return ret;
703 }
704
705 ab->irq_num[irq_idx] = irq;
706 msi_data_idx++;
707
708 ath11k_pcic_ce_irq_disable(ab, i);
709 }
710
711 ret = ath11k_pcic_ext_irq_config(ab);
712 if (ret)
713 return ret;
714
715 return 0;
716 }
717 EXPORT_SYMBOL(ath11k_pcic_config_irq);
718
ath11k_pcic_ce_irqs_enable(struct ath11k_base * ab)719 void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
720 {
721 int i;
722
723 set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
724
725 for (i = 0; i < ab->hw_params.ce_count; i++) {
726 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
727 continue;
728 ath11k_pcic_ce_irq_enable(ab, i);
729 }
730 }
731 EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
732
ath11k_pcic_kill_tasklets(struct ath11k_base * ab)733 static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
734 {
735 int i;
736
737 for (i = 0; i < ab->hw_params.ce_count; i++) {
738 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
739
740 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
741 continue;
742
743 tasklet_kill(&ce_pipe->intr_tq);
744 }
745 }
746
/* Fully quiesce CE interrupt processing: disable the IRQs, wait for any
 * running handlers to finish, then kill the per-CE tasklets.
 */
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_disable(ab);
	ath11k_pcic_sync_ce_irqs(ab);
	ath11k_pcic_kill_tasklets(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
754
/* Stop device operation: quiesce all CE interrupt processing and clean
 * up the copy engine pipes.
 */
void ath11k_pcic_stop(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}
EXPORT_SYMBOL(ath11k_pcic_stop);
761
/* Start device operation: mark init done (from here on register access
 * beyond the always-on range may trigger a device wakeup, see
 * ath11k_pcic_read32()/ath11k_pcic_write32()), enable CE interrupts and
 * post RX buffers.  Always returns 0.
 */
int ath11k_pcic_start(struct ath11k_base *ab)
{
	set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	ath11k_pcic_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_start);
772
/* Resolve the uplink and downlink CE pipe numbers for a service id from
 * the hw-specific service-to-pipe table.
 *
 * Returns 0 with *ul_pipe and *dl_pipe filled in, or -ENOENT (with a
 * WARN) if either direction has no mapping.  Duplicate mappings for a
 * direction also trigger a WARN.
 */
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
				    u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *map;
	bool found_ul = false, found_dl = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		map = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(map->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(map->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(found_dl);
			*dl_pipe = __le32_to_cpu(map->pipenum);
			found_dl = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(found_ul);
			*ul_pipe = __le32_to_cpu(map->pipenum);
			found_ul = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(found_dl);
			WARN_ON(found_ul);
			*dl_pipe = __le32_to_cpu(map->pipenum);
			*ul_pipe = __le32_to_cpu(map->pipenum);
			found_dl = true;
			found_ul = true;
			break;
		}
	}

	if (WARN_ON(!found_ul || !found_dl))
		return -ENOENT;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
816
ath11k_pcic_register_pci_ops(struct ath11k_base * ab,const struct ath11k_pci_ops * pci_ops)817 int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
818 const struct ath11k_pci_ops *pci_ops)
819 {
820 if (!pci_ops)
821 return 0;
822
823 /* Return error if mandatory pci_ops callbacks are missing */
824 if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
825 !pci_ops->window_read32)
826 return -EINVAL;
827
828 ab->pci.ops = pci_ops;
829 return 0;
830 }
831 EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
832
ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base * ab)833 void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
834 {
835 int i;
836
837 for (i = 0; i < ab->hw_params.ce_count; i++) {
838 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
839 i == ATH11K_PCI_CE_WAKE_IRQ)
840 continue;
841 ath11k_pcic_ce_irq_enable(ab, i);
842 }
843 }
844 EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
845
ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base * ab)846 void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
847 {
848 int i;
849 int irq_idx;
850 struct ath11k_ce_pipe *ce_pipe;
851
852 for (i = 0; i < ab->hw_params.ce_count; i++) {
853 ce_pipe = &ab->ce.ce_pipe[i];
854 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
855
856 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
857 i == ATH11K_PCI_CE_WAKE_IRQ)
858 continue;
859
860 disable_irq_nosync(ab->irq_num[irq_idx]);
861 synchronize_irq(ab->irq_num[irq_idx]);
862 tasklet_kill(&ce_pipe->intr_tq);
863 }
864 }
865 EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
866