// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sizes.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/export.h>

#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>

struct dentry *mana_debugfs_root;

struct mana_dev_recovery {
	struct list_head list;
	struct pci_dev *pdev;
	enum gdma_eqe_type type;
};

static struct mana_dev_recovery_work {
	struct list_head dev_list;
	struct delayed_work work;

	/* Lock for dev_list above */
	spinlock_t lock;
} mana_dev_recovery_work;

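/* Helpers to read 32/64-bit device registers mapped in BAR0 */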
static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
	return readl(g->bar0_va + offset);
}

static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
{
	return readq(g->bar0_va + offset);
}

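/* Read the doorbell and shared-memory layout that a PF exposes in BAR0.
 * Each offset read from the device is validated against the BAR0 size
 * before it is used to derive a mapped address.
 */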
static int mana_gd_init_pf_regs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	u64 remaining_barsize;
	u64 sriov_base_off;
	u64 sriov_shm_off;

	gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;

	/* mana_gd_ring_doorbell() accesses offsets up to DOORBELL_OFFSET_EQ
	 * (0xFF8) + 8 bytes = 4KB within each doorbell page, so the page
	 * size must be at least SZ_4K.
	 */
	if (gc->db_page_size < SZ_4K) {
		dev_err(gc->dev,
			"Doorbell page size %u too small (min %u)\n",
			gc->db_page_size, SZ_4K);
		return -EPROTO;
	}

	gc->db_page_off = mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);

	/* Validate doorbell offset is within BAR0 */
	if (gc->db_page_off >= gc->bar0_size) {
		dev_err(gc->dev,
			"Doorbell offset 0x%llx exceeds BAR0 size 0x%llx\n",
			gc->db_page_off, (u64)gc->bar0_size);
		return -EPROTO;
	}

	gc->db_page_base = gc->bar0_va + gc->db_page_off;
	gc->phys_db_page_base = gc->bar0_pa + gc->db_page_off;

	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
	if (sriov_base_off >= gc->bar0_size ||
	    gc->bar0_size - sriov_base_off <
	    GDMA_PF_REG_SHM_OFF + sizeof(u64) ||
	    !IS_ALIGNED(sriov_base_off, sizeof(u64))) {
		dev_err(gc->dev,
			"SRIOV base offset 0x%llx out of range or unaligned (BAR0 size 0x%llx)\n",
			sriov_base_off, (u64)gc->bar0_size);
		return -EPROTO;
	}

	remaining_barsize = gc->bar0_size - sriov_base_off;
	sriov_shm_off = mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
	if (sriov_shm_off >= remaining_barsize ||
	    remaining_barsize - sriov_shm_off < SMC_APERTURE_SIZE ||
	    !IS_ALIGNED(sriov_shm_off, sizeof(u32))) {
		dev_err(gc->dev,
			"SRIOV SHM offset 0x%llx out of range or unaligned (BAR0 size 0x%llx)\n",
			sriov_shm_off, (u64)gc->bar0_size);
		return -EPROTO;
	}

	gc->shm_base = gc->bar0_va + sriov_base_off + sriov_shm_off;

	return 0;
}

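/* VF counterpart of mana_gd_init_pf_regs(): the doorbell and shared-memory
 * offsets live directly in the VF register block, with the same bounds
 * checks against BAR0.
 */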
static int mana_gd_init_vf_regs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	u64 shm_off;

	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;

	/* mana_gd_ring_doorbell() accesses offsets up to DOORBELL_OFFSET_EQ
	 * (0xFF8) + 8 bytes = 4KB within each doorbell page, so the page
	 * size must be at least SZ_4K.
	 */
	if (gc->db_page_size < SZ_4K) {
		dev_err(gc->dev,
			"Doorbell page size %u too small (min %u)\n",
			gc->db_page_size, SZ_4K);
		return -EPROTO;
	}

	gc->db_page_off = mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

	/* Validate doorbell offset is within BAR0 */
	if (gc->db_page_off >= gc->bar0_size) {
		dev_err(gc->dev,
			"Doorbell offset 0x%llx exceeds BAR0 size 0x%llx\n",
			gc->db_page_off, (u64)gc->bar0_size);
		return -EPROTO;
	}

	gc->db_page_base = gc->bar0_va + gc->db_page_off;
	gc->phys_db_page_base = gc->bar0_pa + gc->db_page_off;

	shm_off = mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
	if (shm_off >= gc->bar0_size ||
	    gc->bar0_size - shm_off < SMC_APERTURE_SIZE ||
	    !IS_ALIGNED(shm_off, sizeof(u32))) {
		dev_err(gc->dev,
			"SHM offset 0x%llx out of range or unaligned (BAR0 size 0x%llx)\n",
			shm_off, (u64)gc->bar0_size);
		return -EPROTO;
	}

	gc->shm_base = gc->bar0_va + shm_off;

	return 0;
}

static int mana_gd_init_registers(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	if (gc->is_pf)
		return mana_gd_init_pf_regs(pdev);
	else
		return mana_gd_init_vf_regs(pdev);
}

/* Suppress error logging when the HWC timeout has been set to zero */
bool mana_need_log(struct gdma_context *gc, int err)
{
	struct hw_channel_context *hwc;

	if (err != -ETIMEDOUT)
		return true;

	if (!gc)
		return true;

	hwc = gc->hwc.driver_data;
	if (hwc && hwc->hwc_timeout == 0)
		return false;

	return true;
}

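/* Query the device limits (MSI-X vectors and EQ/CQ/SQ/RQ counts) and
 * clamp the number of queues this driver will use accordingly.
 */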
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_query_max_resources_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(req), sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	if (!pci_msix_can_alloc_dyn(pdev)) {
		if (gc->num_msix_usable > resp.max_msix)
			gc->num_msix_usable = resp.max_msix;
	} else {
		/* If dynamic allocation is enabled, the HWC MSI-X vector
		 * has already been allocated.
		 */
		gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
	}

	if (gc->num_msix_usable <= 1)
		return -ENOSPC;

	gc->max_num_queues = num_online_cpus();
	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
		gc->max_num_queues = MANA_MAX_NUM_QUEUES;

	if (gc->max_num_queues > resp.max_eq)
		gc->max_num_queues = resp.max_eq;

	if (gc->max_num_queues > resp.max_cq)
		gc->max_num_queues = resp.max_cq;

	if (gc->max_num_queues > resp.max_sq)
		gc->max_num_queues = resp.max_sq;

	if (gc->max_num_queues > resp.max_rq)
		gc->max_num_queues = resp.max_rq;

	/* The Hardware Channel (HWC) uses one MSI-X vector */
	if (gc->max_num_queues > gc->num_msix_usable - 1)
		gc->max_num_queues = gc->num_msix_usable - 1;

	return 0;
}

static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_query_hwc_timeout_resp resp = {};
	struct gdma_query_hwc_timeout_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
			     sizeof(req), sizeof(resp));
	req.timeout_ms = *timeout_val;
	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
		return err ? err : -EPROTO;

	*timeout_val = resp.timeout_ms;

	return 0;
}

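/* Walk the device list reported by the HWC and record the MANA Ethernet
 * and MANA IB devices in the GDMA context. Returns -ENODEV if no MANA
 * Ethernet device was reported.
 */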
static int mana_gd_detect_devices(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_list_devices_resp resp = {};
	struct gdma_general_req req = {};
	struct gdma_dev_id dev;
	int found_dev = 0;
	u16 dev_type;
	int err;
	u32 i;

	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
			     sizeof(resp));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	for (i = 0; i < GDMA_DEV_LIST_SIZE &&
	     found_dev < resp.num_of_devs; i++) {
		dev = resp.devs[i];
		dev_type = dev.type;

		/* Skip empty devices */
		if (dev.as_uint32 == 0)
			continue;

		found_dev++;

		/* HWC is already detected in mana_hwc_create_channel(). */
		if (dev_type == GDMA_DEVICE_HWC)
			continue;

		if (dev_type == GDMA_DEVICE_MANA) {
			gc->mana.gdma_context = gc;
			gc->mana.dev_id = dev;
		} else if (dev_type == GDMA_DEVICE_MANA_IB) {
			gc->mana_ib.dev_id = dev;
			gc->mana_ib.gdma_context = gc;
		}
	}

	return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
}

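/* Send a request to the device over the hardware channel (HWC) and wait
 * synchronously for its response.
 */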
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
EXPORT_SYMBOL_NS(mana_gd_send_request, "NET_MANA");

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi)
{
	dma_addr_t dma_handle;
	void *buf;

	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	gmi->dev = gc->dev;
	buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	gmi->dma_handle = dma_handle;
	gmi->virt_addr = buf;
	gmi->length = length;

	return 0;
}

void mana_gd_free_memory(struct gdma_mem_info *gmi)
{
	dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
			  gmi->dma_handle);
}

static int mana_gd_create_hw_eq(struct gdma_context *gc,
				struct gdma_queue *queue)
{
	struct gdma_create_queue_resp resp = {};
	struct gdma_create_queue_req req = {};
	int err;

	if (queue->type != GDMA_EQ)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.pdid = queue->gdma_dev->pdid;
	req.doolbell_id = queue->gdma_dev->doorbell;
	req.gdma_region = queue->mem_info.dma_region_handle;
	req.queue_size = queue->queue_size;
	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
	req.eq_pci_msix_index = queue->eq.msix_index;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
			resp.hdr.status);
		return err ? err : -EPROTO;
	}

	queue->id = resp.queue_index;
	queue->eq.disable_needed = true;
	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	return 0;
}

static int mana_gd_disable_queue(struct gdma_queue *queue)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	struct gdma_disable_queue_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	WARN_ON(queue->type != GDMA_EQ);

	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = queue->gdma_dev->dev_id;
	req.type = queue->type;
	req.queue_index = queue->id;
	req.alloc_res_id_on_creation = 1;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		if (mana_need_log(gc, err))
			dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n",
				err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	return 0;
}

#define DOORBELL_OFFSET_SQ	0x0
#define DOORBELL_OFFSET_RQ	0x400
#define DOORBELL_OFFSET_CQ	0x800
#define DOORBELL_OFFSET_EQ	0xFF8

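/* Build a doorbell entry for the given queue type and write it to that
 * queue type's slot within the doorbell page selected by db_index.
 */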
static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
				  enum gdma_queue_type q_type, u32 qid,
				  u32 tail_ptr, u8 num_req)
{
	void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
	union gdma_doorbell_entry e = {};

	switch (q_type) {
	case GDMA_EQ:
		e.eq.id = qid;
		e.eq.tail_ptr = tail_ptr;
		e.eq.arm = num_req;

		addr += DOORBELL_OFFSET_EQ;
		break;

	case GDMA_CQ:
		e.cq.id = qid;
		e.cq.tail_ptr = tail_ptr;
		e.cq.arm = num_req;

		addr += DOORBELL_OFFSET_CQ;
		break;

	case GDMA_RQ:
		e.rq.id = qid;
		e.rq.tail_ptr = tail_ptr;
		e.rq.wqe_cnt = num_req;

		addr += DOORBELL_OFFSET_RQ;
		break;

	case GDMA_SQ:
		e.sq.id = qid;
		e.sq.tail_ptr = tail_ptr;

		addr += DOORBELL_OFFSET_SQ;
		break;

	default:
		WARN_ON(1);
		return;
	}

	/* Ensure all writes are done before ringing the doorbell */
	wmb();

	writeq(e.as_uint64, addr);
}

void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
{
	/* The hardware spec specifies that the software client should set 0
	 * for wqe_cnt for Receive Queues. This value is not used in Send
	 * Queues.
	 */
	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
	struct gdma_context *gc = cq->gdma_dev->gdma_context;

	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;

	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
			      head, arm_bit);
}
EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");

#define MANA_SERVICE_PERIOD 10

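/* The mana_serv_*() routines below carry out the service actions the
 * device can request: a PCI rescan of the device, an FPGA reconfiguration
 * cycle, or a full suspend/resume reset cycle.
 */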
static void mana_serv_rescan(struct pci_dev *pdev)
{
	struct pci_bus *parent;

	pci_lock_rescan_remove();

	parent = pdev->bus;
	if (!parent) {
		dev_err(&pdev->dev, "MANA service: no parent bus\n");
		goto out;
	}

	pci_stop_and_remove_bus_device(pdev);
	pci_rescan_bus(parent);

out:
	pci_unlock_rescan_remove();
}

static void mana_serv_fpga(struct pci_dev *pdev)
{
	struct pci_bus *bus, *parent;

	pci_lock_rescan_remove();

	bus = pdev->bus;
	if (!bus) {
		dev_err(&pdev->dev, "MANA service: no bus\n");
		goto out;
	}

	parent = bus->parent;
	if (!parent) {
		dev_err(&pdev->dev, "MANA service: no parent bus\n");
		goto out;
	}

	pci_stop_and_remove_bus_device(bus->self);

	msleep(MANA_SERVICE_PERIOD * 1000);

	pci_rescan_bus(parent);

out:
	pci_unlock_rescan_remove();
}

static void mana_serv_reset(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct hw_channel_context *hwc;
	int ret;

	if (!gc) {
		/* Perform PCI rescan on device if GC is not set up */
		dev_err(&pdev->dev, "MANA service: GC not setup, rescanning\n");
		mana_serv_rescan(pdev);
		return;
	}

	hwc = gc->hwc.driver_data;
	if (!hwc) {
		dev_err(&pdev->dev, "MANA service: no HWC\n");
		goto out;
	}

	/* HWC is not responding in this case, so don't wait */
	hwc->hwc_timeout = 0;

	dev_info(&pdev->dev, "MANA reset cycle start\n");

	mana_gd_suspend(pdev, PMSG_SUSPEND);

	msleep(MANA_SERVICE_PERIOD * 1000);

	ret = mana_gd_resume(pdev);
	if (ret == -ETIMEDOUT || ret == -EPROTO) {
		/* Perform PCI rescan on device if we failed on HWC */
		dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n");
		mana_serv_rescan(pdev);
		return;
	}

	if (ret)
		dev_info(&pdev->dev, "MANA reset cycle failed err %d\n", ret);
	else
		dev_info(&pdev->dev, "MANA reset cycle completed\n");

out:
	clear_bit(GC_IN_SERVICE, &gc->flags);
}

static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev)
{
	switch (type) {
	case GDMA_EQE_HWC_FPGA_RECONFIG:
		mana_serv_fpga(pdev);
		break;

	case GDMA_EQE_HWC_RESET_REQUEST:
		mana_serv_reset(pdev);
		break;

	default:
		dev_err(&pdev->dev, "MANA service: unknown type %d\n", type);
		break;
	}
}

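/* Drain the recovery list: run the requested service action for each
 * queued device, dropping the list lock while the action runs.
 */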
static void mana_recovery_delayed_func(struct work_struct *w)
{
	struct mana_dev_recovery_work *work;
	struct mana_dev_recovery *dev;
	unsigned long flags;

	work = container_of(w, struct mana_dev_recovery_work, work.work);

	spin_lock_irqsave(&work->lock, flags);

	while (!list_empty(&work->dev_list)) {
		dev = list_first_entry(&work->dev_list,
				       struct mana_dev_recovery, list);
		list_del(&dev->list);
		spin_unlock_irqrestore(&work->lock, flags);

		mana_do_service(dev->type, dev->pdev);
		pci_dev_put(dev->pdev);
		kfree(dev);

		spin_lock_irqsave(&work->lock, flags);
	}

	spin_unlock_irqrestore(&work->lock, flags);
}

static void mana_serv_func(struct work_struct *w)
{
	struct mana_serv_work *mns_wk;
	struct pci_dev *pdev;

	mns_wk = container_of(w, struct mana_serv_work, serv_work);
	pdev = mns_wk->pdev;

	if (pdev)
		mana_do_service(mns_wk->type, pdev);

	pci_dev_put(pdev);
	kfree(mns_wk);
	module_put(THIS_MODULE);
}

int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type)
{
	struct mana_serv_work *mns_wk;

	if (test_and_set_bit(GC_IN_SERVICE, &gc->flags)) {
		dev_info(gc->dev, "Already in service\n");
		return -EBUSY;
	}

	if (!try_module_get(THIS_MODULE)) {
		dev_info(gc->dev, "Module is unloading\n");
		clear_bit(GC_IN_SERVICE, &gc->flags);
		return -ENODEV;
	}

	mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
	if (!mns_wk) {
		module_put(THIS_MODULE);
		clear_bit(GC_IN_SERVICE, &gc->flags);
		return -ENOMEM;
	}

	dev_info(gc->dev, "Start MANA service type:%d\n", type);
	mns_wk->pdev = to_pci_dev(gc->dev);
	mns_wk->type = type;
	pci_dev_get(mns_wk->pdev);
	INIT_WORK(&mns_wk->serv_work, mana_serv_func);
	schedule_work(&mns_wk->serv_work);
	return 0;
}

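/* Dispatch a single EQE at the current EQ head: completions are routed
 * to the owning CQ's callback, HWC events to the EQ callback, and
 * service requests are scheduled onto a workqueue.
 */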
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
	u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
	struct gdma_context *gc = eq->gdma_dev->gdma_context;
	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
	union gdma_eqe_info eqe_info;
	enum gdma_eqe_type type;
	struct gdma_event event;
	struct gdma_queue *cq;
	struct gdma_eqe *eqe;
	u32 cq_id;

	eqe = &eq_eqe_ptr[head];
	eqe_info.as_uint32 = eqe->eqe_info;
	type = eqe_info.type;

	switch (type) {
	case GDMA_EQE_COMPLETION:
		cq_id = eqe->details[0] & 0xFFFFFF;
		if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
			break;

		cq = gc->cq_table[cq_id];
		if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
			break;

		if (cq->cq.callback)
			cq->cq.callback(cq->cq.context, cq);

		break;

	case GDMA_EQE_TEST_EVENT:
		gc->test_event_eq_id = eq->id;
		complete(&gc->eq_test_event);
		break;

	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
	case GDMA_EQE_HWC_INIT_DATA:
	case GDMA_EQE_HWC_INIT_DONE:
	case GDMA_EQE_HWC_SOC_SERVICE:
	case GDMA_EQE_RNIC_QP_FATAL:
	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		if (!eq->eq.callback)
			break;

		event.type = type;
		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
		eq->eq.callback(eq->eq.context, eq, &event);
		break;

	case GDMA_EQE_HWC_FPGA_RECONFIG:
	case GDMA_EQE_HWC_RESET_REQUEST:
		dev_info(gc->dev, "Recv MANA service type:%d\n", type);

		if (!test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
			/* Device is in probe and we received a hardware reset
			 * event; the probe function will detect that the flag
			 * has changed and perform the service procedure.
			 */
			dev_info(gc->dev,
				 "Service is to be processed in probe\n");
			break;
		}
		mana_schedule_serv_work(gc, type);
		break;

	default:
		break;
	}
}

static void mana_gd_process_eq_events(void *arg)
{
	u32 owner_bits, new_bits, old_bits;
	union gdma_eqe_info eqe_info;
	struct gdma_eqe *eq_eqe_ptr;
	struct gdma_queue *eq = arg;
	struct gdma_context *gc;
	struct gdma_eqe *eqe;
	u32 head, num_eqe;
	int i;

	gc = eq->gdma_dev->gdma_context;

	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
	eq_eqe_ptr = eq->queue_mem_ptr;

	/* Process up to 5 EQEs at a time, and update the HW head. */
	for (i = 0; i < 5; i++) {
		eqe = &eq_eqe_ptr[eq->head % num_eqe];
		eqe_info.as_uint32 = eqe->eqe_info;
		owner_bits = eqe_info.owner_bits;

		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
		/* No more entries */
		if (owner_bits == old_bits) {
			/* return here without ringing the doorbell */
			if (i == 0)
				return;
			break;
		}

		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
		if (owner_bits != new_bits) {
			dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
			break;
		}

		/* Per GDMA spec, rmb is necessary after checking owner_bits,
		 * before reading eqe.
		 */
		rmb();

		mana_gd_process_eqe(eq);

		eq->head++;
	}

	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);

	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
			      head, SET_ARM_BIT);
}

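/* Attach an EQ to the IRQ context selected by spec->eq.msix_index so the
 * interrupt handler will poll it.
 */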
static int mana_gd_register_irq(struct gdma_queue *queue,
				const struct gdma_queue_spec *spec)
{
	struct gdma_dev *gd = queue->gdma_dev;
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	unsigned int msi_index;
	unsigned long flags;
	struct device *dev;
	int err = 0;

	gc = gd->gdma_context;
	dev = gc->dev;
	msi_index = spec->eq.msix_index;

	if (msi_index >= gc->num_msix_usable) {
		err = -ENOSPC;
		dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
			err, msi_index, gc->num_msix_usable);

		return err;
	}

	queue->eq.msix_index = msi_index;
	gic = xa_load(&gc->irq_contexts, msi_index);
	if (WARN_ON(!gic))
		return -EINVAL;

	spin_lock_irqsave(&gic->lock, flags);
	list_add_rcu(&queue->entry, &gic->eq_list);
	spin_unlock_irqrestore(&gic->lock, flags);

	return 0;
}

static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
	struct gdma_dev *gd = queue->gdma_dev;
	struct gdma_irq_context *gic;
	struct gdma_context *gc;
	unsigned int msix_index;
	unsigned long flags;
	struct gdma_queue *eq;

	gc = gd->gdma_context;

	/* At most num_online_cpus() + 1 interrupts are used. */
	msix_index = queue->eq.msix_index;
	if (WARN_ON(msix_index >= gc->num_msix_usable))
		return;

	gic = xa_load(&gc->irq_contexts, msix_index);
	if (WARN_ON(!gic))
		return;

	spin_lock_irqsave(&gic->lock, flags);
	list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
		if (queue == eq) {
			list_del_rcu(&eq->entry);
			break;
		}
	}
	spin_unlock_irqrestore(&gic->lock, flags);

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
	synchronize_rcu();
}

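/* Ask the device to generate a test EQE on the given EQ and wait for it,
 * verifying that interrupt and EQE delivery work end to end.
 */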
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
{
	struct gdma_generate_test_event_req req = {};
	struct gdma_general_resp resp = {};
	struct device *dev = gc->dev;
	int err;

	mutex_lock(&gc->eq_test_event_mutex);

	init_completion(&gc->eq_test_event);
	gc->test_event_eq_id = INVALID_QUEUE_ID;

	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
			     sizeof(req), sizeof(resp));

	req.hdr.dev_id = eq->gdma_dev->dev_id;
	req.queue_index = eq->id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		if (mana_need_log(gc, err))
			dev_err(dev, "test_eq failed: %d\n", err);
		goto out;
	}

	err = -EPROTO;

	if (resp.hdr.status) {
		dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
		goto out;
	}

	if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
		dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
		goto out;
	}

	if (eq->id != gc->test_event_eq_id) {
		dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
			gc->test_event_eq_id, eq->id);
		goto out;
	}

	err = 0;
out:
	mutex_unlock(&gc->eq_test_event_mutex);
	return err;
}

static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
			       struct gdma_queue *queue)
{
	int err;

	if (flush_events) {
		err = mana_gd_test_eq(gc, queue);
		if (err && mana_need_log(gc, err))
			dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
	}

	mana_gd_deregister_irq(queue);

	if (queue->eq.disable_needed)
		mana_gd_disable_queue(queue);
}

static int mana_gd_create_eq(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     bool create_hwq, struct gdma_queue *queue)
{
	struct gdma_context *gc = gd->gdma_context;
	struct device *dev = gc->dev;
	u32 log2_num_entries;
	int err;

	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
	queue->id = INVALID_QUEUE_ID;

	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);

	if (spec->eq.log2_throttle_limit > log2_num_entries) {
		dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
			spec->eq.log2_throttle_limit, log2_num_entries);
		return -EINVAL;
	}

	err = mana_gd_register_irq(queue, spec);
	if (err) {
		dev_err(dev, "Failed to register irq: %d\n", err);
		return err;
	}

	queue->eq.callback = spec->eq.callback;
	queue->eq.context = spec->eq.context;
	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;

	if (create_hwq) {
		err = mana_gd_create_hw_eq(gc, queue);
		if (err)
			goto out;

		err = mana_gd_test_eq(gc, queue);
		if (err)
			goto out;
	}

	return 0;
out:
	dev_err(dev, "Failed to create EQ: %d\n", err);
	mana_gd_destroy_eq(gc, false, queue);
	return err;
}

static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
			      struct gdma_queue *queue)
{
	u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);

	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
	queue->cq.parent = spec->cq.parent_eq;
	queue->cq.context = spec->cq.context;
	queue->cq.callback = spec->cq.callback;
}

static void mana_gd_destroy_cq(struct gdma_context *gc,
			       struct gdma_queue *queue)
{
	u32 id = queue->id;

	if (id >= gc->max_num_cqs)
		return;

	if (!gc->cq_table[id])
		return;

	gc->cq_table[id] = NULL;
}

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	queue = kzalloc_obj(*queue);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err) {
		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
			spec->type, spec->queue_size, err);
		goto free_q;
	}

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_EQ)
		err = mana_gd_create_eq(gd, spec, false, queue);
	else if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
		spec->type, spec->queue_size, err);
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
{
	struct gdma_destroy_dma_region_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
			     sizeof(resp));
	req.dma_region_handle = dma_region_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		if (mana_need_log(gc, err))
			dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
				err, resp.hdr.status);
		return -EPROTO;
	}

	return 0;
}
EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, "NET_MANA");

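/* Register a queue's backing memory with the device as a DMA region:
 * the request carries the DMA address of every MANA page in the buffer,
 * and the device returns a region handle used in later queue requests.
 */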
static int mana_gd_create_dma_region(struct gdma_dev *gd,
				     struct gdma_mem_info *gmi)
{
	unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
	struct gdma_create_dma_region_req *req = NULL;
	struct gdma_create_dma_region_resp resp = {};
	struct gdma_context *gc = gd->gdma_context;
	struct hw_channel_context *hwc;
	u32 length = gmi->length;
	size_t req_msg_size;
	int err;
	int i;

	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
		return -EINVAL;

	if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
		return -EINVAL;

	hwc = gc->hwc.driver_data;
	req_msg_size = struct_size(req, page_addr_list, num_page);
	if (req_msg_size > hwc->max_req_msg_size)
		return -EINVAL;

	req = kzalloc(req_msg_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
			     req_msg_size, sizeof(resp));
	req->length = length;
	req->offset_in_page = 0;
	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
	req->page_count = num_page;
	req->page_addr_list_len = num_page;

	for (i = 0; i < num_page; i++)
		req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;

	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
	if (err)
		goto out;

	if (resp.hdr.status ||
	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
			resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	gmi->dma_region_handle = resp.dma_region_handle;
	dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
		gmi->dma_region_handle);
out:
	if (err)
		dev_dbg(gc->dev,
			"Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
			length, req->gdma_page_type, resp.hdr.status, err);
	kfree(req);
	return err;
}

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_EQ)
		return -EINVAL;

	queue = kzalloc_obj(*queue);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err) {
		dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
			spec->type, spec->queue_size, err);
		goto free_q;
	}

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	err = mana_gd_create_eq(gd, spec, true, queue);
	if (err)
		goto out;

	*queue_ptr = queue;
	return 0;
out:
	dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
		spec->type, spec->queue_size, err);
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}
EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, "NET_MANA");

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_mem_info *gmi;
	struct gdma_queue *queue;
	int err;

	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
	    spec->type != GDMA_RQ)
		return -EINVAL;

	queue = kzalloc_obj(*queue);
	if (!queue)
		return -ENOMEM;

	gmi = &queue->mem_info;
	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
	if (err) {
		dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
			spec->type, spec->queue_size, err);
		goto free_q;
	}

	err = mana_gd_create_dma_region(gd, gmi);
	if (err)
		goto out;

	queue->head = 0;
	queue->tail = 0;
	queue->queue_mem_ptr = gmi->virt_addr;
	queue->queue_size = spec->queue_size;
	queue->monitor_avl_buf = spec->monitor_avl_buf;
	queue->type = spec->type;
	queue->gdma_dev = gd;

	if (spec->type == GDMA_CQ)
		mana_gd_create_cq(spec, queue);

	*queue_ptr = queue;
	return 0;
out:
	dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
		spec->type, spec->queue_size, err);
	mana_gd_free_memory(gmi);
free_q:
	kfree(queue);
	return err;
}
EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
	struct gdma_mem_info *gmi = &queue->mem_info;

	switch (queue->type) {
	case GDMA_EQ:
		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
		break;

	case GDMA_CQ:
		mana_gd_destroy_cq(gc, queue);
		break;

	case GDMA_RQ:
		break;

	case GDMA_SQ:
		break;

	default:
		dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
			queue->type);
		return;
	}

	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
	mana_gd_free_memory(gmi);
	kfree(queue);
}
EXPORT_SYMBOL_NS(mana_gd_destroy_queue, "NET_MANA");

int mana_gd_verify_vf_version(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_verify_ver_resp resp = {};
	struct gdma_verify_ver_req req = {};
	struct hw_channel_context *hwc;
	int err;

	hwc = gc->hwc.driver_data;
	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
			     sizeof(req), sizeof(resp));

	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req.protocol_ver_max = GDMA_PROTOCOL_LAST;

	req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
	req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
	req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
	req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;

	req.drv_ver = 0;	/* Unused */
	req.os_type = 0x10;	/* Linux */
	req.os_ver_major = LINUX_VERSION_MAJOR;
	req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
	req.os_ver_build = LINUX_VERSION_SUBLEVEL;
	strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
	strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
	strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}
	gc->pf_cap_flags1 = resp.pf_cap_flags1;
	if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
		err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
		if (err) {
			dev_err(gc->dev, "Failed to query the hwc timeout: %d\n",
				err);
			return err;
		}
		dev_dbg(gc->dev, "set the hwc timeout to %u\n", hwc->hwc_timeout);
	}
	return 0;
}

int mana_gd_register_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_register_device_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
			err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	/* Validate that the doorbell page for db_id is within the BAR0
	 * region. In mana_gd_ring_doorbell(), the address is calculated as:
	 *   addr = db_page_base + db_page_size * db_id
	 *        = (bar0_va + db_page_off) + (db_page_size * db_id)
	 * So we need: db_page_off + db_page_size * (db_id + 1) <= bar0_size
	 */
	if (gc->db_page_off + gc->db_page_size * ((u64)resp.db_id + 1) > gc->bar0_size) {
		dev_err(gc->dev, "Doorbell ID %u out of range\n", resp.db_id);
		return -EPROTO;
	}

	gd->pdid = resp.pdid;
	gd->gpa_mkey = resp.gpa_mkey;
	gd->doorbell = resp.db_id;

	return 0;
}

int mana_gd_deregister_device(struct gdma_dev *gd)
{
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_general_resp resp = {};
	struct gdma_general_req req = {};
	int err;

	if (gd->pdid == INVALID_PDID)
		return -EINVAL;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
			     sizeof(resp));

	req.hdr.dev_id = gd->dev_id;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		if (mana_need_log(gc, err))
			dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
				err, resp.hdr.status);
		if (!err)
			err = -EPROTO;
	}

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;
	gd->gpa_mkey = INVALID_MEM_KEY;

	return err;
}

u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
	u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
	u32 wq_size = wq->queue_size;

	WARN_ON_ONCE(used_space > wq_size);

	return wq_size - used_space;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
{
	u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);

	WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);

	return wq->queue_mem_ptr + offset;
}

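/* Fill in the WQE header and the inline client OOB data at wqe_ptr, and
 * return the number of bytes written so the caller can locate the SGL.
 */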
static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
				    enum gdma_queue_type q_type,
				    u32 client_oob_size, u32 sgl_data_size,
				    u8 *wqe_ptr)
{
	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
	u8 *ptr;

	memset(header, 0, sizeof(struct gdma_wqe));
	header->num_sge = wqe_req->num_sge;
	header->inline_oob_size_div4 = client_oob_size / sizeof(u32);

	if (oob_in_sgl) {
		WARN_ON_ONCE(wqe_req->num_sge < 2);

		header->client_oob_in_sgl = 1;

		if (pad_data)
			header->last_vbytes = wqe_req->sgl[0].size;
	}

	if (q_type == GDMA_SQ)
		header->client_data_unit = wqe_req->client_data_unit;

	/* The size of gdma_wqe + client_oob_size must be less than or equal
	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
	 * the queue memory buffer boundary.
	 */
	ptr = wqe_ptr + sizeof(*header);

	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);

		if (client_oob_size > wqe_req->inline_oob_size)
			memset(ptr + wqe_req->inline_oob_size, 0,
			       client_oob_size - wqe_req->inline_oob_size);
	}

	return sizeof(*header) + client_oob_size;
}

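/* Copy the SGL into the work queue buffer, splitting the copy in two
 * when it wraps past the end of the ring.
 */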
static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
			      const struct gdma_wqe_request *wqe_req)
{
	u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	const u8 *address = (u8 *)wqe_req->sgl;
	u8 *base_ptr, *end_ptr;
	u32 size_to_end;

	base_ptr = wq->queue_mem_ptr;
	end_ptr = base_ptr + wq->queue_size;
	size_to_end = (u32)(end_ptr - wqe_ptr);

	if (size_to_end < sgl_size) {
		memcpy(wqe_ptr, address, size_to_end);

		wqe_ptr = base_ptr;
		address += size_to_end;
		sgl_size -= size_to_end;
	}

	memcpy(wqe_ptr, address, sgl_size);
}

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info)
{
	u32 client_oob_size = wqe_req->inline_oob_size;
	u32 sgl_data_size;
	u32 max_wqe_size;
	u32 wqe_size;
	u8 *wqe_ptr;

	if (wqe_req->num_sge == 0)
		return -EINVAL;

	if (wq->type == GDMA_RQ) {
		if (client_oob_size != 0)
			return -EINVAL;

		client_oob_size = INLINE_OOB_SMALL_SIZE;

		max_wqe_size = GDMA_MAX_RQE_SIZE;
	} else {
		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
		    client_oob_size != INLINE_OOB_LARGE_SIZE)
			return -EINVAL;

		max_wqe_size = GDMA_MAX_SQE_SIZE;
	}

	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
			 sgl_data_size, GDMA_WQE_BU_SIZE);
	if (wqe_size > max_wqe_size)
		return -EINVAL;

	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
		return -ENOSPC;

	if (wqe_info)
		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;

	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
					    sgl_data_size, wqe_ptr);
	if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
		wqe_ptr -= wq->queue_size;

	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);

	wq->head += wqe_size / GDMA_WQE_BU_SIZE;

	return 0;
}
EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe_req,
			  struct gdma_posted_wqe_info *wqe_info)
{
	struct gdma_context *gc = queue->gdma_dev->gdma_context;
	int err;

	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
	if (err) {
		dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
			queue->type, queue->queue_size, err);
		return err;
	}

	mana_gd_wq_ring_doorbell(gc, queue);

	return 0;
}

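/* Read one CQE at the current CQ head. Returns 1 if a new entry was
 * copied into *comp, 0 if the queue is empty, and -1 on overflow, as
 * judged by the CQE owner bits.
 */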
static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
{
	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
	u32 owner_bits, new_bits, old_bits;
	struct gdma_cqe *cqe;

	cqe = &cq_cqe[cq->head % num_cqe];
	owner_bits = cqe->cqe_info.owner_bits;

	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
	/* Return 0 if no more entries. */
	if (owner_bits == old_bits)
		return 0;

	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
	/* Return -1 if overflow detected. */
	if (WARN_ON_ONCE(owner_bits != new_bits))
		return -1;

	/* Per GDMA spec, rmb is necessary after checking owner_bits, before
	 * reading completion info
	 */
	rmb();

	comp->wq_num = cqe->cqe_info.wq_num;
	comp->is_sq = cqe->cqe_info.is_sq;
	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);

	return 1;
}

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
{
	int cqe_idx;
	int ret;

	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);

		if (ret < 0) {
			cq->head -= cqe_idx;
			return ret;
		}

		if (ret == 0)
			break;

		cq->head++;
	}

	return cqe_idx;
}
EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");

static irqreturn_t mana_gd_intr(int irq, void *arg)
{
	struct gdma_irq_context *gic = arg;
	struct list_head *eq_list = &gic->eq_list;
	struct gdma_queue *eq;

	rcu_read_lock();
	list_for_each_entry_rcu(eq, eq_list, entry) {
		gic->handler(eq);
	}
	rcu_read_unlock();

	return IRQ_HANDLED;
}

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
{
	r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
	if (!r->map)
		return -ENOMEM;

	r->size = res_avail;
	spin_lock_init(&r->lock);

	return 0;
}

void mana_gd_free_res_map(struct gdma_resource *r)
{
	bitmap_free(r->map);
	r->map = NULL;
	r->size = 0;
}

/*
 * Spread IRQs on CPUs with the following heuristics:
 *
 * 1. No more than one IRQ per CPU, if possible;
 * 2. NUMA locality is the second priority;
 * 3. Sibling dislocality is the last priority.
 *
 * Let's consider this topology:
 *
 * Node            0               1
 * Core        0       1       2       3
 * CPU        0   1   2   3   4   5   6   7
 *
 * The most performant IRQ distribution based on the above topology
 * and heuristics may look like this:
 *
 * IRQ     Nodes   Cores   CPUs
 * 0       0       0       0-1
 * 1       0       1       2-3
 * 2       0       0       0-1
 * 3       0       1       2-3
 * 4       1       2       4-5
 * 5       1       3       6-7
 * 6       1       2       4-5
 * 7       1       3       6-7
 *
 * The heuristic is implemented as follows.
 *
 * The outer for_each() loop resets the 'weight' to the actual number
 * of CPUs in the hop. Then the inner for_each() loop decrements it by
 * the number of sibling groups (cores) while assigning the first set
 * of IRQs to each group. IRQs 0 and 1 above are distributed this way.
 *
 * Now, because NUMA locality is more important, we should walk the
 * same set of siblings and assign the 2nd set of IRQs (2 and 3); this
 * is implemented by the middle while() loop. We do this until the
 * number of IRQs assigned on this hop equals the number of CPUs in
 * the hop (weight == 0). Then we switch to the next hop and do the
 * same thing.
 */

static int irq_setup(unsigned int *irqs, unsigned int len, int node,
		     bool skip_first_cpu)
{
	const struct cpumask *next, *prev = cpu_none_mask;
	cpumask_var_t cpus __free(free_cpumask_var);
	int cpu, weight;

	if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	for_each_numa_hop_mask(next, node) {
		weight = cpumask_weight_andnot(next, prev);
		while (weight > 0) {
			cpumask_andnot(cpus, next, prev);
			for_each_cpu(cpu, cpus) {
				cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
				--weight;

				if (unlikely(skip_first_cpu)) {
					skip_first_cpu = false;
					continue;
				}

				if (len-- == 0)
					goto done;

				irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
			}
		}
		prev = next;
	}
done:
	rcu_read_unlock();
	return 0;
}

static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	bool skip_first_cpu = false;
	int *irqs, irq, err, i;

	irqs = kmalloc_objs(int, nvec);
	if (!irqs)
		return -ENOMEM;

	/* We iterate the PCI IRQ vectors starting at index 1, because the
	 * vector at index 0 has already been set up for the HWC. The irqs
	 * array, however, is populated from index 0, as it is later passed
	 * to irq_setup().
	 */
	for (i = 1; i <= nvec; i++) {
		gic = kzalloc_obj(*gic);
		if (!gic) {
			err = -ENOMEM;
			goto free_irq;
		}
		gic->handler = mana_gd_process_eq_events;
		INIT_LIST_HEAD(&gic->eq_list);
		spin_lock_init(&gic->lock);

		snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
			 i - 1, pci_name(pdev));

		/* one pci vector is already allocated for HWC */
		irqs[i - 1] = pci_irq_vector(pdev, i);
		if (irqs[i - 1] < 0) {
			err = irqs[i - 1];
			goto free_current_gic;
		}

		err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
		if (err)
			goto free_current_gic;

		xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
	}

	/* When calling irq_setup() for dynamically added IRQs: if the number
	 * of CPUs is greater than or equal to the number of allocated MSI-X
	 * vectors, skip the first CPU sibling group, since it is already
	 * affinitized to the HWC IRQ.
	 */
	cpus_read_lock();
	if (gc->num_msix_usable <= num_online_cpus())
		skip_first_cpu = true;

	err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
	if (err) {
		cpus_read_unlock();
		goto free_irq;
	}

	cpus_read_unlock();
	kfree(irqs);
	return 0;

free_current_gic:
	kfree(gic);
free_irq:
	for (i -= 1; i > 0; i--) {
		irq = pci_irq_vector(pdev, i);
		gic = xa_load(&gc->irq_contexts, i);
		if (WARN_ON(!gic))
			continue;

		irq_update_affinity_hint(irq, NULL);
		free_irq(irq, gic);
		xa_erase(&gc->irq_contexts, i);
		kfree(gic);
	}
	kfree(irqs);
	return err;
}

static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	int *irqs, *start_irqs, irq;
	unsigned int cpu;
	int err, i;

	irqs = kmalloc_objs(int, nvec);
	if (!irqs)
		return -ENOMEM;

	start_irqs = irqs;

	for (i = 0; i < nvec; i++) {
		gic = kzalloc_obj(*gic);
		if (!gic) {
			err = -ENOMEM;
			goto free_irq;
		}

		gic->handler = mana_gd_process_eq_events;
		INIT_LIST_HEAD(&gic->eq_list);
		spin_lock_init(&gic->lock);

		if (!i)
			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
				 pci_name(pdev));
		else
			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
				 i - 1, pci_name(pdev));

		irqs[i] = pci_irq_vector(pdev, i);
		if (irqs[i] < 0) {
			err = irqs[i];
			goto free_current_gic;
		}

		err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
		if (err)
			goto free_current_gic;

		xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
	}

	/* If the number of IRQs is one more than the number of online CPUs,
	 * then IRQ0 (the HWC IRQ) and IRQ1 are assigned to the same CPU.
	 * Otherwise, IRQ0 and IRQ1 go to different CPUs. Note that
	 * cpumask_local_spread is used instead of cpumask_first for the
	 * node, because the node can be memory-only.
	 */
	cpus_read_lock();
	if (nvec > num_online_cpus()) {
		cpu = cpumask_local_spread(0, gc->numa_node);
		irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
		irqs++;
		nvec -= 1;
	}

	err = irq_setup(irqs, nvec, gc->numa_node, false);
	if (err) {
		cpus_read_unlock();
		goto free_irq;
	}

	cpus_read_unlock();
	kfree(start_irqs);
	return 0;

free_current_gic:
	kfree(gic);
free_irq:
	for (i -= 1; i >= 0; i--) {
		irq = pci_irq_vector(pdev, i);
		gic = xa_load(&gc->irq_contexts, i);
		if (WARN_ON(!gic))
			continue;

		irq_update_affinity_hint(irq, NULL);
		free_irq(irq, gic);
		xa_erase(&gc->irq_contexts, i);
		kfree(gic);
	}

	kfree(start_irqs);
	return err;
}

static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	unsigned int max_irqs, min_irqs;
	int nvec, err;

	if (pci_msix_can_alloc_dyn(pdev)) {
		max_irqs = 1;
		min_irqs = 1;
	} else {
		/* Need 1 interrupt for HWC */
		max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
		min_irqs = 2;
	}

	nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	err = mana_gd_setup_irqs(pdev, nvec);
	if (err) {
		pci_free_irq_vectors(pdev);
		return err;
	}

	gc->num_msix_usable = nvec;
	gc->max_num_msix = nvec;

	return 0;
}

mana_gd_setup_remaining_irqs(struct pci_dev * pdev)1907 static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
1908 {
1909 struct gdma_context *gc = pci_get_drvdata(pdev);
1910 struct msi_map irq_map;
1911 int max_irqs, i, err;
1912
1913 if (!pci_msix_can_alloc_dyn(pdev))
1914 /* remain irqs are already allocated with HWC IRQ */
1915 return 0;
1916
1917 /* allocate only remaining IRQs*/
1918 max_irqs = gc->num_msix_usable - 1;

	for (i = 1; i <= max_irqs; i++) {
		irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
		if (!irq_map.virq) {
			err = irq_map.index;
			/* The caller will clean up all allocated IRQs
			 * after the HWC is destroyed.
			 */
			return err;
		}
	}

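	/* The vectors are allocated above; mana_gd_setup_dyn_irqs() (defined
	 * earlier in this file) is expected to create the IRQ contexts and
	 * request the handlers for them, mirroring mana_gd_setup_irqs().
	 */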
	err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
	if (err)
		return err;

	gc->max_num_msix += max_irqs;

	return 0;
}

static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	struct gdma_irq_context *gic;
	int irq, i;

	if (gc->max_num_msix < 1)
		return;

	for (i = 0; i < gc->max_num_msix; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq < 0)
			continue;

		gic = xa_load(&gc->irq_contexts, i);
		if (WARN_ON(!gic))
			continue;

		/* Need to clear the hint before free_irq */
		irq_update_affinity_hint(irq, NULL);
		free_irq(irq, gic);
		xa_erase(&gc->irq_contexts, i);
		kfree(gic);
	}

	pci_free_irq_vectors(pdev);

	gc->max_num_msix = 0;
	gc->num_msix_usable = 0;
}

static int mana_gd_setup(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	int err;

	err = mana_gd_init_registers(pdev);
	if (err)
		return err;

	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);

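	/* Ordered workqueue used to service hardware events (such as reset
	 * requests delivered over the HWC) one at a time.
	 */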
	gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
	if (!gc->service_wq)
		return -ENOMEM;

	err = mana_gd_setup_hwc_irqs(pdev);
	if (err) {
		dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
			err);
		goto free_workqueue;
	}

	err = mana_hwc_create_channel(gc);
	if (err)
		goto remove_irq;

	err = mana_gd_verify_vf_version(pdev);
	if (err)
		goto destroy_hwc;

	err = mana_gd_query_max_resources(pdev);
	if (err)
		goto destroy_hwc;

	err = mana_gd_setup_remaining_irqs(pdev);
	if (err) {
		dev_err(gc->dev, "Failed to setup remaining IRQs: %d\n", err);
		goto destroy_hwc;
	}

	err = mana_gd_detect_devices(pdev);
	if (err)
		goto destroy_hwc;

	dev_dbg(&pdev->dev, "mana gdma setup successful\n");
	return 0;

destroy_hwc:
	mana_hwc_destroy_channel(gc);
remove_irq:
	mana_gd_remove_irqs(pdev);
free_workqueue:
	destroy_workqueue(gc->service_wq);
	gc->service_wq = NULL;
	dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
	return err;
}


static void mana_gd_cleanup(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_hwc_destroy_channel(gc);

	mana_gd_remove_irqs(pdev);

	if (gc->service_wq) {
		destroy_workqueue(gc->service_wq);
		gc->service_wq = NULL;
	}
	dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}

static bool mana_is_pf(unsigned short dev_id)
{
	return dev_id == MANA_PF_DEVICE_ID;
}

static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct gdma_context *gc;
	void __iomem *bar0_va;
	int bar = 0;
	int err;

	/* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
	BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
		return -ENXIO;
	}

	pci_set_master(pdev);

	err = pci_request_regions(pdev, "mana");
	if (err)
		goto disable_dev;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
		goto release_region;
	}
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	err = -ENOMEM;
	gc = vzalloc(sizeof(*gc));
	if (!gc)
		goto release_region;

	mutex_init(&gc->eq_test_event_mutex);
	pci_set_drvdata(pdev, gc);
	gc->bar0_pa = pci_resource_start(pdev, 0);
	gc->bar0_size = pci_resource_len(pdev, 0);

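	/* Map all of BAR0 (a maxlen of 0 asks pci_iomap() to map the entire
	 * resource).
	 */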
	bar0_va = pci_iomap(pdev, bar, 0);
	if (!bar0_va)
		goto free_gc;

	gc->numa_node = dev_to_node(&pdev->dev);
	gc->is_pf = mana_is_pf(pdev->device);
	gc->bar0_va = bar0_va;
	gc->dev = &pdev->dev;
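	/* irq_contexts maps MSI-X vector index -> gdma_irq_context; entries
	 * are populated when the IRQs are set up.
	 */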
	xa_init(&gc->irq_contexts);

	gc->mana_pci_debugfs = debugfs_create_dir(pci_name(pdev),
						  mana_debugfs_root);

	err = mana_gd_setup(pdev);
	if (err)
		goto unmap_bar;

	err = mana_probe(&gc->mana, false);
	if (err)
		goto cleanup_gd;

	err = mana_rdma_probe(&gc->mana_ib);
	if (err)
		goto cleanup_mana;

	/* If a hardware reset event arrived over the HWC during probe,
	 * roll back and perform the hardware reset procedure.
	 */
	if (test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
		err = -EPROTO;
		goto cleanup_mana_rdma;
	}

	return 0;

cleanup_mana_rdma:
	mana_rdma_remove(&gc->mana_ib);
cleanup_mana:
	mana_remove(&gc->mana, false);
cleanup_gd:
	mana_gd_cleanup(pdev);
unmap_bar:
	/* At this point the other debugfs child dirs/files are either not
	 * yet created or already cleaned up, so removing the PCI debugfs
	 * dir here only cleans up the adapter-MTU file and the
	 * gc->mana_pci_debugfs dir itself.
	 */
	debugfs_remove_recursive(gc->mana_pci_debugfs);
	gc->mana_pci_debugfs = NULL;
	xa_destroy(&gc->irq_contexts);
	pci_iounmap(pdev, bar0_va);
free_gc:
	pci_set_drvdata(pdev, NULL);
	vfree(gc);
release_region:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
	dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);

	/* The hardware could be in recovery mode, causing the HWC to return
	 * -ETIMEDOUT or -EPROTO from mana_gd_setup(), mana_probe() or
	 * mana_rdma_probe(); or we received a hardware reset event over the
	 * HWC interrupt. In either case, perform the device recovery
	 * procedure after MANA_SERVICE_PERIOD seconds.
	 */
	if (err == -ETIMEDOUT || err == -EPROTO) {
		struct mana_dev_recovery *dev;
		unsigned long flags;

		dev_info(&pdev->dev, "Start MANA recovery mode\n");

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return err;

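		/* Record the failed probe as if the hardware had requested a
		 * reset, so the recovery worker re-probes this device; hold
		 * a reference on the pci_dev until the worker drops it.
		 */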
		dev->pdev = pci_dev_get(pdev);
		dev->type = GDMA_EQE_HWC_RESET_REQUEST;

		spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
		list_add_tail(&dev->list, &mana_dev_recovery_work.dev_list);
		spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);

		schedule_delayed_work(&mana_dev_recovery_work.work,
				      secs_to_jiffies(MANA_SERVICE_PERIOD));
	}

	return err;
}


static void mana_gd_remove(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_rdma_remove(&gc->mana_ib);
	mana_remove(&gc->mana, false);

	mana_gd_cleanup(pdev);

	debugfs_remove_recursive(gc->mana_pci_debugfs);
	gc->mana_pci_debugfs = NULL;

	xa_destroy(&gc->irq_contexts);

	pci_iounmap(pdev, gc->bar0_va);

	vfree(gc);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}

/* The 'state' parameter is not used. */
int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	mana_rdma_remove(&gc->mana_ib);
	mana_remove(&gc->mana, true);

	mana_gd_cleanup(pdev);

	return 0;
}


/* If the NIC hardware has stopped working, the suspend and resume callbacks
 * will fail -- in that case, it's safer to just report an error than to try
 * to undo what has been done.
 */
int mana_gd_resume(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	int err;

	err = mana_gd_setup(pdev);
	if (err)
		return err;

	err = mana_probe(&gc->mana, true);
	if (err)
		return err;

	err = mana_rdma_probe(&gc->mana_ib);
	if (err)
		return err;

	return 0;
}


/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
static void mana_gd_shutdown(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "Shutdown was called\n");

	mana_rdma_remove(&gc->mana_ib);
	mana_remove(&gc->mana, true);

	mana_gd_cleanup(pdev);

	debugfs_remove_recursive(gc->mana_pci_debugfs);
	gc->mana_pci_debugfs = NULL;

	pci_disable_device(pdev);
}

static const struct pci_device_id mana_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
	{ }
};

static struct pci_driver mana_driver = {
	.name = "mana",
	.id_table = mana_id_table,
	.probe = mana_gd_probe,
	.remove = mana_gd_remove,
	.suspend = mana_gd_suspend,
	.resume = mana_gd_resume,
	.shutdown = mana_gd_shutdown,
};

static int __init mana_driver_init(void)
{
	int err;

	INIT_LIST_HEAD(&mana_dev_recovery_work.dev_list);
	spin_lock_init(&mana_dev_recovery_work.lock);
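	/* The delayed work is expected to drain dev_list and re-probe each
	 * recorded device (mana_recovery_delayed_func() is defined earlier
	 * in this file).
	 */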
	INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func);

	mana_debugfs_root = debugfs_create_dir("mana", NULL);

	err = pci_register_driver(&mana_driver);
	if (err) {
		debugfs_remove(mana_debugfs_root);
		mana_debugfs_root = NULL;
	}

	return err;
}

static void __exit mana_driver_exit(void)
{
	struct mana_dev_recovery *dev;
	unsigned long flags;

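	/* Stop the recovery worker first so it cannot race with the list
	 * drain below.
	 */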
	disable_delayed_work_sync(&mana_dev_recovery_work.work);

	spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
	while (!list_empty(&mana_dev_recovery_work.dev_list)) {
		dev = list_first_entry(&mana_dev_recovery_work.dev_list,
				       struct mana_dev_recovery, list);
		list_del(&dev->list);
		pci_dev_put(dev->pdev);
		kfree(dev);
	}
	spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);

	pci_unregister_driver(&mana_driver);

	debugfs_remove(mana_debugfs_root);
	mana_debugfs_root = NULL;
}

module_init(mana_driver_init);
module_exit(mana_driver_exit);

MODULE_DEVICE_TABLE(pci, mana_id_table);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");