xref: /linux/drivers/net/ethernet/microsoft/mana/gdma_main.c (revision 0ad53fe3ae82443c74ff8cfd7bd13377cc1134a3)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <linux/module.h>
5 #include <linux/pci.h>
6 
7 #include "mana.h"
8 
9 static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
10 {
11 	return readl(g->bar0_va + offset);
12 }
13 
14 static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
15 {
16 	return readq(g->bar0_va + offset);
17 }
18 
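/* Read the doorbell page size from BAR0 and compute the virtual addresses of
 * the doorbell page area and the shared memory region.
 */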
19 static void mana_gd_init_registers(struct pci_dev *pdev)
20 {
21 	struct gdma_context *gc = pci_get_drvdata(pdev);
22 
23 	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
24 
25 	gc->db_page_base = gc->bar0_va +
26 				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
27 
28 	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
29 }
30 
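/* Query the hardware for its resource limits, then clamp the number of usable
 * MSI-X vectors and the number of queues (EQ/CQ/SQ/RQ) accordingly, keeping
 * one MSI-X vector for the Hardware Channel (HWC).
 */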
31 static int mana_gd_query_max_resources(struct pci_dev *pdev)
32 {
33 	struct gdma_context *gc = pci_get_drvdata(pdev);
34 	struct gdma_query_max_resources_resp resp = {};
35 	struct gdma_general_req req = {};
36 	int err;
37 
38 	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
39 			     sizeof(req), sizeof(resp));
40 
41 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
42 	if (err || resp.hdr.status) {
43 		dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
44 			err, resp.hdr.status);
45 		return err ? err : -EPROTO;
46 	}
47 
48 	if (gc->num_msix_usable > resp.max_msix)
49 		gc->num_msix_usable = resp.max_msix;
50 
51 	if (gc->num_msix_usable <= 1)
52 		return -ENOSPC;
53 
54 	gc->max_num_queues = num_online_cpus();
55 	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
56 		gc->max_num_queues = MANA_MAX_NUM_QUEUES;
57 
58 	if (gc->max_num_queues > resp.max_eq)
59 		gc->max_num_queues = resp.max_eq;
60 
61 	if (gc->max_num_queues > resp.max_cq)
62 		gc->max_num_queues = resp.max_cq;
63 
64 	if (gc->max_num_queues > resp.max_sq)
65 		gc->max_num_queues = resp.max_sq;
66 
67 	if (gc->max_num_queues > resp.max_rq)
68 		gc->max_num_queues = resp.max_rq;
69 
70 	/* The Hardware Channel (HWC) uses 1 MSI-X */
71 	if (gc->max_num_queues > gc->num_msix_usable - 1)
72 		gc->max_num_queues = gc->num_msix_usable - 1;
73 
74 	return 0;
75 }
76 
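/* Enumerate the GDMA devices exposed by the hardware and record the MANA
 * device ID. The HWC device is skipped here because it is set up earlier by
 * mana_hwc_create_channel().
 */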
77 static int mana_gd_detect_devices(struct pci_dev *pdev)
78 {
79 	struct gdma_context *gc = pci_get_drvdata(pdev);
80 	struct gdma_list_devices_resp resp = {};
81 	struct gdma_general_req req = {};
82 	struct gdma_dev_id dev;
83 	u32 i, max_num_devs;
84 	u16 dev_type;
85 	int err;
86 
87 	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
88 			     sizeof(resp));
89 
90 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
91 	if (err || resp.hdr.status) {
92 		dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
93 			resp.hdr.status);
94 		return err ? err : -EPROTO;
95 	}
96 
97 	max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
98 
99 	for (i = 0; i < max_num_devs; i++) {
100 		dev = resp.devs[i];
101 		dev_type = dev.type;
102 
103 		/* HWC is already detected in mana_hwc_create_channel(). */
104 		if (dev_type == GDMA_DEVICE_HWC)
105 			continue;
106 
107 		if (dev_type == GDMA_DEVICE_MANA) {
108 			gc->mana.gdma_context = gc;
109 			gc->mana.dev_id = dev;
110 		}
111 	}
112 
113 	return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
114 }
115 
116 int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
117 			 u32 resp_len, void *resp)
118 {
119 	struct hw_channel_context *hwc = gc->hwc.driver_data;
120 
121 	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
122 }
123 
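/* Allocate a DMA-coherent buffer for a queue. The length must be a power of
 * two and at least PAGE_SIZE.
 */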
124 int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
125 			 struct gdma_mem_info *gmi)
126 {
127 	dma_addr_t dma_handle;
128 	void *buf;
129 
130 	if (length < PAGE_SIZE || !is_power_of_2(length))
131 		return -EINVAL;
132 
133 	gmi->dev = gc->dev;
134 	buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
135 	if (!buf)
136 		return -ENOMEM;
137 
138 	gmi->dma_handle = dma_handle;
139 	gmi->virt_addr = buf;
140 	gmi->length = length;
141 
142 	return 0;
143 }
144 
145 void mana_gd_free_memory(struct gdma_mem_info *gmi)
146 {
147 	dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
148 			  gmi->dma_handle);
149 }
150 
151 static int mana_gd_create_hw_eq(struct gdma_context *gc,
152 				struct gdma_queue *queue)
153 {
154 	struct gdma_create_queue_resp resp = {};
155 	struct gdma_create_queue_req req = {};
156 	int err;
157 
158 	if (queue->type != GDMA_EQ)
159 		return -EINVAL;
160 
161 	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
162 			     sizeof(req), sizeof(resp));
163 
164 	req.hdr.dev_id = queue->gdma_dev->dev_id;
165 	req.type = queue->type;
166 	req.pdid = queue->gdma_dev->pdid;
167 	req.doolbell_id = queue->gdma_dev->doorbell;
168 	req.gdma_region = queue->mem_info.gdma_region;
169 	req.queue_size = queue->queue_size;
170 	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
171 	req.eq_pci_msix_index = queue->eq.msix_index;
172 
173 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
174 	if (err || resp.hdr.status) {
175 		dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
176 			resp.hdr.status);
177 		return err ? err : -EPROTO;
178 	}
179 
180 	queue->id = resp.queue_index;
181 	queue->eq.disable_needed = true;
182 	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
183 	return 0;
184 }
185 
186 static int mana_gd_disable_queue(struct gdma_queue *queue)
187 {
188 	struct gdma_context *gc = queue->gdma_dev->gdma_context;
189 	struct gdma_disable_queue_req req = {};
190 	struct gdma_general_resp resp = {};
191 	int err;
192 
193 	WARN_ON(queue->type != GDMA_EQ);
194 
195 	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
196 			     sizeof(req), sizeof(resp));
197 
198 	req.hdr.dev_id = queue->gdma_dev->dev_id;
199 	req.type = queue->type;
200 	req.queue_index = queue->id;
201 	req.alloc_res_id_on_creation = 1;
202 
203 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
204 	if (err || resp.hdr.status) {
205 		dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
206 			resp.hdr.status);
207 		return err ? err : -EPROTO;
208 	}
209 
210 	return 0;
211 }
212 
213 #define DOORBELL_OFFSET_SQ	0x0
214 #define DOORBELL_OFFSET_RQ	0x400
215 #define DOORBELL_OFFSET_CQ	0x800
216 #define DOORBELL_OFFSET_EQ	0xFF8
217 
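/* Format a doorbell entry for the given queue type and write it at the
 * type-specific offset within the doorbell page selected by db_index.
 */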
218 static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
219 				  enum gdma_queue_type q_type, u32 qid,
220 				  u32 tail_ptr, u8 num_req)
221 {
222 	void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
223 	union gdma_doorbell_entry e = {};
224 
225 	switch (q_type) {
226 	case GDMA_EQ:
227 		e.eq.id = qid;
228 		e.eq.tail_ptr = tail_ptr;
229 		e.eq.arm = num_req;
230 
231 		addr += DOORBELL_OFFSET_EQ;
232 		break;
233 
234 	case GDMA_CQ:
235 		e.cq.id = qid;
236 		e.cq.tail_ptr = tail_ptr;
237 		e.cq.arm = num_req;
238 
239 		addr += DOORBELL_OFFSET_CQ;
240 		break;
241 
242 	case GDMA_RQ:
243 		e.rq.id = qid;
244 		e.rq.tail_ptr = tail_ptr;
245 		e.rq.wqe_cnt = num_req;
246 
247 		addr += DOORBELL_OFFSET_RQ;
248 		break;
249 
250 	case GDMA_SQ:
251 		e.sq.id = qid;
252 		e.sq.tail_ptr = tail_ptr;
253 
254 		addr += DOORBELL_OFFSET_SQ;
255 		break;
256 
257 	default:
258 		WARN_ON(1);
259 		return;
260 	}
261 
262 	/* Ensure all writes are done before ringing the doorbell */
263 	wmb();
264 
265 	writeq(e.as_uint64, addr);
266 }
267 
268 void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
269 {
270 	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
271 			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 1);
272 }
273 
274 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
275 {
276 	struct gdma_context *gc = cq->gdma_dev->gdma_context;
277 
278 	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
279 
280 	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
281 
282 	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
283 			      head, arm_bit);
284 }
285 
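/* Handle a single EQ entry: dispatch completion events to the matching CQ's
 * callback, complete the EQ self-test event, and forward HWC init events to
 * the EQ's own callback.
 */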
286 static void mana_gd_process_eqe(struct gdma_queue *eq)
287 {
288 	u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
289 	struct gdma_context *gc = eq->gdma_dev->gdma_context;
290 	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
291 	union gdma_eqe_info eqe_info;
292 	enum gdma_eqe_type type;
293 	struct gdma_event event;
294 	struct gdma_queue *cq;
295 	struct gdma_eqe *eqe;
296 	u32 cq_id;
297 
298 	eqe = &eq_eqe_ptr[head];
299 	eqe_info.as_uint32 = eqe->eqe_info;
300 	type = eqe_info.type;
301 
302 	switch (type) {
303 	case GDMA_EQE_COMPLETION:
304 		cq_id = eqe->details[0] & 0xFFFFFF;
305 		if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
306 			break;
307 
308 		cq = gc->cq_table[cq_id];
309 		if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
310 			break;
311 
312 		if (cq->cq.callback)
313 			cq->cq.callback(cq->cq.context, cq);
314 
315 		break;
316 
317 	case GDMA_EQE_TEST_EVENT:
318 		gc->test_event_eq_id = eq->id;
319 		complete(&gc->eq_test_event);
320 		break;
321 
322 	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
323 	case GDMA_EQE_HWC_INIT_DATA:
324 	case GDMA_EQE_HWC_INIT_DONE:
325 		if (!eq->eq.callback)
326 			break;
327 
328 		event.type = type;
329 		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
330 		eq->eq.callback(eq->eq.context, eq, &event);
331 		break;
332 
333 	default:
334 		break;
335 	}
336 }
337 
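/* EQ interrupt handler body. The owner bits of each EQE tell whether the
 * entry is stale (previous pass), valid (current pass), or indicates that the
 * EQ has overflowed. The final doorbell write reports the new head and
 * re-arms the EQ.
 */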
338 static void mana_gd_process_eq_events(void *arg)
339 {
340 	u32 owner_bits, new_bits, old_bits;
341 	union gdma_eqe_info eqe_info;
342 	struct gdma_eqe *eq_eqe_ptr;
343 	struct gdma_queue *eq = arg;
344 	struct gdma_context *gc;
345 	struct gdma_eqe *eqe;
346 	u32 head, num_eqe;
347 	int i;
348 
349 	gc = eq->gdma_dev->gdma_context;
350 
351 	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
352 	eq_eqe_ptr = eq->queue_mem_ptr;
353 
354 	/* Process up to 5 EQEs at a time, and update the HW head. */
355 	for (i = 0; i < 5; i++) {
356 		eqe = &eq_eqe_ptr[eq->head % num_eqe];
357 		eqe_info.as_uint32 = eqe->eqe_info;
358 		owner_bits = eqe_info.owner_bits;
359 
360 		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
361 		/* No more entries */
362 		if (owner_bits == old_bits)
363 			break;
364 
365 		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
366 		if (owner_bits != new_bits) {
367 			dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
368 			break;
369 		}
370 
371 		mana_gd_process_eqe(eq);
372 
373 		eq->head++;
374 	}
375 
376 	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
377 
378 	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
379 			      head, SET_ARM_BIT);
380 }
381 
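/* Reserve a free MSI-X vector from the shared bitmap and attach the EQ
 * processing handler to that vector's interrupt context.
 */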
382 static int mana_gd_register_irq(struct gdma_queue *queue,
383 				const struct gdma_queue_spec *spec)
384 {
385 	struct gdma_dev *gd = queue->gdma_dev;
386 	struct gdma_irq_context *gic;
387 	struct gdma_context *gc;
388 	struct gdma_resource *r;
389 	unsigned int msi_index;
390 	unsigned long flags;
391 	struct device *dev;
392 	int err = 0;
393 
394 	gc = gd->gdma_context;
395 	r = &gc->msix_resource;
396 	dev = gc->dev;
397 
398 	spin_lock_irqsave(&r->lock, flags);
399 
400 	msi_index = find_first_zero_bit(r->map, r->size);
401 	if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
402 		err = -ENOSPC;
403 	} else {
404 		bitmap_set(r->map, msi_index, 1);
405 		queue->eq.msix_index = msi_index;
406 	}
407 
408 	spin_unlock_irqrestore(&r->lock, flags);
409 
410 	if (err) {
411 		dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u\n",
412 			err, msi_index, r->size, gc->num_msix_usable);
413 
414 		return err;
415 	}
416 
417 	gic = &gc->irq_contexts[msi_index];
418 
419 	WARN_ON(gic->handler || gic->arg);
420 
421 	gic->arg = queue;
422 
423 	gic->handler = mana_gd_process_eq_events;
424 
425 	return 0;
426 }
427 
428 static void mana_gd_deregister_irq(struct gdma_queue *queue)
429 {
430 	struct gdma_dev *gd = queue->gdma_dev;
431 	struct gdma_irq_context *gic;
432 	struct gdma_context *gc;
433 	struct gdma_resource *r;
434 	unsigned int msix_index;
435 	unsigned long flags;
436 
437 	gc = gd->gdma_context;
438 	r = &gc->msix_resource;
439 
440 	/* At most num_online_cpus() + 1 interrupts are used. */
441 	msix_index = queue->eq.msix_index;
442 	if (WARN_ON(msix_index >= gc->num_msix_usable))
443 		return;
444 
445 	gic = &gc->irq_contexts[msix_index];
446 	gic->handler = NULL;
447 	gic->arg = NULL;
448 
449 	spin_lock_irqsave(&r->lock, flags);
450 	bitmap_clear(r->map, msix_index, 1);
451 	spin_unlock_irqrestore(&r->lock, flags);
452 
453 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
454 }
455 
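/* Ask the hardware to generate a test event on the given EQ and wait up to
 * 30 seconds for it to arrive, verifying that the EQ and its interrupt work.
 */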
456 int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
457 {
458 	struct gdma_generate_test_event_req req = {};
459 	struct gdma_general_resp resp = {};
460 	struct device *dev = gc->dev;
461 	int err;
462 
463 	mutex_lock(&gc->eq_test_event_mutex);
464 
465 	init_completion(&gc->eq_test_event);
466 	gc->test_event_eq_id = INVALID_QUEUE_ID;
467 
468 	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
469 			     sizeof(req), sizeof(resp));
470 
471 	req.hdr.dev_id = eq->gdma_dev->dev_id;
472 	req.queue_index = eq->id;
473 
474 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
475 	if (err) {
476 		dev_err(dev, "test_eq failed: %d\n", err);
477 		goto out;
478 	}
479 
480 	err = -EPROTO;
481 
482 	if (resp.hdr.status) {
483 		dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
484 		goto out;
485 	}
486 
487 	if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
488 		dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
489 		goto out;
490 	}
491 
492 	if (eq->id != gc->test_event_eq_id) {
493 		dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
494 			gc->test_event_eq_id, eq->id);
495 		goto out;
496 	}
497 
498 	err = 0;
499 out:
500 	mutex_unlock(&gc->eq_test_event_mutex);
501 	return err;
502 }
503 
504 static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
505 			       struct gdma_queue *queue)
506 {
507 	int err;
508 
509 	if (flush_events) {
510 		err = mana_gd_test_eq(gc, queue);
511 		if (err)
512 			dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
513 	}
514 
515 	mana_gd_deregister_irq(queue);
516 
517 	if (queue->eq.disable_needed)
518 		mana_gd_disable_queue(queue);
519 }
520 
521 static int mana_gd_create_eq(struct gdma_dev *gd,
522 			     const struct gdma_queue_spec *spec,
523 			     bool create_hwq, struct gdma_queue *queue)
524 {
525 	struct gdma_context *gc = gd->gdma_context;
526 	struct device *dev = gc->dev;
527 	u32 log2_num_entries;
528 	int err;
529 
530 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
531 
532 	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
533 
534 	if (spec->eq.log2_throttle_limit > log2_num_entries) {
535 		dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
536 			spec->eq.log2_throttle_limit, log2_num_entries);
537 		return -EINVAL;
538 	}
539 
540 	err = mana_gd_register_irq(queue, spec);
541 	if (err) {
542 		dev_err(dev, "Failed to register irq: %d\n", err);
543 		return err;
544 	}
545 
546 	queue->eq.callback = spec->eq.callback;
547 	queue->eq.context = spec->eq.context;
548 	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
549 	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
550 
551 	if (create_hwq) {
552 		err = mana_gd_create_hw_eq(gc, queue);
553 		if (err)
554 			goto out;
555 
556 		err = mana_gd_test_eq(gc, queue);
557 		if (err)
558 			goto out;
559 	}
560 
561 	return 0;
562 out:
563 	dev_err(dev, "Failed to create EQ: %d\n", err);
564 	mana_gd_destroy_eq(gc, false, queue);
565 	return err;
566 }
567 
568 static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
569 			      struct gdma_queue *queue)
570 {
571 	u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);
572 
573 	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
574 	queue->cq.parent = spec->cq.parent_eq;
575 	queue->cq.context = spec->cq.context;
576 	queue->cq.callback = spec->cq.callback;
577 }
578 
579 static void mana_gd_destroy_cq(struct gdma_context *gc,
580 			       struct gdma_queue *queue)
581 {
582 	u32 id = queue->id;
583 
584 	if (id >= gc->max_num_cqs)
585 		return;
586 
587 	if (!gc->cq_table[id])
588 		return;
589 
590 	gc->cq_table[id] = NULL;
591 }
592 
593 int mana_gd_create_hwc_queue(struct gdma_dev *gd,
594 			     const struct gdma_queue_spec *spec,
595 			     struct gdma_queue **queue_ptr)
596 {
597 	struct gdma_context *gc = gd->gdma_context;
598 	struct gdma_mem_info *gmi;
599 	struct gdma_queue *queue;
600 	int err;
601 
602 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
603 	if (!queue)
604 		return -ENOMEM;
605 
606 	gmi = &queue->mem_info;
607 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
608 	if (err)
609 		goto free_q;
610 
611 	queue->head = 0;
612 	queue->tail = 0;
613 	queue->queue_mem_ptr = gmi->virt_addr;
614 	queue->queue_size = spec->queue_size;
615 	queue->monitor_avl_buf = spec->monitor_avl_buf;
616 	queue->type = spec->type;
617 	queue->gdma_dev = gd;
618 
619 	if (spec->type == GDMA_EQ)
620 		err = mana_gd_create_eq(gd, spec, false, queue);
621 	else if (spec->type == GDMA_CQ)
622 		mana_gd_create_cq(spec, queue);
623 
624 	if (err)
625 		goto out;
626 
627 	*queue_ptr = queue;
628 	return 0;
629 out:
630 	mana_gd_free_memory(gmi);
631 free_q:
632 	kfree(queue);
633 	return err;
634 }
635 
636 static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
637 {
638 	struct gdma_destroy_dma_region_req req = {};
639 	struct gdma_general_resp resp = {};
640 	int err;
641 
642 	if (gdma_region == GDMA_INVALID_DMA_REGION)
643 		return;
644 
645 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
646 			     sizeof(resp));
647 	req.gdma_region = gdma_region;
648 
649 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
650 	if (err || resp.hdr.status)
651 		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
652 			err, resp.hdr.status);
653 }
654 
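/* Register the queue buffer with the hardware as a DMA region. The request
 * carries the DMA address of every page of the buffer, so the buffer size is
 * bounded by the maximum HWC request message size.
 */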
655 static int mana_gd_create_dma_region(struct gdma_dev *gd,
656 				     struct gdma_mem_info *gmi)
657 {
658 	unsigned int num_page = gmi->length / PAGE_SIZE;
659 	struct gdma_create_dma_region_req *req = NULL;
660 	struct gdma_create_dma_region_resp resp = {};
661 	struct gdma_context *gc = gd->gdma_context;
662 	struct hw_channel_context *hwc;
663 	u32 length = gmi->length;
664 	u32 req_msg_size;
665 	int err;
666 	int i;
667 
668 	if (length < PAGE_SIZE || !is_power_of_2(length))
669 		return -EINVAL;
670 
671 	if (offset_in_page(gmi->virt_addr) != 0)
672 		return -EINVAL;
673 
674 	hwc = gc->hwc.driver_data;
675 	req_msg_size = sizeof(*req) + num_page * sizeof(u64);
676 	if (req_msg_size > hwc->max_req_msg_size)
677 		return -EINVAL;
678 
679 	req = kzalloc(req_msg_size, GFP_KERNEL);
680 	if (!req)
681 		return -ENOMEM;
682 
683 	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
684 			     req_msg_size, sizeof(resp));
685 	req->length = length;
686 	req->offset_in_page = 0;
687 	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
688 	req->page_count = num_page;
689 	req->page_addr_list_len = num_page;
690 
691 	for (i = 0; i < num_page; i++)
692 		req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
693 
694 	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
695 	if (err)
696 		goto out;
697 
698 	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
699 		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
700 			resp.hdr.status);
701 		err = -EPROTO;
702 		goto out;
703 	}
704 
705 	gmi->gdma_region = resp.gdma_region;
706 out:
707 	kfree(req);
708 	return err;
709 }
710 
711 int mana_gd_create_mana_eq(struct gdma_dev *gd,
712 			   const struct gdma_queue_spec *spec,
713 			   struct gdma_queue **queue_ptr)
714 {
715 	struct gdma_context *gc = gd->gdma_context;
716 	struct gdma_mem_info *gmi;
717 	struct gdma_queue *queue;
718 	int err;
719 
720 	if (spec->type != GDMA_EQ)
721 		return -EINVAL;
722 
723 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
724 	if (!queue)
725 		return -ENOMEM;
726 
727 	gmi = &queue->mem_info;
728 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
729 	if (err)
730 		goto free_q;
731 
732 	err = mana_gd_create_dma_region(gd, gmi);
733 	if (err)
734 		goto out;
735 
736 	queue->head = 0;
737 	queue->tail = 0;
738 	queue->queue_mem_ptr = gmi->virt_addr;
739 	queue->queue_size = spec->queue_size;
740 	queue->monitor_avl_buf = spec->monitor_avl_buf;
741 	queue->type = spec->type;
742 	queue->gdma_dev = gd;
743 
744 	err = mana_gd_create_eq(gd, spec, true, queue);
745 	if (err)
746 		goto out;
747 
748 	*queue_ptr = queue;
749 	return 0;
750 out:
751 	mana_gd_free_memory(gmi);
752 free_q:
753 	kfree(queue);
754 	return err;
755 }
756 
757 int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
758 			      const struct gdma_queue_spec *spec,
759 			      struct gdma_queue **queue_ptr)
760 {
761 	struct gdma_context *gc = gd->gdma_context;
762 	struct gdma_mem_info *gmi;
763 	struct gdma_queue *queue;
764 	int err;
765 
766 	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
767 	    spec->type != GDMA_RQ)
768 		return -EINVAL;
769 
770 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
771 	if (!queue)
772 		return -ENOMEM;
773 
774 	gmi = &queue->mem_info;
775 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
776 	if (err)
777 		goto free_q;
778 
779 	err = mana_gd_create_dma_region(gd, gmi);
780 	if (err)
781 		goto out;
782 
783 	queue->head = 0;
784 	queue->tail = 0;
785 	queue->queue_mem_ptr = gmi->virt_addr;
786 	queue->queue_size = spec->queue_size;
787 	queue->monitor_avl_buf = spec->monitor_avl_buf;
788 	queue->type = spec->type;
789 	queue->gdma_dev = gd;
790 
791 	if (spec->type == GDMA_CQ)
792 		mana_gd_create_cq(spec, queue);
793 
794 	*queue_ptr = queue;
795 	return 0;
796 out:
797 	mana_gd_free_memory(gmi);
798 free_q:
799 	kfree(queue);
800 	return err;
801 }
802 
803 void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
804 {
805 	struct gdma_mem_info *gmi = &queue->mem_info;
806 
807 	switch (queue->type) {
808 	case GDMA_EQ:
809 		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
810 		break;
811 
812 	case GDMA_CQ:
813 		mana_gd_destroy_cq(gc, queue);
814 		break;
815 
816 	case GDMA_RQ:
817 		break;
818 
819 	case GDMA_SQ:
820 		break;
821 
822 	default:
823 		dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
824 			queue->type);
825 		return;
826 	}
827 
828 	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
829 	mana_gd_free_memory(gmi);
830 	kfree(queue);
831 }
832 
833 int mana_gd_verify_vf_version(struct pci_dev *pdev)
834 {
835 	struct gdma_context *gc = pci_get_drvdata(pdev);
836 	struct gdma_verify_ver_resp resp = {};
837 	struct gdma_verify_ver_req req = {};
838 	int err;
839 
840 	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
841 			     sizeof(req), sizeof(resp));
842 
843 	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
844 	req.protocol_ver_max = GDMA_PROTOCOL_LAST;
845 
846 	req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
847 	req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
848 	req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
849 	req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
850 
851 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
852 	if (err || resp.hdr.status) {
853 		dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
854 			err, resp.hdr.status);
855 		return err ? err : -EPROTO;
856 	}
857 
858 	return 0;
859 }
860 
861 int mana_gd_register_device(struct gdma_dev *gd)
862 {
863 	struct gdma_context *gc = gd->gdma_context;
864 	struct gdma_register_device_resp resp = {};
865 	struct gdma_general_req req = {};
866 	int err;
867 
868 	gd->pdid = INVALID_PDID;
869 	gd->doorbell = INVALID_DOORBELL;
870 	gd->gpa_mkey = INVALID_MEM_KEY;
871 
872 	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
873 			     sizeof(resp));
874 
875 	req.hdr.dev_id = gd->dev_id;
876 
877 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
878 	if (err || resp.hdr.status) {
879 		dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
880 			err, resp.hdr.status);
881 		return err ? err : -EPROTO;
882 	}
883 
884 	gd->pdid = resp.pdid;
885 	gd->gpa_mkey = resp.gpa_mkey;
886 	gd->doorbell = resp.db_id;
887 
888 	return 0;
889 }
890 
891 int mana_gd_deregister_device(struct gdma_dev *gd)
892 {
893 	struct gdma_context *gc = gd->gdma_context;
894 	struct gdma_general_resp resp = {};
895 	struct gdma_general_req req = {};
896 	int err;
897 
898 	if (gd->pdid == INVALID_PDID)
899 		return -EINVAL;
900 
901 	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
902 			     sizeof(resp));
903 
904 	req.hdr.dev_id = gd->dev_id;
905 
906 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
907 	if (err || resp.hdr.status) {
908 		dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
909 			err, resp.hdr.status);
910 		if (!err)
911 			err = -EPROTO;
912 	}
913 
914 	gd->pdid = INVALID_PDID;
915 	gd->doorbell = INVALID_DOORBELL;
916 	gd->gpa_mkey = INVALID_MEM_KEY;
917 
918 	return err;
919 }
920 
921 u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
922 {
923 	u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
924 	u32 wq_size = wq->queue_size;
925 
926 	WARN_ON_ONCE(used_space > wq_size);
927 
928 	return wq_size - used_space;
929 }
930 
931 u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
932 {
933 	u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);
934 
935 	WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);
936 
937 	return wq->queue_mem_ptr + offset;
938 }
939 
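/* Write the WQE header and the inline client OOB data into the work queue.
 * Returns the number of bytes written, which is also the offset of the SGL
 * within the WQE.
 */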
940 static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
941 				    enum gdma_queue_type q_type,
942 				    u32 client_oob_size, u32 sgl_data_size,
943 				    u8 *wqe_ptr)
944 {
945 	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
946 	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
947 	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
948 	u8 *ptr;
949 
950 	memset(header, 0, sizeof(struct gdma_wqe));
951 	header->num_sge = wqe_req->num_sge;
952 	header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
953 
954 	if (oob_in_sgl) {
955 		WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
956 
957 		header->client_oob_in_sgl = 1;
958 
959 		if (pad_data)
960 			header->last_vbytes = wqe_req->sgl[0].size;
961 	}
962 
963 	if (q_type == GDMA_SQ)
964 		header->client_data_unit = wqe_req->client_data_unit;
965 
966 	/* The size of gdma_wqe + client_oob_size must be less than or equal
967 	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
968 	 * the queue memory buffer boundary.
969 	 */
970 	ptr = wqe_ptr + sizeof(struct gdma_wqe);
971 
972 	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
973 		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);
974 
975 		if (client_oob_size > wqe_req->inline_oob_size)
976 			memset(ptr + wqe_req->inline_oob_size, 0,
977 			       client_oob_size - wqe_req->inline_oob_size);
978 	}
979 
980 	return sizeof(struct gdma_wqe) + client_oob_size;
981 }
982 
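/* Copy the scatter-gather list into the work queue, wrapping around to the
 * start of the queue buffer when the end is reached.
 */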
983 static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
984 			      const struct gdma_wqe_request *wqe_req)
985 {
986 	u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
987 	const u8 *address = (u8 *)wqe_req->sgl;
988 	u8 *base_ptr, *end_ptr;
989 	u32 size_to_end;
990 
991 	base_ptr = wq->queue_mem_ptr;
992 	end_ptr = base_ptr + wq->queue_size;
993 	size_to_end = (u32)(end_ptr - wqe_ptr);
994 
995 	if (size_to_end < sgl_size) {
996 		memcpy(wqe_ptr, address, size_to_end);
997 
998 		wqe_ptr = base_ptr;
999 		address += size_to_end;
1000 		sgl_size -= size_to_end;
1001 	}
1002 
1003 	memcpy(wqe_ptr, address, sgl_size);
1004 }
1005 
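/* Validate the request, write one WQE (header, inline OOB and SGL) into the
 * work queue and advance the software head. The caller is responsible for
 * ringing the doorbell.
 */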
1006 int mana_gd_post_work_request(struct gdma_queue *wq,
1007 			      const struct gdma_wqe_request *wqe_req,
1008 			      struct gdma_posted_wqe_info *wqe_info)
1009 {
1010 	u32 client_oob_size = wqe_req->inline_oob_size;
1011 	struct gdma_context *gc;
1012 	u32 sgl_data_size;
1013 	u32 max_wqe_size;
1014 	u32 wqe_size;
1015 	u8 *wqe_ptr;
1016 
1017 	if (wqe_req->num_sge == 0)
1018 		return -EINVAL;
1019 
1020 	if (wq->type == GDMA_RQ) {
1021 		if (client_oob_size != 0)
1022 			return -EINVAL;
1023 
1024 		client_oob_size = INLINE_OOB_SMALL_SIZE;
1025 
1026 		max_wqe_size = GDMA_MAX_RQE_SIZE;
1027 	} else {
1028 		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
1029 		    client_oob_size != INLINE_OOB_LARGE_SIZE)
1030 			return -EINVAL;
1031 
1032 		max_wqe_size = GDMA_MAX_SQE_SIZE;
1033 	}
1034 
1035 	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
1036 	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
1037 			 sgl_data_size, GDMA_WQE_BU_SIZE);
1038 	if (wqe_size > max_wqe_size)
1039 		return -EINVAL;
1040 
1041 	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
1042 		gc = wq->gdma_dev->gdma_context;
1043 		dev_err(gc->dev, "unsuccessful flow control!\n");
1044 		return -ENOSPC;
1045 	}
1046 
1047 	if (wqe_info)
1048 		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
1049 
1050 	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
1051 	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
1052 					    sgl_data_size, wqe_ptr);
1053 	if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
1054 		wqe_ptr -= wq->queue_size;
1055 
1056 	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);
1057 
1058 	wq->head += wqe_size / GDMA_WQE_BU_SIZE;
1059 
1060 	return 0;
1061 }
1062 
1063 int mana_gd_post_and_ring(struct gdma_queue *queue,
1064 			  const struct gdma_wqe_request *wqe_req,
1065 			  struct gdma_posted_wqe_info *wqe_info)
1066 {
1067 	struct gdma_context *gc = queue->gdma_dev->gdma_context;
1068 	int err;
1069 
1070 	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
1071 	if (err)
1072 		return err;
1073 
1074 	mana_gd_wq_ring_doorbell(gc, queue);
1075 
1076 	return 0;
1077 }
1078 
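/* Read one CQE if available: returns 1 when a completion was copied out,
 * 0 when the queue is empty, and -1 when an owner-bits overflow is detected.
 */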
1079 static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
1080 {
1081 	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
1082 	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
1083 	u32 owner_bits, new_bits, old_bits;
1084 	struct gdma_cqe *cqe;
1085 
1086 	cqe = &cq_cqe[cq->head % num_cqe];
1087 	owner_bits = cqe->cqe_info.owner_bits;
1088 
1089 	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
1090 	/* Return 0 if no more entries. */
1091 	if (owner_bits == old_bits)
1092 		return 0;
1093 
1094 	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
1095 	/* Return -1 if overflow detected. */
1096 	if (WARN_ON_ONCE(owner_bits != new_bits))
1097 		return -1;
1098 
1099 	comp->wq_num = cqe->cqe_info.wq_num;
1100 	comp->is_sq = cqe->cqe_info.is_sq;
1101 	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
1102 
1103 	return 1;
1104 }
1105 
1106 int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
1107 {
1108 	int cqe_idx;
1109 	int ret;
1110 
1111 	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
1112 		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);
1113 
1114 		if (ret < 0) {
1115 			cq->head -= cqe_idx;
1116 			return ret;
1117 		}
1118 
1119 		if (ret == 0)
1120 			break;
1121 
1122 		cq->head++;
1123 	}
1124 
1125 	return cqe_idx;
1126 }
1127 
1128 static irqreturn_t mana_gd_intr(int irq, void *arg)
1129 {
1130 	struct gdma_irq_context *gic = arg;
1131 
1132 	if (gic->handler)
1133 		gic->handler(gic->arg);
1134 
1135 	return IRQ_HANDLED;
1136 }
1137 
1138 int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
1139 {
1140 	r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
1141 	if (!r->map)
1142 		return -ENOMEM;
1143 
1144 	r->size = res_avail;
1145 	spin_lock_init(&r->lock);
1146 
1147 	return 0;
1148 }
1149 
1150 void mana_gd_free_res_map(struct gdma_resource *r)
1151 {
1152 	bitmap_free(r->map);
1153 	r->map = NULL;
1154 	r->size = 0;
1155 }
1156 
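/* Allocate MSI-X vectors (at most one per queue plus one for the HWC),
 * request an interrupt for each vector, and initialize the per-vector
 * contexts and the MSI-X resource bitmap.
 */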
1157 static int mana_gd_setup_irqs(struct pci_dev *pdev)
1158 {
1159 	unsigned int max_queues_per_port = num_online_cpus();
1160 	struct gdma_context *gc = pci_get_drvdata(pdev);
1161 	struct gdma_irq_context *gic;
1162 	unsigned int max_irqs;
1163 	int nvec, irq;
1164 	int err, i, j;
1165 
1166 	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
1167 		max_queues_per_port = MANA_MAX_NUM_QUEUES;
1168 
1169 	/* Need 1 interrupt for the Hardware Channel (HWC) */
1170 	max_irqs = max_queues_per_port + 1;
1171 
1172 	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
1173 	if (nvec < 0)
1174 		return nvec;
1175 
1176 	gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
1177 				   GFP_KERNEL);
1178 	if (!gc->irq_contexts) {
1179 		err = -ENOMEM;
1180 		goto free_irq_vector;
1181 	}
1182 
1183 	for (i = 0; i < nvec; i++) {
1184 		gic = &gc->irq_contexts[i];
1185 		gic->handler = NULL;
1186 		gic->arg = NULL;
1187 
1188 		irq = pci_irq_vector(pdev, i);
1189 		if (irq < 0) {
1190 			err = irq;
1191 			goto free_irq;
1192 		}
1193 
1194 		err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic);
1195 		if (err)
1196 			goto free_irq;
1197 	}
1198 
1199 	err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
1200 	if (err)
1201 		goto free_irq;
1202 
1203 	gc->max_num_msix = nvec;
1204 	gc->num_msix_usable = nvec;
1205 
1206 	return 0;
1207 
1208 free_irq:
1209 	for (j = i - 1; j >= 0; j--) {
1210 		irq = pci_irq_vector(pdev, j);
1211 		gic = &gc->irq_contexts[j];
1212 		free_irq(irq, gic);
1213 	}
1214 
1215 	kfree(gc->irq_contexts);
1216 	gc->irq_contexts = NULL;
1217 free_irq_vector:
1218 	pci_free_irq_vectors(pdev);
1219 	return err;
1220 }
1221 
1222 static void mana_gd_remove_irqs(struct pci_dev *pdev)
1223 {
1224 	struct gdma_context *gc = pci_get_drvdata(pdev);
1225 	struct gdma_irq_context *gic;
1226 	int irq, i;
1227 
1228 	if (gc->max_num_msix < 1)
1229 		return;
1230 
1231 	mana_gd_free_res_map(&gc->msix_resource);
1232 
1233 	for (i = 0; i < gc->max_num_msix; i++) {
1234 		irq = pci_irq_vector(pdev, i);
1235 		if (irq < 0)
1236 			continue;
1237 
1238 		gic = &gc->irq_contexts[i];
1239 		free_irq(irq, gic);
1240 	}
1241 
1242 	pci_free_irq_vectors(pdev);
1243 
1244 	gc->max_num_msix = 0;
1245 	gc->num_msix_usable = 0;
1246 	kfree(gc->irq_contexts);
1247 	gc->irq_contexts = NULL;
1248 }
1249 
1250 static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1251 {
1252 	struct gdma_context *gc;
1253 	void __iomem *bar0_va;
1254 	int bar = 0;
1255 	int err;
1256 
1257 	/* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
1258 	BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
1259 
1260 	err = pci_enable_device(pdev);
1261 	if (err)
1262 		return -ENXIO;
1263 
1264 	pci_set_master(pdev);
1265 
1266 	err = pci_request_regions(pdev, "mana");
1267 	if (err)
1268 		goto disable_dev;
1269 
1270 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1271 	if (err)
1272 		goto release_region;
1273 
1274 	err = -ENOMEM;
1275 	gc = vzalloc(sizeof(*gc));
1276 	if (!gc)
1277 		goto release_region;
1278 
1279 	bar0_va = pci_iomap(pdev, bar, 0);
1280 	if (!bar0_va)
1281 		goto free_gc;
1282 
1283 	gc->bar0_va = bar0_va;
1284 	gc->dev = &pdev->dev;
1285 
1286 	pci_set_drvdata(pdev, gc);
1287 
1288 	mana_gd_init_registers(pdev);
1289 
1290 	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
1291 
1292 	err = mana_gd_setup_irqs(pdev);
1293 	if (err)
1294 		goto unmap_bar;
1295 
1296 	mutex_init(&gc->eq_test_event_mutex);
1297 
1298 	err = mana_hwc_create_channel(gc);
1299 	if (err)
1300 		goto remove_irq;
1301 
1302 	err = mana_gd_verify_vf_version(pdev);
1303 	if (err)
1304 		goto remove_irq;
1305 
1306 	err = mana_gd_query_max_resources(pdev);
1307 	if (err)
1308 		goto remove_irq;
1309 
1310 	err = mana_gd_detect_devices(pdev);
1311 	if (err)
1312 		goto remove_irq;
1313 
1314 	err = mana_probe(&gc->mana);
1315 	if (err)
1316 		goto clean_up_gdma;
1317 
1318 	return 0;
1319 
1320 clean_up_gdma:
1321 	mana_hwc_destroy_channel(gc);
1322 	vfree(gc->cq_table);
1323 	gc->cq_table = NULL;
1324 remove_irq:
1325 	mana_gd_remove_irqs(pdev);
1326 unmap_bar:
1327 	pci_iounmap(pdev, bar0_va);
1328 free_gc:
1329 	vfree(gc);
1330 release_region:
1331 	pci_release_regions(pdev);
1332 disable_dev:
1333 	pci_clear_master(pdev);
1334 	pci_disable_device(pdev);
1335 	dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
1336 	return err;
1337 }
1338 
1339 static void mana_gd_remove(struct pci_dev *pdev)
1340 {
1341 	struct gdma_context *gc = pci_get_drvdata(pdev);
1342 
1343 	mana_remove(&gc->mana);
1344 
1345 	mana_hwc_destroy_channel(gc);
1346 	vfree(gc->cq_table);
1347 	gc->cq_table = NULL;
1348 
1349 	mana_gd_remove_irqs(pdev);
1350 
1351 	pci_iounmap(pdev, gc->bar0_va);
1352 
1353 	vfree(gc);
1354 
1355 	pci_release_regions(pdev);
1356 	pci_clear_master(pdev);
1357 	pci_disable_device(pdev);
1358 }
1359 
1360 #ifndef PCI_VENDOR_ID_MICROSOFT
1361 #define PCI_VENDOR_ID_MICROSOFT 0x1414
1362 #endif
1363 
1364 static const struct pci_device_id mana_id_table[] = {
1365 	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
1366 	{ }
1367 };
1368 
1369 static struct pci_driver mana_driver = {
1370 	.name		= "mana",
1371 	.id_table	= mana_id_table,
1372 	.probe		= mana_gd_probe,
1373 	.remove		= mana_gd_remove,
1374 };
1375 
1376 module_pci_driver(mana_driver);
1377 
1378 MODULE_DEVICE_TABLE(pci, mana_id_table);
1379 
1380 MODULE_LICENSE("Dual BSD/GPL");
1381 MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");
1382