xref: /linux/drivers/net/ethernet/microsoft/mana/gdma_main.c (revision 150b567e0d572342ef08bace7ee7aff80fd75327)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <linux/debugfs.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/utsname.h>
8 #include <linux/version.h>
9 
10 #include <net/mana/mana.h>
11 
12 struct dentry *mana_debugfs_root;
13 
14 static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
15 {
16 	return readl(g->bar0_va + offset);
17 }
18 
19 static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
20 {
21 	return readq(g->bar0_va + offset);
22 }
23 
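/* PF register init: read the doorbell page size and offset from the
 * GDMA_PF_* BAR0 registers, then locate the shared-memory (SHM) base
 * relative to the SR-IOV config base given by GDMA_SRIOV_REG_CFG_BASE_OFF.
 */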
24 static void mana_gd_init_pf_regs(struct pci_dev *pdev)
25 {
26 	struct gdma_context *gc = pci_get_drvdata(pdev);
27 	void __iomem *sriov_base_va;
28 	u64 sriov_base_off;
29 
30 	gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
31 	gc->db_page_base = gc->bar0_va +
32 				mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
33 
34 	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
35 
36 	sriov_base_va = gc->bar0_va + sriov_base_off;
37 	gc->shm_base = sriov_base_va +
38 			mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
39 }
40 
41 static void mana_gd_init_vf_regs(struct pci_dev *pdev)
42 {
43 	struct gdma_context *gc = pci_get_drvdata(pdev);
44 
45 	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
46 
47 	gc->db_page_base = gc->bar0_va +
48 				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
49 
50 	gc->phys_db_page_base = gc->bar0_pa +
51 				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
52 
53 	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
54 }
55 
56 static void mana_gd_init_registers(struct pci_dev *pdev)
57 {
58 	struct gdma_context *gc = pci_get_drvdata(pdev);
59 
60 	if (gc->is_pf)
61 		mana_gd_init_pf_regs(pdev);
62 	else
63 		mana_gd_init_vf_regs(pdev);
64 }
65 
66 static int mana_gd_query_max_resources(struct pci_dev *pdev)
67 {
68 	struct gdma_context *gc = pci_get_drvdata(pdev);
69 	struct gdma_query_max_resources_resp resp = {};
70 	struct gdma_general_req req = {};
71 	int err;
72 
73 	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
74 			     sizeof(req), sizeof(resp));
75 
76 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
77 	if (err || resp.hdr.status) {
78 		dev_err(gc->dev, "Failed to query resource info: %d, 0x%x\n",
79 			err, resp.hdr.status);
80 		return err ? err : -EPROTO;
81 	}
82 
83 	if (gc->num_msix_usable > resp.max_msix)
84 		gc->num_msix_usable = resp.max_msix;
85 
86 	if (gc->num_msix_usable <= 1)
87 		return -ENOSPC;
88 
89 	gc->max_num_queues = num_online_cpus();
90 	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
91 		gc->max_num_queues = MANA_MAX_NUM_QUEUES;
92 
93 	if (gc->max_num_queues > resp.max_eq)
94 		gc->max_num_queues = resp.max_eq;
95 
96 	if (gc->max_num_queues > resp.max_cq)
97 		gc->max_num_queues = resp.max_cq;
98 
99 	if (gc->max_num_queues > resp.max_sq)
100 		gc->max_num_queues = resp.max_sq;
101 
102 	if (gc->max_num_queues > resp.max_rq)
103 		gc->max_num_queues = resp.max_rq;
104 
105 	/* The Hardware Channel (HWC) uses 1 MSI-X */
106 	if (gc->max_num_queues > gc->num_msix_usable - 1)
107 		gc->max_num_queues = gc->num_msix_usable - 1;
108 
109 	return 0;
110 }
111 
112 static int mana_gd_query_hwc_timeout(struct pci_dev *pdev, u32 *timeout_val)
113 {
114 	struct gdma_context *gc = pci_get_drvdata(pdev);
115 	struct gdma_query_hwc_timeout_resp resp = {};
116 	struct gdma_query_hwc_timeout_req req = {};
117 	int err;
118 
119 	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_HWC_TIMEOUT,
120 			     sizeof(req), sizeof(resp));
121 	req.timeout_ms = *timeout_val;
122 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
123 	if (err || resp.hdr.status)
124 		return err ? err : -EPROTO;
125 
126 	*timeout_val = resp.timeout_ms;
127 
128 	return 0;
129 }
130 
131 static int mana_gd_detect_devices(struct pci_dev *pdev)
132 {
133 	struct gdma_context *gc = pci_get_drvdata(pdev);
134 	struct gdma_list_devices_resp resp = {};
135 	struct gdma_general_req req = {};
136 	struct gdma_dev_id dev;
137 	u32 i, max_num_devs;
138 	u16 dev_type;
139 	int err;
140 
141 	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
142 			     sizeof(resp));
143 
144 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
145 	if (err || resp.hdr.status) {
146 		dev_err(gc->dev, "Failed to detect devices: %d, 0x%x\n", err,
147 			resp.hdr.status);
148 		return err ? err : -EPROTO;
149 	}
150 
151 	max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
152 
153 	for (i = 0; i < max_num_devs; i++) {
154 		dev = resp.devs[i];
155 		dev_type = dev.type;
156 
157 		/* HWC is already detected in mana_hwc_create_channel(). */
158 		if (dev_type == GDMA_DEVICE_HWC)
159 			continue;
160 
161 		if (dev_type == GDMA_DEVICE_MANA) {
162 			gc->mana.gdma_context = gc;
163 			gc->mana.dev_id = dev;
164 		} else if (dev_type == GDMA_DEVICE_MANA_IB) {
165 			gc->mana_ib.dev_id = dev;
166 			gc->mana_ib.gdma_context = gc;
167 		}
168 	}
169 
170 	return gc->mana.dev_id.type == 0 ? -ENODEV : 0;
171 }
172 
173 int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
174 			 u32 resp_len, void *resp)
175 {
176 	struct hw_channel_context *hwc = gc->hwc.driver_data;
177 
178 	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
179 }
180 EXPORT_SYMBOL_NS(mana_gd_send_request, "NET_MANA");
181 
182 int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
183 			 struct gdma_mem_info *gmi)
184 {
185 	dma_addr_t dma_handle;
186 	void *buf;
187 
188 	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
189 		return -EINVAL;
190 
191 	gmi->dev = gc->dev;
192 	buf = dma_alloc_coherent(gmi->dev, length, &dma_handle, GFP_KERNEL);
193 	if (!buf)
194 		return -ENOMEM;
195 
196 	gmi->dma_handle = dma_handle;
197 	gmi->virt_addr = buf;
198 	gmi->length = length;
199 
200 	return 0;
201 }
202 
203 void mana_gd_free_memory(struct gdma_mem_info *gmi)
204 {
205 	dma_free_coherent(gmi->dev, gmi->length, gmi->virt_addr,
206 			  gmi->dma_handle);
207 }
208 
209 static int mana_gd_create_hw_eq(struct gdma_context *gc,
210 				struct gdma_queue *queue)
211 {
212 	struct gdma_create_queue_resp resp = {};
213 	struct gdma_create_queue_req req = {};
214 	int err;
215 
216 	if (queue->type != GDMA_EQ)
217 		return -EINVAL;
218 
219 	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
220 			     sizeof(req), sizeof(resp));
221 
222 	req.hdr.dev_id = queue->gdma_dev->dev_id;
223 	req.type = queue->type;
224 	req.pdid = queue->gdma_dev->pdid;
225 	req.doolbell_id = queue->gdma_dev->doorbell;
226 	req.gdma_region = queue->mem_info.dma_region_handle;
227 	req.queue_size = queue->queue_size;
228 	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
229 	req.eq_pci_msix_index = queue->eq.msix_index;
230 
231 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
232 	if (err || resp.hdr.status) {
233 		dev_err(gc->dev, "Failed to create queue: %d, 0x%x\n", err,
234 			resp.hdr.status);
235 		return err ? err : -EPROTO;
236 	}
237 
238 	queue->id = resp.queue_index;
239 	queue->eq.disable_needed = true;
240 	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
241 	return 0;
242 }
243 
244 static int mana_gd_disable_queue(struct gdma_queue *queue)
245 {
246 	struct gdma_context *gc = queue->gdma_dev->gdma_context;
247 	struct gdma_disable_queue_req req = {};
248 	struct gdma_general_resp resp = {};
249 	int err;
250 
251 	WARN_ON(queue->type != GDMA_EQ);
252 
253 	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
254 			     sizeof(req), sizeof(resp));
255 
256 	req.hdr.dev_id = queue->gdma_dev->dev_id;
257 	req.type = queue->type;
258 	req.queue_index = queue->id;
259 	req.alloc_res_id_on_creation = 1;
260 
261 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
262 	if (err || resp.hdr.status) {
263 		dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
264 			resp.hdr.status);
265 		return err ? err : -EPROTO;
266 	}
267 
268 	return 0;
269 }
270 
271 #define DOORBELL_OFFSET_SQ	0x0
272 #define DOORBELL_OFFSET_RQ	0x400
273 #define DOORBELL_OFFSET_CQ	0x800
274 #define DOORBELL_OFFSET_EQ	0xFF8
275 
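/* Build a doorbell entry for the given queue type and write it, as a single
 * 64-bit MMIO store, at the type-specific offset within this client's
 * doorbell page (db_page_base + db_page_size * db_index).
 */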
276 static void mana_gd_ring_doorbell(struct gdma_context *gc, u32 db_index,
277 				  enum gdma_queue_type q_type, u32 qid,
278 				  u32 tail_ptr, u8 num_req)
279 {
280 	void __iomem *addr = gc->db_page_base + gc->db_page_size * db_index;
281 	union gdma_doorbell_entry e = {};
282 
283 	switch (q_type) {
284 	case GDMA_EQ:
285 		e.eq.id = qid;
286 		e.eq.tail_ptr = tail_ptr;
287 		e.eq.arm = num_req;
288 
289 		addr += DOORBELL_OFFSET_EQ;
290 		break;
291 
292 	case GDMA_CQ:
293 		e.cq.id = qid;
294 		e.cq.tail_ptr = tail_ptr;
295 		e.cq.arm = num_req;
296 
297 		addr += DOORBELL_OFFSET_CQ;
298 		break;
299 
300 	case GDMA_RQ:
301 		e.rq.id = qid;
302 		e.rq.tail_ptr = tail_ptr;
303 		e.rq.wqe_cnt = num_req;
304 
305 		addr += DOORBELL_OFFSET_RQ;
306 		break;
307 
308 	case GDMA_SQ:
309 		e.sq.id = qid;
310 		e.sq.tail_ptr = tail_ptr;
311 
312 		addr += DOORBELL_OFFSET_SQ;
313 		break;
314 
315 	default:
316 		WARN_ON(1);
317 		return;
318 	}
319 
320 	/* Ensure all writes are done before ringing the doorbell */
321 	wmb();
322 
323 	writeq(e.as_uint64, addr);
324 }
325 
326 void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
327 {
328 	/* The hardware spec specifies that the software client should set
329 	 * wqe_cnt to 0 for Receive Queues. This value is not used in Send Queues.
330 	 */
331 	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
332 			      queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
333 }
334 
335 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
336 {
337 	struct gdma_context *gc = cq->gdma_dev->gdma_context;
338 
339 	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
340 
341 	u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
342 
343 	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
344 			      head, arm_bit);
345 }
346 
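/* Dispatch one EQE at the current head: completion events look up the CQ in
 * gc->cq_table and invoke its completion callback; test events complete
 * gc->eq_test_event; HWC-init and RNIC-QP-fatal events are forwarded to the
 * EQ's own callback.
 */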
347 static void mana_gd_process_eqe(struct gdma_queue *eq)
348 {
349 	u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
350 	struct gdma_context *gc = eq->gdma_dev->gdma_context;
351 	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
352 	union gdma_eqe_info eqe_info;
353 	enum gdma_eqe_type type;
354 	struct gdma_event event;
355 	struct gdma_queue *cq;
356 	struct gdma_eqe *eqe;
357 	u32 cq_id;
358 
359 	eqe = &eq_eqe_ptr[head];
360 	eqe_info.as_uint32 = eqe->eqe_info;
361 	type = eqe_info.type;
362 
363 	switch (type) {
364 	case GDMA_EQE_COMPLETION:
365 		cq_id = eqe->details[0] & 0xFFFFFF;
366 		if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
367 			break;
368 
369 		cq = gc->cq_table[cq_id];
370 		if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
371 			break;
372 
373 		if (cq->cq.callback)
374 			cq->cq.callback(cq->cq.context, cq);
375 
376 		break;
377 
378 	case GDMA_EQE_TEST_EVENT:
379 		gc->test_event_eq_id = eq->id;
380 		complete(&gc->eq_test_event);
381 		break;
382 
383 	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
384 	case GDMA_EQE_HWC_INIT_DATA:
385 	case GDMA_EQE_HWC_INIT_DONE:
386 	case GDMA_EQE_RNIC_QP_FATAL:
387 		if (!eq->eq.callback)
388 			break;
389 
390 		event.type = type;
391 		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
392 		eq->eq.callback(eq->eq.context, eq, &event);
393 		break;
394 
395 	default:
396 		break;
397 	}
398 }
399 
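/* EQ interrupt bottom half. Each EQE carries owner bits that advance every
 * time the queue wraps; comparing them against the values expected from
 * eq->head tells us whether the entry is stale (no more work), current
 * (process it), or unexpectedly ahead (overflow).
 */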
400 static void mana_gd_process_eq_events(void *arg)
401 {
402 	u32 owner_bits, new_bits, old_bits;
403 	union gdma_eqe_info eqe_info;
404 	struct gdma_eqe *eq_eqe_ptr;
405 	struct gdma_queue *eq = arg;
406 	struct gdma_context *gc;
407 	struct gdma_eqe *eqe;
408 	u32 head, num_eqe;
409 	int i;
410 
411 	gc = eq->gdma_dev->gdma_context;
412 
413 	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
414 	eq_eqe_ptr = eq->queue_mem_ptr;
415 
416 	/* Process up to 5 EQEs at a time, and update the HW head. */
417 	for (i = 0; i < 5; i++) {
418 		eqe = &eq_eqe_ptr[eq->head % num_eqe];
419 		eqe_info.as_uint32 = eqe->eqe_info;
420 		owner_bits = eqe_info.owner_bits;
421 
422 		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
423 		/* No more entries */
424 		if (owner_bits == old_bits) {
425 			/* return here without ringing the doorbell */
426 			if (i == 0)
427 				return;
428 			break;
429 		}
430 
431 		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
432 		if (owner_bits != new_bits) {
433 			dev_err(gc->dev, "EQ %d: overflow detected\n", eq->id);
434 			break;
435 		}
436 
437 		/* Per GDMA spec, rmb is necessary after checking owner_bits, before
438 		 * reading eqe.
439 		 */
440 		rmb();
441 
442 		mana_gd_process_eqe(eq);
443 
444 		eq->head++;
445 	}
446 
447 	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
448 
449 	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
450 			      head, SET_ARM_BIT);
451 }
452 
453 static int mana_gd_register_irq(struct gdma_queue *queue,
454 				const struct gdma_queue_spec *spec)
455 {
456 	struct gdma_dev *gd = queue->gdma_dev;
457 	struct gdma_irq_context *gic;
458 	struct gdma_context *gc;
459 	unsigned int msi_index;
460 	unsigned long flags;
461 	struct device *dev;
462 	int err = 0;
463 
464 	gc = gd->gdma_context;
465 	dev = gc->dev;
466 	msi_index = spec->eq.msix_index;
467 
468 	if (msi_index >= gc->num_msix_usable) {
469 		err = -ENOSPC;
470 		dev_err(dev, "Register IRQ err:%d, msi:%u nMSI:%u",
471 			err, msi_index, gc->num_msix_usable);
472 
473 		return err;
474 	}
475 
476 	queue->eq.msix_index = msi_index;
477 	gic = &gc->irq_contexts[msi_index];
478 
479 	spin_lock_irqsave(&gic->lock, flags);
480 	list_add_rcu(&queue->entry, &gic->eq_list);
481 	spin_unlock_irqrestore(&gic->lock, flags);
482 
483 	return 0;
484 }
485 
486 static void mana_gd_deregister_irq(struct gdma_queue *queue)
487 {
488 	struct gdma_dev *gd = queue->gdma_dev;
489 	struct gdma_irq_context *gic;
490 	struct gdma_context *gc;
491 	unsigned int msix_index;
492 	unsigned long flags;
493 	struct gdma_queue *eq;
494 
495 	gc = gd->gdma_context;
496 
497 	/* At most num_online_cpus() + 1 interrupts are used. */
498 	msix_index = queue->eq.msix_index;
499 	if (WARN_ON(msix_index >= gc->num_msix_usable))
500 		return;
501 
502 	gic = &gc->irq_contexts[msix_index];
503 	spin_lock_irqsave(&gic->lock, flags);
504 	list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
505 		if (queue == eq) {
506 			list_del_rcu(&eq->entry);
507 			break;
508 		}
509 	}
510 	spin_unlock_irqrestore(&gic->lock, flags);
511 
512 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
513 	synchronize_rcu();
514 }
515 
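/* Ask the device to generate a test EQE on @eq and wait (up to 30 seconds)
 * for it to arrive, verifying that the EQ and its interrupt path work.
 * Also used to flush pending events before an EQ is destroyed.
 */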
516 int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
517 {
518 	struct gdma_generate_test_event_req req = {};
519 	struct gdma_general_resp resp = {};
520 	struct device *dev = gc->dev;
521 	int err;
522 
523 	mutex_lock(&gc->eq_test_event_mutex);
524 
525 	init_completion(&gc->eq_test_event);
526 	gc->test_event_eq_id = INVALID_QUEUE_ID;
527 
528 	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
529 			     sizeof(req), sizeof(resp));
530 
531 	req.hdr.dev_id = eq->gdma_dev->dev_id;
532 	req.queue_index = eq->id;
533 
534 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
535 	if (err) {
536 		dev_err(dev, "test_eq failed: %d\n", err);
537 		goto out;
538 	}
539 
540 	err = -EPROTO;
541 
542 	if (resp.hdr.status) {
543 		dev_err(dev, "test_eq failed: 0x%x\n", resp.hdr.status);
544 		goto out;
545 	}
546 
547 	if (!wait_for_completion_timeout(&gc->eq_test_event, 30 * HZ)) {
548 		dev_err(dev, "test_eq timed out on queue %d\n", eq->id);
549 		goto out;
550 	}
551 
552 	if (eq->id != gc->test_event_eq_id) {
553 		dev_err(dev, "test_eq got an event on wrong queue %d (%d)\n",
554 			gc->test_event_eq_id, eq->id);
555 		goto out;
556 	}
557 
558 	err = 0;
559 out:
560 	mutex_unlock(&gc->eq_test_event_mutex);
561 	return err;
562 }
563 
564 static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
565 			       struct gdma_queue *queue)
566 {
567 	int err;
568 
569 	if (flush_events) {
570 		err = mana_gd_test_eq(gc, queue);
571 		if (err)
572 			dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
573 	}
574 
575 	mana_gd_deregister_irq(queue);
576 
577 	if (queue->eq.disable_needed)
578 		mana_gd_disable_queue(queue);
579 }
580 
581 static int mana_gd_create_eq(struct gdma_dev *gd,
582 			     const struct gdma_queue_spec *spec,
583 			     bool create_hwq, struct gdma_queue *queue)
584 {
585 	struct gdma_context *gc = gd->gdma_context;
586 	struct device *dev = gc->dev;
587 	u32 log2_num_entries;
588 	int err;
589 
590 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
591 	queue->id = INVALID_QUEUE_ID;
592 
593 	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
594 
595 	if (spec->eq.log2_throttle_limit > log2_num_entries) {
596 		dev_err(dev, "EQ throttling limit (%lu) > maximum EQE (%u)\n",
597 			spec->eq.log2_throttle_limit, log2_num_entries);
598 		return -EINVAL;
599 	}
600 
601 	err = mana_gd_register_irq(queue, spec);
602 	if (err) {
603 		dev_err(dev, "Failed to register irq: %d\n", err);
604 		return err;
605 	}
606 
607 	queue->eq.callback = spec->eq.callback;
608 	queue->eq.context = spec->eq.context;
609 	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
610 	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
611 
612 	if (create_hwq) {
613 		err = mana_gd_create_hw_eq(gc, queue);
614 		if (err)
615 			goto out;
616 
617 		err = mana_gd_test_eq(gc, queue);
618 		if (err)
619 			goto out;
620 	}
621 
622 	return 0;
623 out:
624 	dev_err(dev, "Failed to create EQ: %d\n", err);
625 	mana_gd_destroy_eq(gc, false, queue);
626 	return err;
627 }
628 
629 static void mana_gd_create_cq(const struct gdma_queue_spec *spec,
630 			      struct gdma_queue *queue)
631 {
632 	u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);
633 
634 	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
635 	queue->cq.parent = spec->cq.parent_eq;
636 	queue->cq.context = spec->cq.context;
637 	queue->cq.callback = spec->cq.callback;
638 }
639 
640 static void mana_gd_destroy_cq(struct gdma_context *gc,
641 			       struct gdma_queue *queue)
642 {
643 	u32 id = queue->id;
644 
645 	if (id >= gc->max_num_cqs)
646 		return;
647 
648 	if (!gc->cq_table[id])
649 		return;
650 
651 	gc->cq_table[id] = NULL;
652 }
653 
654 int mana_gd_create_hwc_queue(struct gdma_dev *gd,
655 			     const struct gdma_queue_spec *spec,
656 			     struct gdma_queue **queue_ptr)
657 {
658 	struct gdma_context *gc = gd->gdma_context;
659 	struct gdma_mem_info *gmi;
660 	struct gdma_queue *queue;
661 	int err;
662 
663 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
664 	if (!queue)
665 		return -ENOMEM;
666 
667 	gmi = &queue->mem_info;
668 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
669 	if (err)
670 		goto free_q;
671 
672 	queue->head = 0;
673 	queue->tail = 0;
674 	queue->queue_mem_ptr = gmi->virt_addr;
675 	queue->queue_size = spec->queue_size;
676 	queue->monitor_avl_buf = spec->monitor_avl_buf;
677 	queue->type = spec->type;
678 	queue->gdma_dev = gd;
679 
680 	if (spec->type == GDMA_EQ)
681 		err = mana_gd_create_eq(gd, spec, false, queue);
682 	else if (spec->type == GDMA_CQ)
683 		mana_gd_create_cq(spec, queue);
684 
685 	if (err)
686 		goto out;
687 
688 	*queue_ptr = queue;
689 	return 0;
690 out:
691 	mana_gd_free_memory(gmi);
692 free_q:
693 	kfree(queue);
694 	return err;
695 }
696 
697 int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
698 {
699 	struct gdma_destroy_dma_region_req req = {};
700 	struct gdma_general_resp resp = {};
701 	int err;
702 
703 	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
704 		return 0;
705 
706 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
707 			     sizeof(resp));
708 	req.dma_region_handle = dma_region_handle;
709 
710 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
711 	if (err || resp.hdr.status) {
712 		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
713 			err, resp.hdr.status);
714 		return -EPROTO;
715 	}
716 
717 	return 0;
718 }
719 EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, "NET_MANA");
720 
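/* Register the coherent buffer described by @gmi with the device: build a
 * list of MANA_PAGE_SIZE page addresses and send a GDMA_CREATE_DMA_REGION
 * request over the HWC. The whole request must fit in the HWC's maximum
 * message size, which bounds the region size that can be created this way.
 */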
721 static int mana_gd_create_dma_region(struct gdma_dev *gd,
722 				     struct gdma_mem_info *gmi)
723 {
724 	unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
725 	struct gdma_create_dma_region_req *req = NULL;
726 	struct gdma_create_dma_region_resp resp = {};
727 	struct gdma_context *gc = gd->gdma_context;
728 	struct hw_channel_context *hwc;
729 	u32 length = gmi->length;
730 	size_t req_msg_size;
731 	int err;
732 	int i;
733 
734 	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
735 		return -EINVAL;
736 
737 	if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
738 		return -EINVAL;
739 
740 	hwc = gc->hwc.driver_data;
741 	req_msg_size = struct_size(req, page_addr_list, num_page);
742 	if (req_msg_size > hwc->max_req_msg_size)
743 		return -EINVAL;
744 
745 	req = kzalloc(req_msg_size, GFP_KERNEL);
746 	if (!req)
747 		return -ENOMEM;
748 
749 	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
750 			     req_msg_size, sizeof(resp));
751 	req->length = length;
752 	req->offset_in_page = 0;
753 	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
754 	req->page_count = num_page;
755 	req->page_addr_list_len = num_page;
756 
757 	for (i = 0; i < num_page; i++)
758 		req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
759 
760 	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
761 	if (err)
762 		goto out;
763 
764 	if (resp.hdr.status ||
765 	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
766 		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
767 			resp.hdr.status);
768 		err = -EPROTO;
769 		goto out;
770 	}
771 
772 	gmi->dma_region_handle = resp.dma_region_handle;
773 out:
774 	kfree(req);
775 	return err;
776 }
777 
778 int mana_gd_create_mana_eq(struct gdma_dev *gd,
779 			   const struct gdma_queue_spec *spec,
780 			   struct gdma_queue **queue_ptr)
781 {
782 	struct gdma_context *gc = gd->gdma_context;
783 	struct gdma_mem_info *gmi;
784 	struct gdma_queue *queue;
785 	int err;
786 
787 	if (spec->type != GDMA_EQ)
788 		return -EINVAL;
789 
790 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
791 	if (!queue)
792 		return -ENOMEM;
793 
794 	gmi = &queue->mem_info;
795 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
796 	if (err)
797 		goto free_q;
798 
799 	err = mana_gd_create_dma_region(gd, gmi);
800 	if (err)
801 		goto out;
802 
803 	queue->head = 0;
804 	queue->tail = 0;
805 	queue->queue_mem_ptr = gmi->virt_addr;
806 	queue->queue_size = spec->queue_size;
807 	queue->monitor_avl_buf = spec->monitor_avl_buf;
808 	queue->type = spec->type;
809 	queue->gdma_dev = gd;
810 
811 	err = mana_gd_create_eq(gd, spec, true, queue);
812 	if (err)
813 		goto out;
814 
815 	*queue_ptr = queue;
816 	return 0;
817 out:
818 	mana_gd_free_memory(gmi);
819 free_q:
820 	kfree(queue);
821 	return err;
822 }
823 EXPORT_SYMBOL_NS(mana_gd_create_mana_eq, "NET_MANA");
824 
825 int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
826 			      const struct gdma_queue_spec *spec,
827 			      struct gdma_queue **queue_ptr)
828 {
829 	struct gdma_context *gc = gd->gdma_context;
830 	struct gdma_mem_info *gmi;
831 	struct gdma_queue *queue;
832 	int err;
833 
834 	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
835 	    spec->type != GDMA_RQ)
836 		return -EINVAL;
837 
838 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
839 	if (!queue)
840 		return -ENOMEM;
841 
842 	gmi = &queue->mem_info;
843 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
844 	if (err)
845 		goto free_q;
846 
847 	err = mana_gd_create_dma_region(gd, gmi);
848 	if (err)
849 		goto out;
850 
851 	queue->head = 0;
852 	queue->tail = 0;
853 	queue->queue_mem_ptr = gmi->virt_addr;
854 	queue->queue_size = spec->queue_size;
855 	queue->monitor_avl_buf = spec->monitor_avl_buf;
856 	queue->type = spec->type;
857 	queue->gdma_dev = gd;
858 
859 	if (spec->type == GDMA_CQ)
860 		mana_gd_create_cq(spec, queue);
861 
862 	*queue_ptr = queue;
863 	return 0;
864 out:
865 	mana_gd_free_memory(gmi);
866 free_q:
867 	kfree(queue);
868 	return err;
869 }
870 
871 void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
872 {
873 	struct gdma_mem_info *gmi = &queue->mem_info;
874 
875 	switch (queue->type) {
876 	case GDMA_EQ:
877 		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
878 		break;
879 
880 	case GDMA_CQ:
881 		mana_gd_destroy_cq(gc, queue);
882 		break;
883 
884 	case GDMA_RQ:
885 		break;
886 
887 	case GDMA_SQ:
888 		break;
889 
890 	default:
891 		dev_err(gc->dev, "Can't destroy unknown queue: type=%d\n",
892 			queue->type);
893 		return;
894 	}
895 
896 	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
897 	mana_gd_free_memory(gmi);
898 	kfree(queue);
899 }
900 EXPORT_SYMBOL_NS(mana_gd_destroy_queue, "NET_MANA");
901 
902 int mana_gd_verify_vf_version(struct pci_dev *pdev)
903 {
904 	struct gdma_context *gc = pci_get_drvdata(pdev);
905 	struct gdma_verify_ver_resp resp = {};
906 	struct gdma_verify_ver_req req = {};
907 	struct hw_channel_context *hwc;
908 	int err;
909 
910 	hwc = gc->hwc.driver_data;
911 	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
912 			     sizeof(req), sizeof(resp));
913 
914 	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
915 	req.protocol_ver_max = GDMA_PROTOCOL_LAST;
916 
917 	req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
918 	req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
919 	req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
920 	req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
921 
922 	req.drv_ver = 0;	/* Unused */
923 	req.os_type = 0x10;	/* Linux */
924 	req.os_ver_major = LINUX_VERSION_MAJOR;
925 	req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
926 	req.os_ver_build = LINUX_VERSION_SUBLEVEL;
927 	strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
928 	strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
929 	strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));
930 
931 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
932 	if (err || resp.hdr.status) {
933 		dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
934 			err, resp.hdr.status);
935 		return err ? err : -EPROTO;
936 	}
937 	if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
938 		err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
939 		if (err) {
940 			dev_err(gc->dev, "Failed to set the hwc timeout %d\n", err);
941 			return err;
942 		}
943 		dev_dbg(gc->dev, "set the hwc timeout to %u\n", hwc->hwc_timeout);
944 	}
945 	return 0;
946 }
947 
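/* Negotiate the GDMA protocol version and driver capability flags with the
 * PF/host, and report basic OS version information. If the peer supports HWC
 * timeout reconfiguration, also query and apply the HWC timeout value.
 */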
948 int mana_gd_register_device(struct gdma_dev *gd)
949 {
950 	struct gdma_context *gc = gd->gdma_context;
951 	struct gdma_register_device_resp resp = {};
952 	struct gdma_general_req req = {};
953 	int err;
954 
955 	gd->pdid = INVALID_PDID;
956 	gd->doorbell = INVALID_DOORBELL;
957 	gd->gpa_mkey = INVALID_MEM_KEY;
958 
959 	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
960 			     sizeof(resp));
961 
962 	req.hdr.dev_id = gd->dev_id;
963 
964 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
965 	if (err || resp.hdr.status) {
966 		dev_err(gc->dev, "gdma_register_device_resp failed: %d, 0x%x\n",
967 			err, resp.hdr.status);
968 		return err ? err : -EPROTO;
969 	}
970 
971 	gd->pdid = resp.pdid;
972 	gd->gpa_mkey = resp.gpa_mkey;
973 	gd->doorbell = resp.db_id;
974 
975 	return 0;
976 }
977 EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA");
978 
979 int mana_gd_deregister_device(struct gdma_dev *gd)
980 {
981 	struct gdma_context *gc = gd->gdma_context;
982 	struct gdma_general_resp resp = {};
983 	struct gdma_general_req req = {};
984 	int err;
985 
986 	if (gd->pdid == INVALID_PDID)
987 		return -EINVAL;
988 
989 	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
990 			     sizeof(resp));
991 
992 	req.hdr.dev_id = gd->dev_id;
993 
994 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
995 	if (err || resp.hdr.status) {
996 		dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
997 			err, resp.hdr.status);
998 		if (!err)
999 			err = -EPROTO;
1000 	}
1001 
1002 	gd->pdid = INVALID_PDID;
1003 	gd->doorbell = INVALID_DOORBELL;
1004 	gd->gpa_mkey = INVALID_MEM_KEY;
1005 
1006 	return err;
1007 }
1008 EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA");
1009 
1010 u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
1011 {
1012 	u32 used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
1013 	u32 wq_size = wq->queue_size;
1014 
1015 	WARN_ON_ONCE(used_space > wq_size);
1016 
1017 	return wq_size - used_space;
1018 }
1019 
1020 u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset)
1021 {
1022 	u32 offset = (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);
1023 
1024 	WARN_ON_ONCE((offset + GDMA_WQE_BU_SIZE) > wq->queue_size);
1025 
1026 	return wq->queue_mem_ptr + offset;
1027 }
1028 
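/* Write the fixed WQE header and the inline client OOB data at @wqe_ptr.
 * The layout is: struct gdma_wqe header, then client_oob_size bytes of
 * inline OOB (zero-padded if the caller supplied less), followed by the
 * SGL. Returns the number of header + OOB bytes consumed, so the caller
 * knows where the SGL starts.
 */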
1029 static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
1030 				    enum gdma_queue_type q_type,
1031 				    u32 client_oob_size, u32 sgl_data_size,
1032 				    u8 *wqe_ptr)
1033 {
1034 	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
1035 	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
1036 	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
1037 	u8 *ptr;
1038 
1039 	memset(header, 0, sizeof(struct gdma_wqe));
1040 	header->num_sge = wqe_req->num_sge;
1041 	header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
1042 
1043 	if (oob_in_sgl) {
1044 		WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
1045 
1046 		header->client_oob_in_sgl = 1;
1047 
1048 		if (pad_data)
1049 			header->last_vbytes = wqe_req->sgl[0].size;
1050 	}
1051 
1052 	if (q_type == GDMA_SQ)
1053 		header->client_data_unit = wqe_req->client_data_unit;
1054 
1055 	/* The size of gdma_wqe + client_oob_size must be less than or equal
1056 	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
1057 	 * the queue memory buffer boundary.
1058 	 */
1059 	ptr = wqe_ptr + sizeof(header);
1060 
1061 	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
1062 		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);
1063 
1064 		if (client_oob_size > wqe_req->inline_oob_size)
1065 			memset(ptr + wqe_req->inline_oob_size, 0,
1066 			       client_oob_size - wqe_req->inline_oob_size);
1067 	}
1068 
1069 	return sizeof(header) + client_oob_size;
1070 }
1071 
1072 static void mana_gd_write_sgl(struct gdma_queue *wq, u8 *wqe_ptr,
1073 			      const struct gdma_wqe_request *wqe_req)
1074 {
1075 	u32 sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
1076 	const u8 *address = (u8 *)wqe_req->sgl;
1077 	u8 *base_ptr, *end_ptr;
1078 	u32 size_to_end;
1079 
1080 	base_ptr = wq->queue_mem_ptr;
1081 	end_ptr = base_ptr + wq->queue_size;
1082 	size_to_end = (u32)(end_ptr - wqe_ptr);
1083 
1084 	if (size_to_end < sgl_size) {
1085 		memcpy(wqe_ptr, address, size_to_end);
1086 
1087 		wqe_ptr = base_ptr;
1088 		address += size_to_end;
1089 		sgl_size -= size_to_end;
1090 	}
1091 
1092 	memcpy(wqe_ptr, address, sgl_size);
1093 }
1094 
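/* Write one WQE (header, inline OOB and SGL, with wrap-around handling) into
 * the work queue and advance wq->head. The OOB size is validated per queue
 * type, and if monitor_avl_buf is set the available space is checked first.
 * The doorbell is not rung here; use mana_gd_wq_ring_doorbell() or
 * mana_gd_post_and_ring() for that.
 */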
1095 int mana_gd_post_work_request(struct gdma_queue *wq,
1096 			      const struct gdma_wqe_request *wqe_req,
1097 			      struct gdma_posted_wqe_info *wqe_info)
1098 {
1099 	u32 client_oob_size = wqe_req->inline_oob_size;
1100 	struct gdma_context *gc;
1101 	u32 sgl_data_size;
1102 	u32 max_wqe_size;
1103 	u32 wqe_size;
1104 	u8 *wqe_ptr;
1105 
1106 	if (wqe_req->num_sge == 0)
1107 		return -EINVAL;
1108 
1109 	if (wq->type == GDMA_RQ) {
1110 		if (client_oob_size != 0)
1111 			return -EINVAL;
1112 
1113 		client_oob_size = INLINE_OOB_SMALL_SIZE;
1114 
1115 		max_wqe_size = GDMA_MAX_RQE_SIZE;
1116 	} else {
1117 		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
1118 		    client_oob_size != INLINE_OOB_LARGE_SIZE)
1119 			return -EINVAL;
1120 
1121 		max_wqe_size = GDMA_MAX_SQE_SIZE;
1122 	}
1123 
1124 	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
1125 	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
1126 			 sgl_data_size, GDMA_WQE_BU_SIZE);
1127 	if (wqe_size > max_wqe_size)
1128 		return -EINVAL;
1129 
1130 	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
1131 		gc = wq->gdma_dev->gdma_context;
1132 		dev_err(gc->dev, "unsuccessful flow control!\n");
1133 		return -ENOSPC;
1134 	}
1135 
1136 	if (wqe_info)
1137 		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
1138 
1139 	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
1140 	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
1141 					    sgl_data_size, wqe_ptr);
1142 	if (wqe_ptr >= (u8 *)wq->queue_mem_ptr + wq->queue_size)
1143 		wqe_ptr -= wq->queue_size;
1144 
1145 	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);
1146 
1147 	wq->head += wqe_size / GDMA_WQE_BU_SIZE;
1148 
1149 	return 0;
1150 }
1151 
1152 int mana_gd_post_and_ring(struct gdma_queue *queue,
1153 			  const struct gdma_wqe_request *wqe_req,
1154 			  struct gdma_posted_wqe_info *wqe_info)
1155 {
1156 	struct gdma_context *gc = queue->gdma_dev->gdma_context;
1157 	int err;
1158 
1159 	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
1160 	if (err)
1161 		return err;
1162 
1163 	mana_gd_wq_ring_doorbell(gc, queue);
1164 
1165 	return 0;
1166 }
1167 
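/* Read the CQE at cq->head, using the owner bits to decide its state.
 * Returns 1 if a new completion was copied into @comp, 0 if the queue is
 * empty, and -1 if an overflow is detected.
 */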
1168 static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
1169 {
1170 	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
1171 	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
1172 	u32 owner_bits, new_bits, old_bits;
1173 	struct gdma_cqe *cqe;
1174 
1175 	cqe = &cq_cqe[cq->head % num_cqe];
1176 	owner_bits = cqe->cqe_info.owner_bits;
1177 
1178 	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
1179 	/* Return 0 if no more entries. */
1180 	if (owner_bits == old_bits)
1181 		return 0;
1182 
1183 	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
1184 	/* Return -1 if overflow detected. */
1185 	if (WARN_ON_ONCE(owner_bits != new_bits))
1186 		return -1;
1187 
1188 	/* Per GDMA spec, rmb is necessary after checking owner_bits, before
1189 	 * reading completion info
1190 	 */
1191 	rmb();
1192 
1193 	comp->wq_num = cqe->cqe_info.wq_num;
1194 	comp->is_sq = cqe->cqe_info.is_sq;
1195 	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
1196 
1197 	return 1;
1198 }
1199 
1200 int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
1201 {
1202 	int cqe_idx;
1203 	int ret;
1204 
1205 	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
1206 		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);
1207 
1208 		if (ret < 0) {
1209 			cq->head -= cqe_idx;
1210 			return ret;
1211 		}
1212 
1213 		if (ret == 0)
1214 			break;
1215 
1216 		cq->head++;
1217 	}
1218 
1219 	return cqe_idx;
1220 }
1221 
1222 static irqreturn_t mana_gd_intr(int irq, void *arg)
1223 {
1224 	struct gdma_irq_context *gic = arg;
1225 	struct list_head *eq_list = &gic->eq_list;
1226 	struct gdma_queue *eq;
1227 
1228 	rcu_read_lock();
1229 	list_for_each_entry_rcu(eq, eq_list, entry) {
1230 		gic->handler(eq);
1231 	}
1232 	rcu_read_unlock();
1233 
1234 	return IRQ_HANDLED;
1235 }
1236 
1237 int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r)
1238 {
1239 	r->map = bitmap_zalloc(res_avail, GFP_KERNEL);
1240 	if (!r->map)
1241 		return -ENOMEM;
1242 
1243 	r->size = res_avail;
1244 	spin_lock_init(&r->lock);
1245 
1246 	return 0;
1247 }
1248 
1249 void mana_gd_free_res_map(struct gdma_resource *r)
1250 {
1251 	bitmap_free(r->map);
1252 	r->map = NULL;
1253 	r->size = 0;
1254 }
1255 
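/* Spread @len IRQs over the CPUs nearest to @node: walk the NUMA hop masks
 * outward and, within each hop, pin every IRQ to one SMT sibling group so
 * that, as far as possible, each physical core services at most one IRQ.
 */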
1256 static int irq_setup(unsigned int *irqs, unsigned int len, int node)
1257 {
1258 	const struct cpumask *next, *prev = cpu_none_mask;
1259 	cpumask_var_t cpus __free(free_cpumask_var);
1260 	int cpu, weight;
1261 
1262 	if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
1263 		return -ENOMEM;
1264 
1265 	rcu_read_lock();
1266 	for_each_numa_hop_mask(next, node) {
1267 		weight = cpumask_weight_andnot(next, prev);
1268 		while (weight > 0) {
1269 			cpumask_andnot(cpus, next, prev);
1270 			for_each_cpu(cpu, cpus) {
1271 				if (len-- == 0)
1272 					goto done;
1273 				irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
1274 				cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
1275 				--weight;
1276 			}
1277 		}
1278 		prev = next;
1279 	}
1280 done:
1281 	rcu_read_unlock();
1282 	return 0;
1283 }
1284 
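/* Allocate MSI-X vectors (at least 2: one dedicated to the HWC plus queue
 * vectors), set up a gdma_irq_context with an EQ list per vector, request
 * the interrupts, and spread their affinity across the device's NUMA node
 * via irq_setup(). When fewer vectors than online CPUs + 1 are granted,
 * the HWC IRQ is included in the same affinity spread as the queue IRQs.
 */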
1285 static int mana_gd_setup_irqs(struct pci_dev *pdev)
1286 {
1287 	struct gdma_context *gc = pci_get_drvdata(pdev);
1288 	unsigned int max_queues_per_port;
1289 	struct gdma_irq_context *gic;
1290 	unsigned int max_irqs, cpu;
1291 	int start_irq_index = 1;
1292 	int nvec, *irqs, irq;
1293 	int err, i = 0, j;
1294 
1295 	cpus_read_lock();
1296 	max_queues_per_port = num_online_cpus();
1297 	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
1298 		max_queues_per_port = MANA_MAX_NUM_QUEUES;
1299 
1300 	/* Need 1 interrupt for the Hardware communication Channel (HWC) */
1301 	max_irqs = max_queues_per_port + 1;
1302 
1303 	nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
1304 	if (nvec < 0) {
1305 		cpus_read_unlock();
1306 		return nvec;
1307 	}
1308 	if (nvec <= num_online_cpus())
1309 		start_irq_index = 0;
1310 
1311 	irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
1312 	if (!irqs) {
1313 		err = -ENOMEM;
1314 		goto free_irq_vector;
1315 	}
1316 
1317 	gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
1318 				   GFP_KERNEL);
1319 	if (!gc->irq_contexts) {
1320 		err = -ENOMEM;
1321 		goto free_irq_array;
1322 	}
1323 
1324 	for (i = 0; i < nvec; i++) {
1325 		gic = &gc->irq_contexts[i];
1326 		gic->handler = mana_gd_process_eq_events;
1327 		INIT_LIST_HEAD(&gic->eq_list);
1328 		spin_lock_init(&gic->lock);
1329 
1330 		if (!i)
1331 			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s",
1332 				 pci_name(pdev));
1333 		else
1334 			snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
1335 				 i - 1, pci_name(pdev));
1336 
1337 		irq = pci_irq_vector(pdev, i);
1338 		if (irq < 0) {
1339 			err = irq;
1340 			goto free_irq;
1341 		}
1342 
1343 		if (!i) {
1344 			err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
1345 			if (err)
1346 				goto free_irq;
1347 
1348 			/* If the number of IRQs is one more than the number of
1349 			 * online CPUs, assign IRQ0 (the HWC IRQ) and IRQ1 to
1350 			 * the same CPU.
1351 			 * Otherwise, use different CPUs for IRQ0 and IRQ1.
1352 			 * Also, use cpumask_local_spread() instead of
1353 			 * cpumask_first() for the node, because the node can
1354 			 * be memory-only.
1355 			 */
1356 			if (start_irq_index) {
1357 				cpu = cpumask_local_spread(i, gc->numa_node);
1358 				irq_set_affinity_and_hint(irq, cpumask_of(cpu));
1359 			} else {
1360 				irqs[start_irq_index] = irq;
1361 			}
1362 		} else {
1363 			irqs[i - start_irq_index] = irq;
1364 			err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
1365 					  gic->name, gic);
1366 			if (err)
1367 				goto free_irq;
1368 		}
1369 	}
1370 
1371 	err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
1372 	if (err)
1373 		goto free_irq;
1374 
1375 	gc->max_num_msix = nvec;
1376 	gc->num_msix_usable = nvec;
1377 	cpus_read_unlock();
1378 	kfree(irqs);
1379 	return 0;
1380 
1381 free_irq:
1382 	for (j = i - 1; j >= 0; j--) {
1383 		irq = pci_irq_vector(pdev, j);
1384 		gic = &gc->irq_contexts[j];
1385 
1386 		irq_update_affinity_hint(irq, NULL);
1387 		free_irq(irq, gic);
1388 	}
1389 
1390 	kfree(gc->irq_contexts);
1391 	gc->irq_contexts = NULL;
1392 free_irq_array:
1393 	kfree(irqs);
1394 free_irq_vector:
1395 	cpus_read_unlock();
1396 	pci_free_irq_vectors(pdev);
1397 	return err;
1398 }
1399 
1400 static void mana_gd_remove_irqs(struct pci_dev *pdev)
1401 {
1402 	struct gdma_context *gc = pci_get_drvdata(pdev);
1403 	struct gdma_irq_context *gic;
1404 	int irq, i;
1405 
1406 	if (gc->max_num_msix < 1)
1407 		return;
1408 
1409 	for (i = 0; i < gc->max_num_msix; i++) {
1410 		irq = pci_irq_vector(pdev, i);
1411 		if (irq < 0)
1412 			continue;
1413 
1414 		gic = &gc->irq_contexts[i];
1415 
1416 		/* Need to clear the hint before free_irq */
1417 		irq_update_affinity_hint(irq, NULL);
1418 		free_irq(irq, gic);
1419 	}
1420 
1421 	pci_free_irq_vectors(pdev);
1422 
1423 	gc->max_num_msix = 0;
1424 	gc->num_msix_usable = 0;
1425 	kfree(gc->irq_contexts);
1426 	gc->irq_contexts = NULL;
1427 }
1428 
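/* Bring up the GDMA layer: program the register-derived bases, init the
 * shared-memory channel, set up MSI-X, create the HWC, verify the driver
 * version, query resource limits and detect child devices. Shared by the
 * probe and resume paths.
 */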
1429 static int mana_gd_setup(struct pci_dev *pdev)
1430 {
1431 	struct gdma_context *gc = pci_get_drvdata(pdev);
1432 	int err;
1433 
1434 	mana_gd_init_registers(pdev);
1435 	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
1436 
1437 	err = mana_gd_setup_irqs(pdev);
1438 	if (err)
1439 		return err;
1440 
1441 	err = mana_hwc_create_channel(gc);
1442 	if (err)
1443 		goto remove_irq;
1444 
1445 	err = mana_gd_verify_vf_version(pdev);
1446 	if (err)
1447 		goto destroy_hwc;
1448 
1449 	err = mana_gd_query_max_resources(pdev);
1450 	if (err)
1451 		goto destroy_hwc;
1452 
1453 	err = mana_gd_detect_devices(pdev);
1454 	if (err)
1455 		goto destroy_hwc;
1456 
1457 	return 0;
1458 
1459 destroy_hwc:
1460 	mana_hwc_destroy_channel(gc);
1461 remove_irq:
1462 	mana_gd_remove_irqs(pdev);
1463 	return err;
1464 }
1465 
1466 static void mana_gd_cleanup(struct pci_dev *pdev)
1467 {
1468 	struct gdma_context *gc = pci_get_drvdata(pdev);
1469 
1470 	mana_hwc_destroy_channel(gc);
1471 
1472 	mana_gd_remove_irqs(pdev);
1473 }
1474 
1475 static bool mana_is_pf(unsigned short dev_id)
1476 {
1477 	return dev_id == MANA_PF_DEVICE_ID;
1478 }
1479 
1480 static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1481 {
1482 	struct gdma_context *gc;
1483 	void __iomem *bar0_va;
1484 	int bar = 0;
1485 	int err;
1486 
1487 	/* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
1488 	BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
1489 
1490 	err = pci_enable_device(pdev);
1491 	if (err)
1492 		return -ENXIO;
1493 
1494 	pci_set_master(pdev);
1495 
1496 	err = pci_request_regions(pdev, "mana");
1497 	if (err)
1498 		goto disable_dev;
1499 
1500 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1501 	if (err)
1502 		goto release_region;
1503 
1504 	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
1505 
1506 	err = -ENOMEM;
1507 	gc = vzalloc(sizeof(*gc));
1508 	if (!gc)
1509 		goto release_region;
1510 
1511 	mutex_init(&gc->eq_test_event_mutex);
1512 	pci_set_drvdata(pdev, gc);
1513 	gc->bar0_pa = pci_resource_start(pdev, 0);
1514 
1515 	bar0_va = pci_iomap(pdev, bar, 0);
1516 	if (!bar0_va)
1517 		goto free_gc;
1518 
1519 	gc->numa_node = dev_to_node(&pdev->dev);
1520 	gc->is_pf = mana_is_pf(pdev->device);
1521 	gc->bar0_va = bar0_va;
1522 	gc->dev = &pdev->dev;
1523 
1524 	if (gc->is_pf)
1525 		gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
1526 	else
1527 		gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
1528 							  mana_debugfs_root);
1529 
1530 	err = mana_gd_setup(pdev);
1531 	if (err)
1532 		goto unmap_bar;
1533 
1534 	err = mana_probe(&gc->mana, false);
1535 	if (err)
1536 		goto cleanup_gd;
1537 
1538 	return 0;
1539 
1540 cleanup_gd:
1541 	mana_gd_cleanup(pdev);
1542 unmap_bar:
1543 	/*
1544 	 * At this point we know that the other debugfs child dirs/files
1545 	 * are either not yet created or are already cleaned up.
1546 	 * The PCI debugfs folder clean-up now will only remove the
1547 	 * adapter-MTU file and the apc->mana_pci_debugfs folder.
1548 	 */
1549 	debugfs_remove_recursive(gc->mana_pci_debugfs);
1550 	pci_iounmap(pdev, bar0_va);
1551 free_gc:
1552 	pci_set_drvdata(pdev, NULL);
1553 	vfree(gc);
1554 release_region:
1555 	pci_release_regions(pdev);
1556 disable_dev:
1557 	pci_disable_device(pdev);
1558 	dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
1559 	return err;
1560 }
1561 
1562 static void mana_gd_remove(struct pci_dev *pdev)
1563 {
1564 	struct gdma_context *gc = pci_get_drvdata(pdev);
1565 
1566 	mana_remove(&gc->mana, false);
1567 
1568 	mana_gd_cleanup(pdev);
1569 
1570 	debugfs_remove_recursive(gc->mana_pci_debugfs);
1571 
1572 	pci_iounmap(pdev, gc->bar0_va);
1573 
1574 	vfree(gc);
1575 
1576 	pci_release_regions(pdev);
1577 	pci_disable_device(pdev);
1578 }
1579 
1580 /* The 'state' parameter is not used. */
1581 static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
1582 {
1583 	struct gdma_context *gc = pci_get_drvdata(pdev);
1584 
1585 	mana_remove(&gc->mana, true);
1586 
1587 	mana_gd_cleanup(pdev);
1588 
1589 	return 0;
1590 }
1591 
1592 /* In case the NIC hardware stops working, the suspend and resume callbacks will
1593  * fail -- if this happens, it's safer to just report an error than try to undo
1594  * what has been done.
1595  */
1596 static int mana_gd_resume(struct pci_dev *pdev)
1597 {
1598 	struct gdma_context *gc = pci_get_drvdata(pdev);
1599 	int err;
1600 
1601 	err = mana_gd_setup(pdev);
1602 	if (err)
1603 		return err;
1604 
1605 	err = mana_probe(&gc->mana, true);
1606 	if (err)
1607 		return err;
1608 
1609 	return 0;
1610 }
1611 
1612 /* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
1613 static void mana_gd_shutdown(struct pci_dev *pdev)
1614 {
1615 	struct gdma_context *gc = pci_get_drvdata(pdev);
1616 
1617 	dev_info(&pdev->dev, "Shutdown was called\n");
1618 
1619 	mana_remove(&gc->mana, true);
1620 
1621 	mana_gd_cleanup(pdev);
1622 
1623 	debugfs_remove_recursive(gc->mana_pci_debugfs);
1624 
1625 	pci_disable_device(pdev);
1626 }
1627 
1628 static const struct pci_device_id mana_id_table[] = {
1629 	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
1630 	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
1631 	{ }
1632 };
1633 
1634 static struct pci_driver mana_driver = {
1635 	.name		= "mana",
1636 	.id_table	= mana_id_table,
1637 	.probe		= mana_gd_probe,
1638 	.remove		= mana_gd_remove,
1639 	.suspend	= mana_gd_suspend,
1640 	.resume		= mana_gd_resume,
1641 	.shutdown	= mana_gd_shutdown,
1642 };
1643 
1644 static int __init mana_driver_init(void)
1645 {
1646 	int err;
1647 
1648 	mana_debugfs_root = debugfs_create_dir("mana", NULL);
1649 
1650 	err = pci_register_driver(&mana_driver);
1651 	if (err)
1652 		debugfs_remove(mana_debugfs_root);
1653 
1654 	return err;
1655 }
1656 
1657 static void __exit mana_driver_exit(void)
1658 {
1659 	debugfs_remove(mana_debugfs_root);
1660 
1661 	pci_unregister_driver(&mana_driver);
1662 }
1663 
1664 module_init(mana_driver_init);
1665 module_exit(mana_driver_exit);
1666 
1667 MODULE_DEVICE_TABLE(pci, mana_id_table);
1668 
1669 MODULE_LICENSE("Dual BSD/GPL");
1670 MODULE_DESCRIPTION("Microsoft Azure Network Adapter driver");
1671