xref: /freebsd/sys/dev/mana/gdma_main.c (revision f81cdf24ba5436367377f7c8e8f51f6df2a75ca7)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/bus.h>
34 #include <sys/kernel.h>
35 #include <sys/kthread.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/rman.h>
40 #include <sys/smp.h>
41 #include <sys/socket.h>
42 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
44 #include <sys/time.h>
45 #include <sys/eventhandler.h>
46 
47 #include <machine/bus.h>
48 #include <machine/resource.h>
49 #include <machine/in_cksum.h>
50 
51 #include <net/if.h>
52 #include <net/if_var.h>
53 
54 #include <dev/pci/pcivar.h>
55 #include <dev/pci/pcireg.h>
56 
57 #include "gdma_util.h"
58 #include "mana.h"
59 
60 
61 static mana_vendor_id_t mana_id_table[] = {
62     { PCI_VENDOR_ID_MICROSOFT, PCI_DEV_ID_MANA_VF},
63     /* Last entry */
64     { 0, 0}
65 };
66 
67 static inline uint32_t
68 mana_gd_r32(struct gdma_context *g, uint64_t offset)
69 {
70 	uint32_t v = bus_space_read_4(g->gd_bus.bar0_t,
71 	    g->gd_bus.bar0_h, offset);
72 	rmb();
73 	return (v);
74 }
75 
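/*
 * Read a 64-bit BAR0 register.  On amd64 this is a single 8-byte MMIO
 * read; other architectures fall back to two 4-byte reads (low dword
 * first), which assumes a little-endian register layout and that the
 * device tolerates split 64-bit reads.
 */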
76 #if defined(__amd64__)
77 static inline uint64_t
78 mana_gd_r64(struct gdma_context *g, uint64_t offset)
79 {
80 	uint64_t v = bus_space_read_8(g->gd_bus.bar0_t,
81 	    g->gd_bus.bar0_h, offset);
82 	rmb();
83 	return (v);
84 }
85 #else
86 static inline uint64_t
87 mana_gd_r64(struct gdma_context *g, uint64_t offset)
88 {
89 	uint64_t v;
90 	uint32_t *vp = (uint32_t *)&v;
91 
92 	*vp =  mana_gd_r32(g, offset);
93 	*(vp + 1) = mana_gd_r32(g, offset + 4);
94 	rmb();
95 	return (v);
96 }
97 #endif
98 
99 static int
100 mana_gd_query_max_resources(device_t dev)
101 {
102 	struct gdma_context *gc = device_get_softc(dev);
103 	struct gdma_query_max_resources_resp resp = {};
104 	struct gdma_general_req req = {};
105 	int err;
106 
107 	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
108 	    sizeof(req), sizeof(resp));
109 
110 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
111 	if (err || resp.hdr.status) {
112 		device_printf(gc->dev,
113 		   "Failed to query resource info: %d, 0x%x\n",
114 		   err, resp.hdr.status);
115 		return err ? err : EPROTO;
116 	}
117 
118 	mana_dbg(NULL, "max_msix %u, max_eq %u, max_cq %u, "
119 	    "max_sq %u, max_rq %u\n",
120 	    resp.max_msix, resp.max_eq, resp.max_cq,
121 	    resp.max_sq, resp.max_rq);
122 
123 	if (gc->num_msix_usable > resp.max_msix)
124 		gc->num_msix_usable = resp.max_msix;
125 
126 	if (gc->num_msix_usable <= 1)
127 		return ENOSPC;
128 
129 	gc->max_num_queues = mp_ncpus;
130 	if (gc->max_num_queues > MANA_MAX_NUM_QUEUES)
131 		gc->max_num_queues = MANA_MAX_NUM_QUEUES;
132 
133 	if (gc->max_num_queues > resp.max_eq)
134 		gc->max_num_queues = resp.max_eq;
135 
136 	if (gc->max_num_queues > resp.max_cq)
137 		gc->max_num_queues = resp.max_cq;
138 
139 	if (gc->max_num_queues > resp.max_sq)
140 		gc->max_num_queues = resp.max_sq;
141 
142 	if (gc->max_num_queues > resp.max_rq)
143 		gc->max_num_queues = resp.max_rq;
144 
145 	return 0;
146 }
147 
148 static int
149 mana_gd_detect_devices(device_t dev)
150 {
151 	struct gdma_context *gc = device_get_softc(dev);
152 	struct gdma_list_devices_resp resp = {};
153 	struct gdma_general_req req = {};
154 	struct gdma_dev_id gd_dev;
155 	uint32_t i, max_num_devs;
156 	uint16_t dev_type;
157 	int err;
158 
159 	mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
160 	    sizeof(resp));
161 
162 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
163 	if (err || resp.hdr.status) {
164 		device_printf(gc->dev,
165 		    "Failed to detect devices: %d, 0x%x\n", err,
166 		    resp.hdr.status);
167 		return err ? err : EPROTO;
168 	}
169 
170 	max_num_devs = min_t(uint32_t, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
171 
172 	for (i = 0; i < max_num_devs; i++) {
173 		gd_dev = resp.devs[i];
174 		dev_type = gd_dev.type;
175 
176 		mana_dbg(NULL, "gdma dev %d, type %u\n",
177 		    i, dev_type);
178 
179 		/* HWC is already detected in mana_hwc_create_channel(). */
180 		if (dev_type == GDMA_DEVICE_HWC)
181 			continue;
182 
183 		if (dev_type == GDMA_DEVICE_MANA) {
184 			gc->mana.gdma_context = gc;
185 			gc->mana.dev_id = gd_dev;
186 		}
187 	}
188 
189 	return gc->mana.dev_id.type == 0 ? ENODEV : 0;
190 }
191 
192 int
193 mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
194     const void *req, uint32_t resp_len, void *resp)
195 {
196 	struct hw_channel_context *hwc = gc->hwc.driver_data;
197 
198 	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
199 }
200 
201 void
202 mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
203 {
204 	bus_addr_t *paddr = arg;
205 
206 	if (error)
207 		return;
208 
209 	KASSERT(nseg == 1, ("too many segments %d!", nseg));
210 	*paddr = segs->ds_addr;
211 }
212 
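/*
 * Allocate a physically contiguous, page-aligned DMA buffer for a GDMA
 * queue.  The length must be a power of two and at least PAGE_SIZE.
 * The buffer is zeroed (BUS_DMA_ZERO) so that the owner-bit based
 * queue overflow detection starts from a known state.
 */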
213 int
214 mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
215     struct gdma_mem_info *gmi)
216 {
217 	bus_addr_t dma_handle;
218 	void *buf;
219 	int err;
220 
221 	if (!gc || !gmi)
222 		return EINVAL;
223 
224 	if (length < PAGE_SIZE || (length != roundup_pow_of_two(length)))
225 		return EINVAL;
226 
227 	err = bus_dma_tag_create(bus_get_dma_tag(gc->dev),	/* parent */
228 	    PAGE_SIZE, 0,		/* alignment, boundary	*/
229 	    BUS_SPACE_MAXADDR,		/* lowaddr		*/
230 	    BUS_SPACE_MAXADDR,		/* highaddr		*/
231 	    NULL, NULL,			/* filter, filterarg	*/
232 	    length,			/* maxsize		*/
233 	    1,				/* nsegments		*/
234 	    length,			/* maxsegsize		*/
235 	    0,				/* flags		*/
236 	    NULL, NULL,			/* lockfunc, lockfuncarg*/
237 	    &gmi->dma_tag);
238 	if (err) {
239 		device_printf(gc->dev,
240 		    "failed to create dma tag, err: %d\n", err);
241 		return (err);
242 	}
243 
244 	/*
245 	 * Must have BUS_DMA_ZERO flag to clear the dma memory.
246 	 * Otherwise the queue overflow detection mechanism does
247 	 * not work.
248 	 */
249 	err = bus_dmamem_alloc(gmi->dma_tag, &buf,
250 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &gmi->dma_map);
251 	if (err) {
252 		device_printf(gc->dev,
253 		    "failed to alloc dma mem, err: %d\n", err);
254 		bus_dma_tag_destroy(gmi->dma_tag);
255 		return (err);
256 	}
257 
258 	err = bus_dmamap_load(gmi->dma_tag, gmi->dma_map, buf,
259 	    length, mana_gd_dma_map_paddr, &dma_handle, BUS_DMA_NOWAIT);
260 	if (err) {
261 		device_printf(gc->dev,
262 		    "failed to load dma mem, err: %d\n", err);
263 		bus_dmamem_free(gmi->dma_tag, buf, gmi->dma_map);
264 		bus_dma_tag_destroy(gmi->dma_tag);
265 		return (err);
266 	}
267 
268 	gmi->dev = gc->dev;
269 	gmi->dma_handle = dma_handle;
270 	gmi->virt_addr = buf;
271 	gmi->length = length;
272 
273 	return 0;
274 }
275 
276 void
277 mana_gd_free_memory(struct gdma_mem_info *gmi)
278 {
279 	bus_dmamap_unload(gmi->dma_tag, gmi->dma_map);
280 	bus_dmamem_free(gmi->dma_tag, gmi->virt_addr, gmi->dma_map);
281 	bus_dma_tag_destroy(gmi->dma_tag);
282 }
283 
284 int
285 mana_gd_destroy_doorbell_page(struct gdma_context *gc, int doorbell_page)
286 {
287 	struct gdma_destroy_resource_range_req req = {};
288 	struct gdma_resp_hdr resp = {};
289 	int err;
290 
291 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
292 	    sizeof(req), sizeof(resp));
293 
294 	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
295 	req.num_resources = 1;
296 	req.allocated_resources = doorbell_page;
297 
298 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
299 	if (err || resp.status) {
300 		device_printf(gc->dev,
301 		    "Failed to destroy doorbell page: ret %d, 0x%x\n",
302 		    err, resp.status);
303 		return err ? err : EPROTO;
304 	}
305 
306 	return 0;
307 }
308 
309 int
310 mana_gd_allocate_doorbell_page(struct gdma_context *gc, int *doorbell_page)
311 {
312 	struct gdma_allocate_resource_range_req req = {};
313 	struct gdma_allocate_resource_range_resp resp = {};
314 	int err;
315 
316 	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
317 	    sizeof(req), sizeof(resp));
318 
319 	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
320 	req.num_resources = 1;
321 	req.alignment = 1;
322 
323 	/* Have GDMA start searching from 0 */
324 	req.allocated_resources = 0;
325 
326 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
327 	if (err || resp.hdr.status) {
328 		device_printf(gc->dev,
329 		    "Failed to allocate doorbell page: ret %d, 0x%x\n",
330 		    err, resp.hdr.status);
331 		return err ? err : EPROTO;
332 	}
333 
334 	*doorbell_page = resp.allocated_resources;
335 
336 	return 0;
337 }
338 
339 static int
340 mana_gd_create_hw_eq(struct gdma_context *gc,
341     struct gdma_queue *queue)
342 {
343 	struct gdma_create_queue_resp resp = {};
344 	struct gdma_create_queue_req req = {};
345 	int err;
346 
347 	if (queue->type != GDMA_EQ)
348 		return EINVAL;
349 
350 	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_QUEUE,
351 			     sizeof(req), sizeof(resp));
352 
353 	req.hdr.dev_id = queue->gdma_dev->dev_id;
354 	req.type = queue->type;
355 	req.pdid = queue->gdma_dev->pdid;
356 	req.doolbell_id = queue->gdma_dev->doorbell;
357 	req.gdma_region = queue->mem_info.dma_region_handle;
358 	req.queue_size = queue->queue_size;
359 	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
360 	req.eq_pci_msix_index = queue->eq.msix_index;
361 
362 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
363 	if (err || resp.hdr.status) {
364 		device_printf(gc->dev,
365 		    "Failed to create queue: %d, 0x%x\n",
366 		    err, resp.hdr.status);
367 		return err ? err : EPROTO;
368 	}
369 
370 	queue->id = resp.queue_index;
371 	queue->eq.disable_needed = true;
372 	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
373 	return 0;
374 }
375 
376 static int
377 mana_gd_disable_queue(struct gdma_queue *queue)
378 {
379 	struct gdma_context *gc = queue->gdma_dev->gdma_context;
380 	struct gdma_disable_queue_req req = {};
381 	struct gdma_general_resp resp = {};
382 	int err;
383 
384 	if (queue->type != GDMA_EQ)
385 		mana_warn(NULL, "Not event queue type 0x%x\n",
386 		    queue->type);
387 
388 	mana_gd_init_req_hdr(&req.hdr, GDMA_DISABLE_QUEUE,
389 	    sizeof(req), sizeof(resp));
390 
391 	req.hdr.dev_id = queue->gdma_dev->dev_id;
392 	req.type = queue->type;
393 	req.queue_index =  queue->id;
394 	req.alloc_res_id_on_creation = 1;
395 
396 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
397 	if (err || resp.hdr.status) {
398 		device_printf(gc->dev,
399 		    "Failed to disable queue: %d, 0x%x\n", err,
400 		    resp.hdr.status);
401 		return err ? err : EPROTO;
402 	}
403 
404 	return 0;
405 }
406 
407 #define DOORBELL_OFFSET_SQ	0x0
408 #define DOORBELL_OFFSET_RQ	0x400
409 #define DOORBELL_OFFSET_CQ	0x800
410 #define DOORBELL_OFFSET_EQ	0xFF8
411 
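/*
 * Write a doorbell entry for the given queue type.  Each doorbell page
 * provides separate slots for SQ, RQ, CQ and EQ at the offsets defined
 * above; the entry encodes the queue id, the new tail pointer and,
 * depending on the queue type, an arm bit or WQE count.  The doorbell
 * is posted with a single 64-bit MMIO write (two 32-bit writes on
 * non-amd64 platforms).
 */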
412 static void
413 mana_gd_ring_doorbell(struct gdma_context *gc, uint32_t db_index,
414     enum gdma_queue_type q_type, uint32_t qid,
415     uint32_t tail_ptr, uint8_t num_req)
416 {
417 	union gdma_doorbell_entry e = {};
418 	void __iomem *addr;
419 
420 	addr = (char *)gc->db_page_base + gc->db_page_size * db_index;
421 	switch (q_type) {
422 	case GDMA_EQ:
423 		e.eq.id = qid;
424 		e.eq.tail_ptr = tail_ptr;
425 		e.eq.arm = num_req;
426 
427 		addr = (char *)addr + DOORBELL_OFFSET_EQ;
428 		break;
429 
430 	case GDMA_CQ:
431 		e.cq.id = qid;
432 		e.cq.tail_ptr = tail_ptr;
433 		e.cq.arm = num_req;
434 
435 		addr = (char *)addr + DOORBELL_OFFSET_CQ;
436 		break;
437 
438 	case GDMA_RQ:
439 		e.rq.id = qid;
440 		e.rq.tail_ptr = tail_ptr;
441 		e.rq.wqe_cnt = num_req;
442 
443 		addr = (char *)addr + DOORBELL_OFFSET_RQ;
444 		break;
445 
446 	case GDMA_SQ:
447 		e.sq.id = qid;
448 		e.sq.tail_ptr = tail_ptr;
449 
450 		addr = (char *)addr + DOORBELL_OFFSET_SQ;
451 		break;
452 
453 	default:
454 		mana_warn(NULL, "Invalid queue type 0x%x\n", q_type);
455 		return;
456 	}
457 
458 	/* Ensure all writes are done before ringing the doorbell */
459 	wmb();
460 
461 #if defined(__amd64__)
462 	writeq(addr, e.as_uint64);
463 #else
464 	uint32_t *p = (uint32_t *)&e.as_uint64;
465 	writel(addr, *p);
466 	writel((char *)addr + 4, *(p + 1));
467 #endif
468 }
469 
470 void
471 mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
472 {
473 	mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
474 	    queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
475 }
476 
477 void
478 mana_gd_ring_cq(struct gdma_queue *cq, uint8_t arm_bit)
479 {
480 	struct gdma_context *gc = cq->gdma_dev->gdma_context;
481 
482 	uint32_t num_cqe = cq->queue_size / GDMA_CQE_SIZE;
483 
484 	uint32_t head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS);
485 
486 	mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
487 	    head, arm_bit);
488 }
489 
490 static void
491 mana_gd_process_eqe(struct gdma_queue *eq)
492 {
493 	uint32_t head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
494 	struct gdma_context *gc = eq->gdma_dev->gdma_context;
495 	struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
496 	union gdma_eqe_info eqe_info;
497 	enum gdma_eqe_type type;
498 	struct gdma_event event;
499 	struct gdma_queue *cq;
500 	struct gdma_eqe *eqe;
501 	uint32_t cq_id;
502 
503 	eqe = &eq_eqe_ptr[head];
504 	eqe_info.as_uint32 = eqe->eqe_info;
505 	type = eqe_info.type;
506 
507 	switch (type) {
508 	case GDMA_EQE_COMPLETION:
509 		cq_id = eqe->details[0] & 0xFFFFFF;
510 		if (cq_id >= gc->max_num_cqs) {
511 			mana_warn(NULL,
512 			    "failed: cq_id %u > max_num_cqs %u\n",
513 			    cq_id, gc->max_num_cqs);
514 			break;
515 		}
516 
517 		cq = gc->cq_table[cq_id];
518 		if (!cq || cq->type != GDMA_CQ || cq->id != cq_id) {
519 			mana_warn(NULL,
520 			    "failed: invalid cq_id %u\n", cq_id);
521 			break;
522 		}
523 
524 		if (cq->cq.callback)
525 			cq->cq.callback(cq->cq.context, cq);
526 
527 		break;
528 
529 	case GDMA_EQE_TEST_EVENT:
530 		gc->test_event_eq_id = eq->id;
531 
532 		mana_dbg(NULL,
533 		    "EQE TEST EVENT received for EQ %u\n", eq->id);
534 
535 		complete(&gc->eq_test_event);
536 		break;
537 
538 	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
539 	case GDMA_EQE_HWC_INIT_DATA:
540 	case GDMA_EQE_HWC_INIT_DONE:
541 		if (!eq->eq.callback)
542 			break;
543 
544 		event.type = type;
545 		memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE);
546 		eq->eq.callback(eq->eq.context, eq, &event);
547 		break;
548 
549 	default:
550 		break;
551 	}
552 }
553 
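/*
 * EQ interrupt handler.  Each EQE carries an owner-bits field that the
 * hardware advances on every pass through the ring: a value matching
 * the previous pass means "no new entry", the expected value marks a
 * valid entry, and anything else indicates the EQ overflowed while the
 * host fell behind.  Up to 5 EQEs are handled per call before the EQ
 * doorbell is rung with the arm bit set.
 */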
554 static void
555 mana_gd_process_eq_events(void *arg)
556 {
557 	uint32_t owner_bits, new_bits, old_bits;
558 	union gdma_eqe_info eqe_info;
559 	struct gdma_eqe *eq_eqe_ptr;
560 	struct gdma_queue *eq = arg;
561 	struct gdma_context *gc;
562 	uint32_t head, num_eqe;
563 	struct gdma_eqe *eqe;
564 	int i, j;
565 
566 	gc = eq->gdma_dev->gdma_context;
567 
568 	num_eqe = eq->queue_size / GDMA_EQE_SIZE;
569 	eq_eqe_ptr = eq->queue_mem_ptr;
570 
571 	bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map,
572 	    BUS_DMASYNC_POSTREAD);
573 
574 	/* Process up to 5 EQEs at a time, and update the HW head. */
575 	for (i = 0; i < 5; i++) {
576 		eqe = &eq_eqe_ptr[eq->head % num_eqe];
577 		eqe_info.as_uint32 = eqe->eqe_info;
578 		owner_bits = eqe_info.owner_bits;
579 
580 		old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
581 
582 		/* No more entries */
583 		if (owner_bits == old_bits)
584 			break;
585 
586 		new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
587 		if (owner_bits != new_bits) {
588 			/* Something wrong. Log for debugging purpose */
589 			/* Something is wrong. Log it for debugging purposes. */
590 			    "EQ %d: overflow detected, "
591 			    "i = %d, eq->head = %u "
592 			    "got owner_bits = %u, new_bits = %u "
593 			    "eqe addr %p, eqe->eqe_info 0x%x, "
594 			    "eqe type = %x, reserved1 = %x, client_id = %x, "
595 			    "reserved2 = %x, owner_bits = %x\n",
596 			    eq->id, i, eq->head,
597 			    owner_bits, new_bits,
598 			    eqe, eqe->eqe_info,
599 			    eqe_info.type, eqe_info.reserved1,
600 			    eqe_info.client_id, eqe_info.reserved2,
601 			    eqe_info.owner_bits);
602 
603 			uint32_t *eqe_dump = (uint32_t *) eq_eqe_ptr;
604 			for (j = 0; j < 20; j++) {
605 				device_printf(gc->dev, "%p: %x\t%x\t%x\t%x\n",
606 				    &eqe_dump[j * 4], eqe_dump[j * 4], eqe_dump[j * 4 + 1],
607 				    eqe_dump[j * 4 + 2], eqe_dump[j * 4 + 3]);
608 			}
609 			break;
610 		}
611 
612 		rmb();
613 
614 		mana_gd_process_eqe(eq);
615 
616 		eq->head++;
617 	}
618 
619 	bus_dmamap_sync(eq->mem_info.dma_tag, eq->mem_info.dma_map,
620 	    BUS_DMASYNC_PREREAD);
621 
622 	head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS);
623 
624 	mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id,
625 	    head, SET_ARM_BIT);
626 }
627 
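/*
 * Bind an EQ to an MSI-X vector.  A free vector is taken from the
 * bitmap in gc->msix_resource under the spin lock, and the per-vector
 * context is pointed at this queue so that mana_gd_intr() dispatches
 * into mana_gd_process_eq_events().
 */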
628 static int
629 mana_gd_register_irq(struct gdma_queue *queue,
630     const struct gdma_queue_spec *spec)
631 {
632 	struct gdma_dev *gd = queue->gdma_dev;
633 	struct gdma_irq_context *gic;
634 	struct gdma_context *gc;
635 	struct gdma_resource *r;
636 	unsigned int msi_index;
637 	int err;
638 
639 	gc = gd->gdma_context;
640 	r = &gc->msix_resource;
641 
642 	mtx_lock_spin(&r->lock_spin);
643 
644 	msi_index = find_first_zero_bit(r->map, r->size);
645 	if (msi_index >= r->size) {
646 		err = ENOSPC;
647 	} else {
648 		bitmap_set(r->map, msi_index, 1);
649 		queue->eq.msix_index = msi_index;
650 		err = 0;
651 	}
652 
653 	mtx_unlock_spin(&r->lock_spin);
654 
655 	if (err)
656 		return err;
657 
658 	if (unlikely(msi_index >= gc->num_msix_usable)) {
659 		device_printf(gc->dev,
660 		    "chose an invalid msix index %d, usable %d\n",
661 		    msi_index, gc->num_msix_usable);
662 		return ENOSPC;
663 	}
664 
665 	gic = &gc->irq_contexts[msi_index];
666 
667 	if (unlikely(gic->handler || gic->arg)) {
668 		device_printf(gc->dev,
669 		    "interrupt handler or arg already assigned, "
670 		    "msix index: %d\n", msi_index);
671 	}
672 
673 	gic->arg = queue;
674 
675 	gic->handler = mana_gd_process_eq_events;
676 
677 	mana_dbg(NULL, "registered msix index %d vector %d irq %ju\n",
678 	    msi_index, gic->msix_e.vector, rman_get_start(gic->res));
679 
680 	return 0;
681 }
682 
683 static void
684 mana_gd_deregister_irq(struct gdma_queue *queue)
685 {
686 	struct gdma_dev *gd = queue->gdma_dev;
687 	struct gdma_irq_context *gic;
688 	struct gdma_context *gc;
689 	struct gdma_resource *r;
690 	unsigned int msix_index;
691 
692 	gc = gd->gdma_context;
693 	r = &gc->msix_resource;
694 
695 	/* At most mp_ncpus + 1 interrupts are used. */
696 	msix_index = queue->eq.msix_index;
697 	if (unlikely(msix_index >= gc->num_msix_usable))
698 		return;
699 
700 	gic = &gc->irq_contexts[msix_index];
701 	gic->handler = NULL;
702 	gic->arg = NULL;
703 
704 	mtx_lock_spin(&r->lock_spin);
705 	bitmap_clear(r->map, msix_index, 1);
706 	mtx_unlock_spin(&r->lock_spin);
707 
708 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
709 
710 	mana_dbg(NULL, "deregistered msix index %d vector %d irq %ju\n",
711 	    msix_index, gic->msix_e.vector, rman_get_start(gic->res));
712 }
713 
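/*
 * Verify that an EQ can deliver events: ask the device to generate a
 * test EQE on the given queue and wait up to 30 seconds for
 * mana_gd_process_eqe() to complete eq_test_event with the matching
 * EQ id.  This is also used to flush pending events before an EQ is
 * destroyed.
 */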
714 int
715 mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
716 {
717 	struct gdma_generate_test_event_req req = {};
718 	struct gdma_general_resp resp = {};
719 	device_t dev = gc->dev;
720 	int err;
721 
722 	sx_xlock(&gc->eq_test_event_sx);
723 
724 	init_completion(&gc->eq_test_event);
725 	gc->test_event_eq_id = INVALID_QUEUE_ID;
726 
727 	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
728 			     sizeof(req), sizeof(resp));
729 
730 	req.hdr.dev_id = eq->gdma_dev->dev_id;
731 	req.queue_index = eq->id;
732 
733 	err = mana_gd_send_request(gc, sizeof(req), &req,
734 	    sizeof(resp), &resp);
735 	if (err) {
736 		device_printf(dev, "test_eq failed: %d\n", err);
737 		goto out;
738 	}
739 
740 	err = EPROTO;
741 
742 	if (resp.hdr.status) {
743 		device_printf(dev, "test_eq failed: 0x%x\n",
744 		    resp.hdr.status);
745 		goto out;
746 	}
747 
748 	if (wait_for_completion_timeout(&gc->eq_test_event, 30 * hz)) {
749 		device_printf(dev, "test_eq timed out on queue %d\n",
750 		    eq->id);
751 		goto out;
752 	}
753 
754 	if (eq->id != gc->test_event_eq_id) {
755 		device_printf(dev,
756 		    "test_eq got an event on wrong queue %d (%d)\n",
757 		    gc->test_event_eq_id, eq->id);
758 		goto out;
759 	}
760 
761 	err = 0;
762 out:
763 	sx_xunlock(&gc->eq_test_event_sx);
764 	return err;
765 }
766 
767 static void
768 mana_gd_destroy_eq(struct gdma_context *gc, bool flush_events,
769     struct gdma_queue *queue)
770 {
771 	int err;
772 
773 	if (flush_events) {
774 		err = mana_gd_test_eq(gc, queue);
775 		if (err)
776 			device_printf(gc->dev,
777 			    "Failed to flush EQ: %d\n", err);
778 	}
779 
780 	mana_gd_deregister_irq(queue);
781 
782 	if (queue->eq.disable_needed)
783 		mana_gd_disable_queue(queue);
784 }
785 
786 static int mana_gd_create_eq(struct gdma_dev *gd,
787     const struct gdma_queue_spec *spec,
788     bool create_hwq, struct gdma_queue *queue)
789 {
790 	struct gdma_context *gc = gd->gdma_context;
791 	device_t dev = gc->dev;
792 	uint32_t log2_num_entries;
793 	int err;
794 
795 	queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
796 
797 	log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
798 
799 	if (spec->eq.log2_throttle_limit > log2_num_entries) {
800 		device_printf(dev,
801 		    "EQ throttling limit (%lu) > maximum EQE (%u)\n",
802 		    spec->eq.log2_throttle_limit, log2_num_entries);
803 		return EINVAL;
804 	}
805 
806 	err = mana_gd_register_irq(queue, spec);
807 	if (err) {
808 		device_printf(dev, "Failed to register irq: %d\n", err);
809 		return err;
810 	}
811 
812 	queue->eq.callback = spec->eq.callback;
813 	queue->eq.context = spec->eq.context;
814 	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
815 	queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
816 
817 	if (create_hwq) {
818 		err = mana_gd_create_hw_eq(gc, queue);
819 		if (err)
820 			goto out;
821 
822 		err = mana_gd_test_eq(gc, queue);
823 		if (err)
824 			goto out;
825 	}
826 
827 	return 0;
828 out:
829 	device_printf(dev, "Failed to create EQ: %d\n", err);
830 	mana_gd_destroy_eq(gc, false, queue);
831 	return err;
832 }
833 
834 static void
835 mana_gd_create_cq(const struct gdma_queue_spec *spec,
836     struct gdma_queue *queue)
837 {
838 	uint32_t log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);
839 
840 	queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
841 	queue->cq.parent = spec->cq.parent_eq;
842 	queue->cq.context = spec->cq.context;
843 	queue->cq.callback = spec->cq.callback;
844 }
845 
846 static void
847 mana_gd_destroy_cq(struct gdma_context *gc,
848     struct gdma_queue *queue)
849 {
850 	uint32_t id = queue->id;
851 
852 	if (id >= gc->max_num_cqs)
853 		return;
854 
855 	if (!gc->cq_table[id])
856 		return;
857 
858 	gc->cq_table[id] = NULL;
859 }
860 
861 int mana_gd_create_hwc_queue(struct gdma_dev *gd,
862     const struct gdma_queue_spec *spec,
863     struct gdma_queue **queue_ptr)
864 {
865 	struct gdma_context *gc = gd->gdma_context;
866 	struct gdma_mem_info *gmi;
867 	struct gdma_queue *queue;
868 	int err;
869 
870 	queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
871 	if (!queue)
872 		return ENOMEM;
873 
874 	gmi = &queue->mem_info;
875 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
876 	if (err)
877 		goto free_q;
878 
879 	queue->head = 0;
880 	queue->tail = 0;
881 	queue->queue_mem_ptr = gmi->virt_addr;
882 	queue->queue_size = spec->queue_size;
883 	queue->monitor_avl_buf = spec->monitor_avl_buf;
884 	queue->type = spec->type;
885 	queue->gdma_dev = gd;
886 
887 	if (spec->type == GDMA_EQ)
888 		err = mana_gd_create_eq(gd, spec, false, queue);
889 	else if (spec->type == GDMA_CQ)
890 		mana_gd_create_cq(spec, queue);
891 
892 	if (err)
893 		goto out;
894 
895 	*queue_ptr = queue;
896 	return 0;
897 out:
898 	mana_gd_free_memory(gmi);
899 free_q:
900 	free(queue, M_DEVBUF);
901 	return err;
902 }
903 
904 int
905 mana_gd_destroy_dma_region(struct gdma_context *gc,
906     gdma_obj_handle_t dma_region_handle)
907 {
908 	struct gdma_destroy_dma_region_req req = {};
909 	struct gdma_general_resp resp = {};
910 	int err;
911 
912 	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
913 		return 0;
914 
915 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
916 	    sizeof(resp));
917 	req.dma_region_handle = dma_region_handle;
918 
919 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
920 	    &resp);
921 	if (err || resp.hdr.status) {
922 		device_printf(gc->dev,
923 		    "Failed to destroy DMA region: %d, 0x%x\n",
924 		    err, resp.hdr.status);
925 		return EPROTO;
926 	}
927 
928 	return 0;
929 }
930 
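/*
 * Register queue memory with the device as a DMA region.  The request
 * carries the physical address of every 4K page of the buffer, so the
 * whole page list must fit into a single HWC request message; on
 * success the device returns the dma_region_handle used when creating
 * hardware queues.
 */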
931 static int
932 mana_gd_create_dma_region(struct gdma_dev *gd,
933     struct gdma_mem_info *gmi)
934 {
935 	unsigned int num_page = gmi->length / PAGE_SIZE;
936 	struct gdma_create_dma_region_req *req = NULL;
937 	struct gdma_create_dma_region_resp resp = {};
938 	struct gdma_context *gc = gd->gdma_context;
939 	struct hw_channel_context *hwc;
940 	uint32_t length = gmi->length;
941 	uint32_t req_msg_size;
942 	int err;
943 	int i;
944 
945 	if (length < PAGE_SIZE || !is_power_of_2(length)) {
946 		mana_err(NULL, "gmi size incorrect: %u\n", length);
947 		return EINVAL;
948 	}
949 
950 	if (offset_in_page((uintptr_t)gmi->virt_addr) != 0) {
951 		mana_err(NULL, "gmi not page aligned: %p\n",
952 		    gmi->virt_addr);
953 		return EINVAL;
954 	}
955 
956 	hwc = gc->hwc.driver_data;
957 	req_msg_size = sizeof(*req) + num_page * sizeof(uint64_t);
958 	if (req_msg_size > hwc->max_req_msg_size) {
959 		mana_err(NULL, "req msg size too large: %u, %u\n",
960 		    req_msg_size, hwc->max_req_msg_size);
961 		return EINVAL;
962 	}
963 
964 	req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO);
965 	if (!req)
966 		return ENOMEM;
967 
968 	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
969 	    req_msg_size, sizeof(resp));
970 	req->length = length;
971 	req->offset_in_page = 0;
972 	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
973 	req->page_count = num_page;
974 	req->page_addr_list_len = num_page;
975 
976 	for (i = 0; i < num_page; i++)
977 		req->page_addr_list[i] = gmi->dma_handle +  i * PAGE_SIZE;
978 
979 	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
980 	if (err)
981 		goto out;
982 
983 	if (resp.hdr.status ||
984 	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
985 		device_printf(gc->dev, "Failed to create DMA region: 0x%x\n",
986 			resp.hdr.status);
987 		err = EPROTO;
988 		goto out;
989 	}
990 
991 	gmi->dma_region_handle = resp.dma_region_handle;
992 out:
993 	free(req, M_DEVBUF);
994 	return err;
995 }
996 
997 int
998 mana_gd_create_mana_eq(struct gdma_dev *gd,
999     const struct gdma_queue_spec *spec,
1000     struct gdma_queue **queue_ptr)
1001 {
1002 	struct gdma_context *gc = gd->gdma_context;
1003 	struct gdma_mem_info *gmi;
1004 	struct gdma_queue *queue;
1005 	int err;
1006 
1007 	if (spec->type != GDMA_EQ)
1008 		return EINVAL;
1009 
1010 	queue = malloc(sizeof(*queue),  M_DEVBUF, M_WAITOK | M_ZERO);
1011 	if (!queue)
1012 		return ENOMEM;
1013 
1014 	gmi = &queue->mem_info;
1015 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
1016 	if (err)
1017 		goto free_q;
1018 
1019 	err = mana_gd_create_dma_region(gd, gmi);
1020 	if (err)
1021 		goto out;
1022 
1023 	queue->head = 0;
1024 	queue->tail = 0;
1025 	queue->queue_mem_ptr = gmi->virt_addr;
1026 	queue->queue_size = spec->queue_size;
1027 	queue->monitor_avl_buf = spec->monitor_avl_buf;
1028 	queue->type = spec->type;
1029 	queue->gdma_dev = gd;
1030 
1031 	err = mana_gd_create_eq(gd, spec, true, queue);
1032 	if (err)
1033 		goto out;
1034 
1035 	*queue_ptr = queue;
1036 	return 0;
1037 
1038 out:
1039 	mana_gd_free_memory(gmi);
1040 free_q:
1041 	free(queue, M_DEVBUF);
1042 	return err;
1043 }
1044 
1045 int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
1046     const struct gdma_queue_spec *spec,
1047     struct gdma_queue **queue_ptr)
1048 {
1049 	struct gdma_context *gc = gd->gdma_context;
1050 	struct gdma_mem_info *gmi;
1051 	struct gdma_queue *queue;
1052 	int err;
1053 
1054 	if (spec->type != GDMA_CQ && spec->type != GDMA_SQ &&
1055 	    spec->type != GDMA_RQ)
1056 		return EINVAL;
1057 
1058 	queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
1059 	if (!queue)
1060 		return ENOMEM;
1061 
1062 	gmi = &queue->mem_info;
1063 	err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
1064 	if (err)
1065 		goto free_q;
1066 
1067 	err = mana_gd_create_dma_region(gd, gmi);
1068 	if (err)
1069 		goto out;
1070 
1071 	queue->head = 0;
1072 	queue->tail = 0;
1073 	queue->queue_mem_ptr = gmi->virt_addr;
1074 	queue->queue_size = spec->queue_size;
1075 	queue->monitor_avl_buf = spec->monitor_avl_buf;
1076 	queue->type = spec->type;
1077 	queue->gdma_dev = gd;
1078 
1079 	if (spec->type == GDMA_CQ)
1080 		mana_gd_create_cq(spec, queue);
1081 
1082 	*queue_ptr = queue;
1083 	return 0;
1084 
1085 out:
1086 	mana_gd_free_memory(gmi);
1087 free_q:
1088 	free(queue, M_DEVBUF);
1089 	return err;
1090 }
1091 
1092 void
1093 mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
1094 {
1095 	struct gdma_mem_info *gmi = &queue->mem_info;
1096 
1097 	switch (queue->type) {
1098 	case GDMA_EQ:
1099 		mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
1100 		break;
1101 
1102 	case GDMA_CQ:
1103 		mana_gd_destroy_cq(gc, queue);
1104 		break;
1105 
1106 	case GDMA_RQ:
1107 		break;
1108 
1109 	case GDMA_SQ:
1110 		break;
1111 
1112 	default:
1113 		device_printf(gc->dev,
1114 		    "Can't destroy unknown queue: type = %d\n",
1115 		    queue->type);
1116 		return;
1117 	}
1118 
1119 	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
1120 	mana_gd_free_memory(gmi);
1121 	free(queue, M_DEVBUF);
1122 }
1123 
1124 #define OS_MAJOR_DIV		100000
1125 #define OS_BUILD_MOD		1000
1126 
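/*
 * The OS version reported to the PF is derived from osreldate
 * (__FreeBSD_version).  For example, osreldate 1301500 yields
 * major 13 (1301500 / 100000), minor 1 ((1301500 % 100000) / 1000)
 * and build 500 (1301500 % 1000).
 */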
1127 int
1128 mana_gd_verify_vf_version(device_t dev)
1129 {
1130 	struct gdma_context *gc = device_get_softc(dev);
1131 	struct gdma_verify_ver_resp resp = {};
1132 	struct gdma_verify_ver_req req = {};
1133 	int err;
1134 
1135 	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
1136 	    sizeof(req), sizeof(resp));
1137 
1138 	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
1139 	req.protocol_ver_max = GDMA_PROTOCOL_LAST;
1140 
1141 	req.drv_ver = 0;	/* Unused */
1142 	req.os_type = 0x30;	/* Other */
1143 	req.os_ver_major = osreldate / OS_MAJOR_DIV;
1144 	req.os_ver_minor = (osreldate % OS_MAJOR_DIV) / OS_BUILD_MOD;
1145 	req.os_ver_build = osreldate % OS_BUILD_MOD;
1146 	strncpy(req.os_ver_str1, ostype, sizeof(req.os_ver_str1) - 1);
1147 	strncpy(req.os_ver_str2, osrelease, sizeof(req.os_ver_str2) - 1);
1148 
1149 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
1150 	if (err || resp.hdr.status) {
1151 		device_printf(gc->dev,
1152 		    "VfVerifyVersionOutput: %d, status=0x%x\n",
1153 		    err, resp.hdr.status);
1154 		return err ? err : EPROTO;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 int
1161 mana_gd_register_device(struct gdma_dev *gd)
1162 {
1163 	struct gdma_context *gc = gd->gdma_context;
1164 	struct gdma_register_device_resp resp = {};
1165 	struct gdma_general_req req = {};
1166 	int err;
1167 
1168 	gd->pdid = INVALID_PDID;
1169 	gd->doorbell = INVALID_DOORBELL;
1170 	gd->gpa_mkey = INVALID_MEM_KEY;
1171 
1172 	mana_gd_init_req_hdr(&req.hdr, GDMA_REGISTER_DEVICE, sizeof(req),
1173 	    sizeof(resp));
1174 
1175 	req.hdr.dev_id = gd->dev_id;
1176 
1177 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
1178 	if (err || resp.hdr.status) {
1179 		device_printf(gc->dev,
1180 		    "gdma_register_device_resp failed: %d, 0x%x\n",
1181 		    err, resp.hdr.status);
1182 		return err ? err : EPROTO;
1183 	}
1184 
1185 	gd->pdid = resp.pdid;
1186 	gd->gpa_mkey = resp.gpa_mkey;
1187 	gd->doorbell = resp.db_id;
1188 
1189 	mana_dbg(NULL, "mana device pdid %u, gpa_mkey %u, doorbell %u \n",
1190 	    gd->pdid, gd->gpa_mkey, gd->doorbell);
1191 
1192 	return 0;
1193 }
1194 
1195 int
1196 mana_gd_deregister_device(struct gdma_dev *gd)
1197 {
1198 	struct gdma_context *gc = gd->gdma_context;
1199 	struct gdma_general_resp resp = {};
1200 	struct gdma_general_req req = {};
1201 	int err;
1202 
1203 	if (gd->pdid == INVALID_PDID)
1204 		return EINVAL;
1205 
1206 	mana_gd_init_req_hdr(&req.hdr, GDMA_DEREGISTER_DEVICE, sizeof(req),
1207 	    sizeof(resp));
1208 
1209 	req.hdr.dev_id = gd->dev_id;
1210 
1211 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
1212 	if (err || resp.hdr.status) {
1213 		device_printf(gc->dev,
1214 		    "Failed to deregister device: %d, 0x%x\n",
1215 		    err, resp.hdr.status);
1216 		if (!err)
1217 			err = EPROTO;
1218 	}
1219 
1220 	gd->pdid = INVALID_PDID;
1221 	gd->doorbell = INVALID_DOORBELL;
1222 	gd->gpa_mkey = INVALID_MEM_KEY;
1223 
1224 	return err;
1225 }
1226 
1227 uint32_t
1228 mana_gd_wq_avail_space(struct gdma_queue *wq)
1229 {
1230 	uint32_t used_space = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
1231 	uint32_t wq_size = wq->queue_size;
1232 
1233 	if (used_space > wq_size) {
1234 		mana_warn(NULL, "failed: used space %u > queue size %u\n",
1235 		    used_space, wq_size);
1236 	}
1237 
1238 	return wq_size - used_space;
1239 }
1240 
1241 uint8_t *
1242 mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset)
1243 {
1244 	uint32_t offset =
1245 	    (wqe_offset * GDMA_WQE_BU_SIZE) & (wq->queue_size - 1);
1246 
1247 	if ((offset + GDMA_WQE_BU_SIZE) > wq->queue_size) {
1248 		mana_warn(NULL, "failed: write end out of queue bound %u, "
1249 		    "queue size %u\n",
1250 		    offset + GDMA_WQE_BU_SIZE, wq->queue_size);
1251 	}
1252 
1253 	return (uint8_t *)wq->queue_mem_ptr + offset;
1254 }
1255 
1256 static uint32_t
1257 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
1258     enum gdma_queue_type q_type,
1259     uint32_t client_oob_size, uint32_t sgl_data_size,
1260     uint8_t *wqe_ptr)
1261 {
1262 	bool oob_in_sgl = !!(wqe_req->flags & GDMA_WR_OOB_IN_SGL);
1263 	bool pad_data = !!(wqe_req->flags & GDMA_WR_PAD_BY_SGE0);
1264 	struct gdma_wqe *header = (struct gdma_wqe *)wqe_ptr;
1265 	uint8_t *ptr;
1266 
1267 	memset(header, 0, sizeof(struct gdma_wqe));
1268 	header->num_sge = wqe_req->num_sge;
1269 	header->inline_oob_size_div4 = client_oob_size / sizeof(uint32_t);
1270 
1271 	if (oob_in_sgl) {
1272 		if (!pad_data || wqe_req->num_sge < 2) {
1273 			mana_warn(NULL, "no pad_data or num_sge < 2\n");
1274 		}
1275 
1276 		header->client_oob_in_sgl = 1;
1277 
1278 		if (pad_data)
1279 			header->last_vbytes = wqe_req->sgl[0].size;
1280 	}
1281 
1282 	if (q_type == GDMA_SQ)
1283 		header->client_data_unit = wqe_req->client_data_unit;
1284 
1285 	/*
1286 	 * The size of gdma_wqe + client_oob_size must be less than or equal
1287 	 * to one Basic Unit (i.e. 32 bytes), so the pointer can't go beyond
1288 	 * the queue memory buffer boundary.
1289 	 */
1290 	ptr = wqe_ptr + sizeof(*header);
1291 
1292 	if (wqe_req->inline_oob_data && wqe_req->inline_oob_size > 0) {
1293 		memcpy(ptr, wqe_req->inline_oob_data, wqe_req->inline_oob_size);
1294 
1295 		if (client_oob_size > wqe_req->inline_oob_size)
1296 			memset(ptr + wqe_req->inline_oob_size, 0,
1297 			       client_oob_size - wqe_req->inline_oob_size);
1298 	}
1299 
1300 	return sizeof(*header) + client_oob_size;
1301 }
1302 
1303 static void
1304 mana_gd_write_sgl(struct gdma_queue *wq, uint8_t *wqe_ptr,
1305     const struct gdma_wqe_request *wqe_req)
1306 {
1307 	uint32_t sgl_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
1308 	const uint8_t *address = (uint8_t *)wqe_req->sgl;
1309 	uint8_t *base_ptr, *end_ptr;
1310 	uint32_t size_to_end;
1311 
1312 	base_ptr = wq->queue_mem_ptr;
1313 	end_ptr = base_ptr + wq->queue_size;
1314 	size_to_end = (uint32_t)(end_ptr - wqe_ptr);
1315 
1316 	if (size_to_end < sgl_size) {
1317 		memcpy(wqe_ptr, address, size_to_end);
1318 
1319 		wqe_ptr = base_ptr;
1320 		address += size_to_end;
1321 		sgl_size -= size_to_end;
1322 	}
1323 
1324 	memcpy(wqe_ptr, address, sgl_size);
1325 }
1326 
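/*
 * Write one work request into a work queue.  A WQE consists of the
 * gdma_wqe header, the inline client OOB area (INLINE_OOB_SMALL_SIZE
 * for RQs, small or large for SQs) and the SGL, with the total size
 * rounded up to a multiple of GDMA_WQE_BU_SIZE (32 bytes).  The SGL
 * may wrap around the end of the ring buffer; mana_gd_write_sgl()
 * handles the split copy.
 */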
1327 int
1328 mana_gd_post_work_request(struct gdma_queue *wq,
1329     const struct gdma_wqe_request *wqe_req,
1330     struct gdma_posted_wqe_info *wqe_info)
1331 {
1332 	uint32_t client_oob_size = wqe_req->inline_oob_size;
1333 	struct gdma_context *gc;
1334 	uint32_t sgl_data_size;
1335 	uint32_t max_wqe_size;
1336 	uint32_t wqe_size;
1337 	uint8_t *wqe_ptr;
1338 
1339 	if (wqe_req->num_sge == 0)
1340 		return EINVAL;
1341 
1342 	if (wq->type == GDMA_RQ) {
1343 		if (client_oob_size != 0)
1344 			return EINVAL;
1345 
1346 		client_oob_size = INLINE_OOB_SMALL_SIZE;
1347 
1348 		max_wqe_size = GDMA_MAX_RQE_SIZE;
1349 	} else {
1350 		if (client_oob_size != INLINE_OOB_SMALL_SIZE &&
1351 		    client_oob_size != INLINE_OOB_LARGE_SIZE)
1352 			return EINVAL;
1353 
1354 		max_wqe_size = GDMA_MAX_SQE_SIZE;
1355 	}
1356 
1357 	sgl_data_size = sizeof(struct gdma_sge) * wqe_req->num_sge;
1358 	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
1359 	    sgl_data_size, GDMA_WQE_BU_SIZE);
1360 	if (wqe_size > max_wqe_size)
1361 		return EINVAL;
1362 
1363 	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
1364 		gc = wq->gdma_dev->gdma_context;
1365 		device_printf(gc->dev, "unsuccessful flow control!\n");
1366 		return ENOSPC;
1367 	}
1368 
1369 	if (wqe_info)
1370 		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
1371 
1372 	wqe_ptr = mana_gd_get_wqe_ptr(wq, wq->head);
1373 	wqe_ptr += mana_gd_write_client_oob(wqe_req, wq->type, client_oob_size,
1374 	    sgl_data_size, wqe_ptr);
1375 	if (wqe_ptr >= (uint8_t *)wq->queue_mem_ptr + wq->queue_size)
1376 		wqe_ptr -= wq->queue_size;
1377 
1378 	mana_gd_write_sgl(wq, wqe_ptr, wqe_req);
1379 
1380 	wq->head += wqe_size / GDMA_WQE_BU_SIZE;
1381 
1382 	bus_dmamap_sync(wq->mem_info.dma_tag, wq->mem_info.dma_map,
1383 	    BUS_DMASYNC_PREWRITE);
1384 
1385 	return 0;
1386 }
1387 
1388 int
1389 mana_gd_post_and_ring(struct gdma_queue *queue,
1390     const struct gdma_wqe_request *wqe_req,
1391     struct gdma_posted_wqe_info *wqe_info)
1392 {
1393 	struct gdma_context *gc = queue->gdma_dev->gdma_context;
1394 	int err;
1395 
1396 	err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
1397 	if (err)
1398 		return err;
1399 
1400 	mana_gd_wq_ring_doorbell(gc, queue);
1401 
1402 	return 0;
1403 }
1404 
1405 static int
1406 mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp)
1407 {
1408 	unsigned int num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
1409 	struct gdma_cqe *cq_cqe = cq->queue_mem_ptr;
1410 	uint32_t owner_bits, new_bits, old_bits;
1411 	struct gdma_cqe *cqe;
1412 
1413 	cqe = &cq_cqe[cq->head % num_cqe];
1414 	owner_bits = cqe->cqe_info.owner_bits;
1415 
1416 	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
1417 	/* Return 0 if no more entries. */
1418 	if (owner_bits == old_bits)
1419 		return 0;
1420 
1421 	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
1422 	/* Return -1 if overflow detected. */
1423 	if (owner_bits != new_bits) {
1424 		mana_warn(NULL,
1425 		    "overflow detected! owner_bits %u != new_bits %u\n",
1426 		    owner_bits, new_bits);
1427 		return -1;
1428 	}
1429 
1430 	rmb();
1431 
1432 	comp->wq_num = cqe->cqe_info.wq_num;
1433 	comp->is_sq = cqe->cqe_info.is_sq;
1434 	memcpy(comp->cqe_data, cqe->cqe_data, GDMA_COMP_DATA_SIZE);
1435 
1436 	return 1;
1437 }
1438 
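/*
 * Drain up to num_cqe completions from a CQ into the caller's array.
 * Returns the number of completions copied, or -1 (with cq->head
 * rewound) if mana_gd_read_cqe() detected a CQ overflow.
 */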
1439 int
1440 mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
1441 {
1442 	int cqe_idx;
1443 	int ret;
1444 
1445 	bus_dmamap_sync(cq->mem_info.dma_tag, cq->mem_info.dma_map,
1446 	    BUS_DMASYNC_POSTREAD);
1447 
1448 	for (cqe_idx = 0; cqe_idx < num_cqe; cqe_idx++) {
1449 		ret = mana_gd_read_cqe(cq, &comp[cqe_idx]);
1450 
1451 		if (ret < 0) {
1452 			cq->head -= cqe_idx;
1453 			return ret;
1454 		}
1455 
1456 		if (ret == 0)
1457 			break;
1458 
1459 		cq->head++;
1460 	}
1461 
1462 	return cqe_idx;
1463 }
1464 
1465 static void
1466 mana_gd_intr(void *arg)
1467 {
1468 	struct gdma_irq_context *gic = arg;
1469 
1470 	if (gic->handler) {
1471 		gic->handler(gic->arg);
1472 	}
1473 }
1474 
1475 int
1476 mana_gd_alloc_res_map(uint32_t res_avail,
1477     struct gdma_resource *r, const char *lock_name)
1478 {
1479 	int n = howmany(res_avail, BITS_PER_LONG);
1480 
1481 	r->map =
1482 	    malloc(n * sizeof(unsigned long), M_DEVBUF, M_WAITOK | M_ZERO);
1483 	if (!r->map)
1484 		return ENOMEM;
1485 
1486 	r->size = res_avail;
1487 	mtx_init(&r->lock_spin, lock_name, NULL, MTX_SPIN);
1488 
1489 	mana_dbg(NULL,
1490 	    "total res %u, total number of unsigned longs %u\n",
1491 	    r->size, n);
1492 	return (0);
1493 }
1494 
1495 void
1496 mana_gd_free_res_map(struct gdma_resource *r)
1497 {
1498 	if (!r || !r->map)
1499 		return;
1500 
1501 	free(r->map, M_DEVBUF);
1502 	r->map = NULL;
1503 	r->size = 0;
1504 }
1505 
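/*
 * Read the doorbell page size, the doorbell page offset and the
 * shared-memory offset from the GDMA registers in BAR0, and record
 * both the kernel virtual and the physical address of the doorbell
 * area.
 */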
1506 static void
1507 mana_gd_init_registers(struct gdma_context *gc)
1508 {
1509 	uintptr_t bar0_va = rman_get_bushandle(gc->bar0);
1510 	vm_paddr_t bar0_pa = rman_get_start(gc->bar0);
1511 
1512 	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
1513 
1514 	gc->db_page_base =
1515 	    (void *)(bar0_va + (size_t)mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET));
1516 
1517 	gc->phys_db_page_base =
1518 	    bar0_pa + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
1519 
1520 	gc->shm_base =
1521 	    (void *)(bar0_va + (size_t)mana_gd_r64(gc, GDMA_REG_SHM_OFFSET));
1522 
1523 	mana_dbg(NULL, "db_page_size 0x%x, db_page_base %p,"
1524 		    " shm_base %p\n",
1525 		    gc->db_page_size, gc->db_page_base, gc->shm_base);
1526 }
1527 
1528 static struct resource *
1529 mana_gd_alloc_bar(device_t dev, int bar)
1530 {
1531 	struct resource *res = NULL;
1532 	struct pci_map *pm;
1533 	int rid, type;
1534 
1535 	if (bar < 0 || bar > PCIR_MAX_BAR_0)
1536 		goto alloc_bar_out;
1537 
1538 	pm = pci_find_bar(dev, PCIR_BAR(bar));
1539 	if (!pm)
1540 		goto alloc_bar_out;
1541 
1542 	if (PCI_BAR_IO(pm->pm_value))
1543 		type = SYS_RES_IOPORT;
1544 	else
1545 		type = SYS_RES_MEMORY;
1546 	if (type < 0)
1547 		goto alloc_bar_out;
1548 
1549 	rid = PCIR_BAR(bar);
1550 	res = bus_alloc_resource_any(dev, type, &rid, RF_ACTIVE);
1551 #if defined(__amd64__)
1552 	if (res)
1553 		mana_dbg(NULL, "bar %d: rid 0x%x, type 0x%jx,"
1554 		    " handle 0x%jx\n",
1555 		    bar, rid, res->r_bustag, res->r_bushandle);
1556 #endif
1557 
1558 alloc_bar_out:
1559 	return (res);
1560 }
1561 
1562 static void
1563 mana_gd_free_pci_res(struct gdma_context *gc)
1564 {
1565 	if (!gc || !gc->dev)
1566 		return;
1567 
1568 	if (gc->bar0 != NULL) {
1569 		bus_release_resource(gc->dev, SYS_RES_MEMORY,
1570 		    PCIR_BAR(GDMA_BAR0), gc->bar0);
1571 	}
1572 
1573 	if (gc->msix != NULL) {
1574 		bus_release_resource(gc->dev, SYS_RES_MEMORY,
1575 		    gc->msix_rid, gc->msix);
1576 	}
1577 }
1578 
1579 static int
1580 mana_gd_setup_irqs(device_t dev)
1581 {
1582 	unsigned int max_queues_per_port = mp_ncpus;
1583 	struct gdma_context *gc = device_get_softc(dev);
1584 	struct gdma_irq_context *gic;
1585 	unsigned int max_irqs;
1586 	int nvec;
1587 	int rc, rcc, i;
1588 
1589 	if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
1590 		max_queues_per_port = MANA_MAX_NUM_QUEUES;
1591 
1592 	/* Need 1 interrupt for the Hardware Communication Channel (HWC) */
1593 	max_irqs = max_queues_per_port + 1;
1594 
1595 	nvec = max_irqs;
1596 	rc = pci_alloc_msix(dev, &nvec);
1597 	if (unlikely(rc != 0)) {
1598 		device_printf(dev,
1599 		    "Failed to allocate MSIX, vectors %d, error: %d\n",
1600 		    nvec, rc);
1601 		rc = ENOSPC;
1602 		goto err_setup_irq_alloc;
1603 	}
1604 
1605 	if (nvec != max_irqs) {
1606 		if (nvec == 1) {
1607 			device_printf(dev,
1608 			    "Not enough MSI-X vectors allocated: %d\n",
1609 			    nvec);
1610 			rc = ENOSPC;
1611 			goto err_setup_irq_release;
1612 		}
1613 		device_printf(dev, "Allocated only %d MSI-X vectors (%d requested)\n",
1614 		    nvec, max_irqs);
1615 	}
1616 
1617 	gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context),
1618 	    M_DEVBUF, M_WAITOK | M_ZERO);
1619 	if (!gc->irq_contexts) {
1620 		rc = ENOMEM;
1621 		goto err_setup_irq_release;
1622 	}
1623 
1624 	for (i = 0; i < nvec; i++) {
1625 		gic = &gc->irq_contexts[i];
1626 		gic->msix_e.entry = i;
1627 		/* Vector numbers start from 1. */
1628 		gic->msix_e.vector = i + 1;
1629 		gic->handler = NULL;
1630 		gic->arg = NULL;
1631 
1632 		gic->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1633 		    &gic->msix_e.vector, RF_ACTIVE | RF_SHAREABLE);
1634 		if (unlikely(gic->res == NULL)) {
1635 			rc = ENOMEM;
1636 			device_printf(dev, "could not allocate resource "
1637 			    "for irq vector %d\n", gic->msix_e.vector);
1638 			goto err_setup_irq;
1639 		}
1640 
1641 		rc = bus_setup_intr(dev, gic->res,
1642 		    INTR_TYPE_NET | INTR_MPSAFE, NULL, mana_gd_intr,
1643 		    gic, &gic->cookie);
1644 		if (unlikely(rc != 0)) {
1645 			device_printf(dev, "failed to register interrupt "
1646 			    "handler for irq %ju vector %d: error %d\n",
1647 			    rman_get_start(gic->res), gic->msix_e.vector, rc);
1648 			goto err_setup_irq;
1649 		}
1650 		gic->requested = true;
1651 
1652 		mana_dbg(NULL, "added msix vector %d irq %ju\n",
1653 		    gic->msix_e.vector, rman_get_start(gic->res));
1654 	}
1655 
1656 	rc = mana_gd_alloc_res_map(nvec, &gc->msix_resource,
1657 	    "gdma msix res lock");
1658 	if (rc != 0) {
1659 		device_printf(dev, "failed to allocate memory "
1660 		    "for msix bitmap\n");
1661 		goto err_setup_irq;
1662 	}
1663 
1664 	gc->max_num_msix = nvec;
1665 	gc->num_msix_usable = nvec;
1666 
1667 	mana_dbg(NULL, "setup %d msix interrupts\n", nvec);
1668 
1669 	return (0);
1670 
1671 err_setup_irq:
1672 	for (; i >= 0; i--) {
1673 		gic = &gc->irq_contexts[i];
1674 		rcc = 0;
1675 
1676 		/*
1677 		 * If gic->requested is true, we need to free both intr and
1678 		 * resources.
1679 		 */
1680 		if (gic->requested)
1681 			rcc = bus_teardown_intr(dev, gic->res, gic->cookie);
1682 		if (unlikely(rcc != 0))
1683 			device_printf(dev, "could not release "
1684 			    "irq vector %d, error: %d\n",
1685 			    gic->msix_e.vector, rcc);
1686 
1687 		rcc = 0;
1688 		if (gic->res != NULL) {
1689 			rcc = bus_release_resource(dev, SYS_RES_IRQ,
1690 			    gic->msix_e.vector, gic->res);
1691 		}
1692 		if (unlikely(rcc != 0))
1693 			device_printf(dev, "dev has no parent while "
1694 			    "releasing resource for irq vector %d\n",
1695 			    gic->msix_e.vector);
1696 		gic->requested = false;
1697 		gic->res = NULL;
1698 	}
1699 
1700 	free(gc->irq_contexts, M_DEVBUF);
1701 	gc->irq_contexts = NULL;
1702 err_setup_irq_release:
1703 	pci_release_msi(dev);
1704 err_setup_irq_alloc:
1705 	return (rc);
1706 }
1707 
1708 static void
1709 mana_gd_remove_irqs(device_t dev)
1710 {
1711 	struct gdma_context *gc = device_get_softc(dev);
1712 	struct gdma_irq_context *gic;
1713 	int rc, i;
1714 
1715 	mana_gd_free_res_map(&gc->msix_resource);
1716 
1717 	for (i = 0; i < gc->max_num_msix; i++) {
1718 		gic = &gc->irq_contexts[i];
1719 		if (gic->requested) {
1720 			rc = bus_teardown_intr(dev, gic->res, gic->cookie);
1721 			if (unlikely(rc != 0)) {
1722 				device_printf(dev, "failed to tear down "
1723 				    "irq vector %d, error: %d\n",
1724 				    gic->msix_e.vector, rc);
1725 			}
1726 			gic->requested = false;
1727 		}
1728 
1729 		if (gic->res != NULL) {
1730 			rc = bus_release_resource(dev, SYS_RES_IRQ,
1731 			    gic->msix_e.vector, gic->res);
1732 			if (unlikely(rc != 0)) {
1733 				device_printf(dev, "dev has no parent while "
1734 				    "releasing resource for irq vector %d\n",
1735 				    gic->msix_e.vector);
1736 			}
1737 			gic->res = NULL;
1738 		}
1739 	}
1740 
1741 	gc->max_num_msix = 0;
1742 	gc->num_msix_usable = 0;
1743 	free(gc->irq_contexts, M_DEVBUF);
1744 	gc->irq_contexts = NULL;
1745 
1746 	pci_release_msi(dev);
1747 }
1748 
1749 static int
1750 mana_gd_probe(device_t dev)
1751 {
1752 	mana_vendor_id_t *ent;
1753 	char		adapter_name[60];
1754 	uint16_t	pci_vendor_id = 0;
1755 	uint16_t	pci_device_id = 0;
1756 
1757 	pci_vendor_id = pci_get_vendor(dev);
1758 	pci_device_id = pci_get_device(dev);
1759 
1760 	ent = mana_id_table;
1761 	while (ent->vendor_id != 0) {
1762 		if ((pci_vendor_id == ent->vendor_id) &&
1763 		    (pci_device_id == ent->device_id)) {
1764 			mana_dbg(NULL, "vendor=%x device=%x\n",
1765 			    pci_vendor_id, pci_device_id);
1766 
1767 			sprintf(adapter_name, DEVICE_DESC);
1768 			device_set_desc_copy(dev, adapter_name);
1769 			return (BUS_PROBE_DEFAULT);
1770 		}
1771 
1772 		ent++;
1773 	}
1774 
1775 	return (ENXIO);
1776 }
1777 
1778 /**
1779  * mana_attach - Device Initialization Routine
1780  * @dev: device information struct
1781  *
1782  * Returns 0 on success, or an error code on failure.
1783  *
1784  * mana_attach initializes a GDMA adapter identified by a device structure.
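 *
 * The attach path maps BAR0 and the MSI-X table, sets up the interrupt
 * vectors, creates the HWC channel, verifies the VF driver version,
 * queries the device resource limits, detects the MANA device and
 * finally calls mana_probe().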
1785  **/
1786 static int
1787 mana_gd_attach(device_t dev)
1788 {
1789 	struct gdma_context *gc;
1790 	int msix_rid;
1791 	int rc;
1792 
1793 	gc = device_get_softc(dev);
1794 	gc->dev = dev;
1795 
1796 	pci_enable_io(dev, SYS_RES_IOPORT);
1797 	pci_enable_io(dev, SYS_RES_MEMORY);
1798 
1799 	pci_enable_busmaster(dev);
1800 
1801 	gc->bar0 = mana_gd_alloc_bar(dev, GDMA_BAR0);
1802 	if (unlikely(gc->bar0 == NULL)) {
1803 		device_printf(dev,
1804 		    "unable to allocate bus resource for bar0!\n");
1805 		rc = ENOMEM;
1806 		goto err_disable_dev;
1807 	}
1808 
1809 	/* Store bar0 tag and handle for quick access */
1810 	gc->gd_bus.bar0_t = rman_get_bustag(gc->bar0);
1811 	gc->gd_bus.bar0_h = rman_get_bushandle(gc->bar0);
1812 
1813 	/* Map MSI-x vector table */
1814 	msix_rid = pci_msix_table_bar(dev);
1815 
1816 	mana_dbg(NULL, "msix_rid 0x%x\n", msix_rid);
1817 
1818 	gc->msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1819 	    &msix_rid, RF_ACTIVE);
1820 	if (unlikely(gc->msix == NULL)) {
1821 		device_printf(dev,
1822 		    "unable to allocate bus resource for msix!\n");
1823 		rc = ENOMEM;
1824 		goto err_free_pci_res;
1825 	}
1826 	gc->msix_rid = msix_rid;
1827 
1828 	if (unlikely(gc->gd_bus.bar0_h  == 0)) {
1829 		device_printf(dev, "failed to map bar0!\n");
1830 		rc = ENXIO;
1831 		goto err_free_pci_res;
1832 	}
1833 
1834 	mana_gd_init_registers(gc);
1835 
1836 	mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
1837 
1838 	rc = mana_gd_setup_irqs(dev);
1839 	if (rc) {
1840 		goto err_free_pci_res;
1841 	}
1842 
1843 	sx_init(&gc->eq_test_event_sx, "gdma test event sx");
1844 
1845 	rc = mana_hwc_create_channel(gc);
1846 	if (rc) {
1847 		mana_dbg(NULL, "Failed to create hwc channel\n");
1848 		if (rc == EIO)
1849 			goto err_clean_up_gdma;
1850 		else
1851 			goto err_remove_irq;
1852 	}
1853 
1854 	rc = mana_gd_verify_vf_version(dev);
1855 	if (rc) {
1856 		mana_dbg(NULL, "Failed to verify vf\n");
1857 		goto err_clean_up_gdma;
1858 	}
1859 
1860 	rc = mana_gd_query_max_resources(dev);
1861 	if (rc) {
1862 		mana_dbg(NULL, "Failed to query max resources\n");
1863 		goto err_clean_up_gdma;
1864 	}
1865 
1866 	rc = mana_gd_detect_devices(dev);
1867 	if (rc) {
1868 		mana_dbg(NULL, "Failed to detect mana device\n");
1869 		goto err_clean_up_gdma;
1870 	}
1871 
1872 	rc = mana_probe(&gc->mana);
1873 	if (rc) {
1874 		mana_dbg(NULL, "Failed to probe mana device\n");
1875 		goto err_clean_up_gdma;
1876 	}
1877 
1878 	return (0);
1879 
1880 err_clean_up_gdma:
1881 	mana_hwc_destroy_channel(gc);
1882 err_remove_irq:
1883 	mana_gd_remove_irqs(dev);
1884 err_free_pci_res:
1885 	mana_gd_free_pci_res(gc);
1886 err_disable_dev:
1887 	pci_disable_busmaster(dev);
1888 
1889 	return (rc);
1890 }
1891 
1892 /**
1893  * mana_detach - Device Removal Routine
1894  * @dev: device information struct
1895  *
1896  * mana_detach is called by the device subsystem to alert the driver
1897  * that it should release a PCI device.
1898  **/
1899 static int
1900 mana_gd_detach(device_t dev)
1901 {
1902 	struct gdma_context *gc = device_get_softc(dev);
1903 
1904 	mana_remove(&gc->mana);
1905 
1906 	mana_hwc_destroy_channel(gc);
1907 
1908 	mana_gd_remove_irqs(dev);
1909 
1910 	mana_gd_free_pci_res(gc);
1911 
1912 	pci_disable_busmaster(dev);
1913 
1914 	return (bus_generic_detach(dev));
1915 }
1916 
1917 
1918 /*********************************************************************
1919  *  FreeBSD Device Interface Entry Points
1920  *********************************************************************/
1921 
1922 static device_method_t mana_methods[] = {
1923     /* Device interface */
1924     DEVMETHOD(device_probe, mana_gd_probe),
1925     DEVMETHOD(device_attach, mana_gd_attach),
1926     DEVMETHOD(device_detach, mana_gd_detach),
1927     DEVMETHOD_END
1928 };
1929 
1930 static driver_t mana_driver = {
1931     "mana", mana_methods, sizeof(struct gdma_context),
1932 };
1933 
1934 DRIVER_MODULE(mana, pci, mana_driver, 0, 0);
1935 MODULE_PNP_INFO("U16:vendor;U16:device", pci, mana, mana_id_table,
1936     nitems(mana_id_table) - 1);
1937 MODULE_DEPEND(mana, pci, 1, 1, 1);
1938 MODULE_DEPEND(mana, ether, 1, 1, 1);
1939 
1940 /*********************************************************************/
1941