xref: /freebsd/sys/dev/ntb/ntb_transport.c (revision c93b6e5fa24ba172ab271432c6692f9cc604e15a)
1 /*-
2  * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
3  * Copyright (C) 2013 Intel Corporation
4  * Copyright (C) 2015 EMC Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * The Non-Transparent Bridge (NTB) is a device that allows you to connect
31  * two or more systems using PCI-e links, providing remote memory access.
32  *
33  * This module contains a transport for sending and receiving messages by
34  * writing to remote memory window(s) provided by the underlying NTB device.
35  *
36  * NOTE: Much of the code in this module is shared with Linux. Any patches may
37  * be picked up and redistributed in Linux with a dual GPL/BSD license.
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <sys/param.h>
44 #include <sys/kernel.h>
45 #include <sys/systm.h>
46 #include <sys/bus.h>
47 #include <sys/ktr.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/module.h>
53 #include <sys/mutex.h>
54 #include <sys/queue.h>
55 #include <sys/sysctl.h>
56 #include <sys/taskqueue.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <machine/bus.h>
62 
63 #include "ntb.h"
64 #include "ntb_transport.h"
65 
66 #define KTR_NTB KTR_SPARE3
67 
68 #define NTB_TRANSPORT_VERSION	4
69 
70 static SYSCTL_NODE(_hw, OID_AUTO, ntb_transport, CTLFLAG_RW, 0, "ntb_transport");
71 
72 static unsigned g_ntb_transport_debug_level;
73 SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, debug_level, CTLFLAG_RWTUN,
74     &g_ntb_transport_debug_level, 0,
75     "ntb_transport log level -- higher is more verbose");
76 #define ntb_printf(lvl, ...) do {			\
77 	if ((lvl) <= g_ntb_transport_debug_level) {	\
78 		printf(__VA_ARGS__);			\
79 	}						\
80 } while (0)
81 
82 static unsigned transport_mtu = 0x10000;
83 
84 static uint64_t max_mw_size;
85 SYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN,
    &max_mw_size, 0,
86     "If enabled (non-zero), limit the size of large memory windows. "
87     "Both sides of the NTB MUST set the same value here.");
88 
89 static unsigned enable_xeon_watchdog;
90 SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN,
91     &enable_xeon_watchdog, 0, "If non-zero, write a register every second to "
92     "keep a watchdog from tearing down the NTB link");
93 
94 STAILQ_HEAD(ntb_queue_list, ntb_queue_entry);
95 
96 typedef uint32_t ntb_q_idx_t;
97 
98 struct ntb_queue_entry {
99 	/* ntb_queue list reference */
100 	STAILQ_ENTRY(ntb_queue_entry) entry;
101 
102 	/* info on data to be transferred */
103 	void		*cb_data;
104 	void		*buf;
105 	uint32_t	len;
106 	uint32_t	flags;
107 
108 	struct ntb_transport_qp		*qp;
109 	struct ntb_payload_header	*x_hdr;
110 	ntb_q_idx_t	index;
111 };
112 
113 struct ntb_rx_info {
114 	ntb_q_idx_t	entry;
115 };
116 
117 struct ntb_transport_qp {
118 	struct ntb_transport_ctx	*transport;
119 	device_t		 dev;
120 
121 	void			*cb_data;
122 
123 	bool			client_ready;
124 	volatile bool		link_is_up;
125 	uint8_t			qp_num;	/* Only 64 QPs are allowed.  0-63 */
126 
127 	struct ntb_rx_info	*rx_info;
128 	struct ntb_rx_info	*remote_rx_info;
129 
130 	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
131 	    void *data, int len);
132 	struct ntb_queue_list	tx_free_q;
133 	struct mtx		ntb_tx_free_q_lock;
134 	caddr_t			tx_mw;
135 	bus_addr_t		tx_mw_phys;
136 	ntb_q_idx_t		tx_index;
137 	ntb_q_idx_t		tx_max_entry;
138 	uint64_t		tx_max_frame;
139 
140 	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
141 	    void *data, int len);
142 	struct ntb_queue_list	rx_post_q;
143 	struct ntb_queue_list	rx_pend_q;
144 	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
145 	struct mtx		ntb_rx_q_lock;
146 	struct task		rxc_db_work;
147 	struct taskqueue	*rxc_tq;
148 	caddr_t			rx_buff;
149 	ntb_q_idx_t		rx_index;
150 	ntb_q_idx_t		rx_max_entry;
151 	uint64_t		rx_max_frame;
152 
153 	void (*event_handler)(void *data, enum ntb_link_event status);
154 	struct callout		link_work;
155 	struct callout		rx_full;
156 
157 	uint64_t		last_rx_no_buf;
158 
159 	/* Stats */
160 	uint64_t		rx_bytes;
161 	uint64_t		rx_pkts;
162 	uint64_t		rx_ring_empty;
163 	uint64_t		rx_err_no_buf;
164 	uint64_t		rx_err_oflow;
165 	uint64_t		rx_err_ver;
166 	uint64_t		tx_bytes;
167 	uint64_t		tx_pkts;
168 	uint64_t		tx_ring_full;
169 	uint64_t		tx_err_no_buf;
170 
171 	struct mtx		tx_lock;
172 };
173 
174 struct ntb_transport_mw {
175 	vm_paddr_t	phys_addr;
176 	size_t		phys_size;
177 	size_t		xlat_align;
178 	size_t		xlat_align_size;
179 	bus_addr_t	addr_limit;
180 	/* Tx buff is carved from vbase / phys_addr (the mapped window) */
181 	caddr_t		vbase;
182 	size_t		buff_size;
183 	/* Rx buff is carved from virt_addr / dma_addr (local DMA memory) */
184 	bus_dma_tag_t	dma_tag;
185 	bus_dmamap_t	dma_map;
186 	caddr_t		virt_addr;
187 	bus_addr_t	dma_addr;
188 };
189 
190 struct ntb_transport_child {
191 	device_t	dev;
192 	int		consumer;
193 	int		qpoff;
194 	int		qpcnt;
195 	struct ntb_transport_child *next;
196 };
197 
198 struct ntb_transport_ctx {
199 	device_t		 dev;
200 	struct ntb_transport_child *child;
201 	struct ntb_transport_mw	*mw_vec;
202 	struct ntb_transport_qp	*qp_vec;
203 	unsigned		mw_count;
204 	unsigned		qp_count;
205 	uint64_t		qp_bitmap;
206 	volatile bool		link_is_up;
207 	enum ntb_speed		link_speed;
208 	enum ntb_width		link_width;
209 	struct callout		link_work;
210 	struct callout		link_watchdog;
211 	struct task		link_cleanup;
212 };
213 
214 enum {
215 	NTBT_DESC_DONE_FLAG = 1 << 0,
216 	NTBT_LINK_DOWN_FLAG = 1 << 1,
217 };
218 
219 struct ntb_payload_header {
220 	ntb_q_idx_t ver;
221 	uint32_t len;
222 	uint32_t flags;
223 };
224 
225 enum {
226 	/*
227 	 * The order of this enum is part of the remote protocol.  Do not
228 	 * reorder without bumping the protocol version (and it's probably best
229 	 * to keep the protocol in lock-step with the Linux NTB driver).
230 	 */
231 	NTBT_VERSION = 0,
232 	NTBT_QP_LINKS,
233 	NTBT_NUM_QPS,
234 	NTBT_NUM_MWS,
235 	/*
236 	 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2.
237 	 */
238 	NTBT_MW0_SZ_HIGH,
239 	NTBT_MW0_SZ_LOW,
240 	NTBT_MW1_SZ_HIGH,
241 	NTBT_MW1_SZ_LOW,
242 
243 	/*
244 	 * Some NTB-using hardware have a watchdog to work around NTB hangs; if
245 	 * a register or doorbell isn't written every few seconds, the link is
246 	 * torn down.  Write an otherwise unused register every few seconds to
247 	 * work around this watchdog.
248 	 */
249 	NTBT_WATCHDOG_SPAD = 15
250 };
251 
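/*
 * Queue pairs are striped across memory windows round-robin; e.g. with
 * 2 MWs and 5 QPs, QPs 0, 2 and 4 land in MW0 and QPs 1 and 3 in MW1.
 */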
252 #define QP_TO_MW(nt, qp)	((qp) % (nt)->mw_count)
253 #define NTB_QP_DEF_NUM_ENTRIES	100
254 #define NTB_LINK_DOWN_TIMEOUT	100
255 
256 static int ntb_transport_probe(device_t dev);
257 static int ntb_transport_attach(device_t dev);
258 static int ntb_transport_detach(device_t dev);
259 static void ntb_transport_init_queue(struct ntb_transport_ctx *nt,
260     unsigned int qp_num);
261 static int ntb_process_tx(struct ntb_transport_qp *qp,
262     struct ntb_queue_entry *entry);
263 static void ntb_transport_rxc_db(void *arg, int pending);
264 static int ntb_process_rxc(struct ntb_transport_qp *qp);
265 static void ntb_memcpy_rx(struct ntb_transport_qp *qp,
266     struct ntb_queue_entry *entry, void *offset);
267 static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp,
268     void *data);
269 static void ntb_complete_rxc(struct ntb_transport_qp *qp);
270 static void ntb_transport_doorbell_callback(void *data, uint32_t vector);
271 static void ntb_transport_event_callback(void *data);
272 static void ntb_transport_link_work(void *arg);
273 static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size);
274 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw);
275 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
276     unsigned int qp_num);
277 static void ntb_qp_link_work(void *arg);
278 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt);
279 static void ntb_transport_link_cleanup_work(void *, int);
280 static void ntb_qp_link_down(struct ntb_transport_qp *qp);
281 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp);
282 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp);
283 static void ntb_send_link_down(struct ntb_transport_qp *qp);
284 static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
285     struct ntb_queue_list *list);
286 static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock,
287     struct ntb_queue_list *list);
288 static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock,
289     struct ntb_queue_list *from, struct ntb_queue_list *to);
290 static void xeon_link_watchdog_hb(void *);
291 
292 static const struct ntb_ctx_ops ntb_transport_ops = {
293 	.link_event = ntb_transport_event_callback,
294 	.db_event = ntb_transport_doorbell_callback,
295 };
296 
297 MALLOC_DEFINE(M_NTB_T, "ntb_transport", "ntb transport driver");
298 
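/*
 * A note on iowrite32() (naming shared with the Linux driver): writes
 * that must be visible to the peer go through bus_space_write_4() so
 * that each one is an explicit, ordered 32-bit MMIO store into the
 * memory window rather than a compiler-scheduled plain store.
 */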
299 static inline void
300 iowrite32(uint32_t val, void *addr)
301 {
302 
303 	bus_space_write_4(X86_BUS_SPACE_MEM, 0 /* HACK */, (uintptr_t)addr,
304 	    val);
305 }
306 
307 /* Transport Init and teardown */
308 
309 static void
310 xeon_link_watchdog_hb(void *arg)
311 {
312 	struct ntb_transport_ctx *nt;
313 
314 	nt = arg;
315 	ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0);
316 	callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt);
317 }
318 
319 static int
320 ntb_transport_probe(device_t dev)
321 {
322 
323 	device_set_desc(dev, "NTB Transport");
324 	return (0);
325 }
326 
327 static int
328 ntb_transport_attach(device_t dev)
329 {
330 	struct ntb_transport_ctx *nt = device_get_softc(dev);
331 	struct ntb_transport_child **cpp = &nt->child;
332 	struct ntb_transport_child *nc;
333 	struct ntb_transport_mw *mw;
334 	uint64_t db_bitmap, size;
335 	int rc, i, db_count, spad_count, qp, qpu, qpo, qpt;
336 	char cfg[128] = "";
337 	char buf[32];
338 	char *n, *np, *c, *name;
339 
340 	nt->dev = dev;
341 	nt->mw_count = ntb_mw_count(dev);
342 	spad_count = ntb_spad_count(dev);
343 	db_bitmap = ntb_db_valid_mask(dev);
344 	db_count = flsll(db_bitmap);
345 	KASSERT(db_bitmap == ((uint64_t)1 << db_count) - 1,
346 	    ("Doorbells are not sequential (%jx).\n", db_bitmap));
347 
348 	if (nt->mw_count == 0) {
349 		device_printf(dev, "At least 1 memory window required.\n");
350 		return (ENXIO);
351 	}
352 	if (spad_count < 6) {
353 		device_printf(dev, "At least 6 scratchpads required.\n");
354 		return (ENXIO);
355 	}
356 	if (spad_count < 4 + 2 * nt->mw_count) {
357 		nt->mw_count = (spad_count - 4) / 2;
358 		device_printf(dev, "Scratchpads suffice only for %u "
359 		    "memory windows.\n", nt->mw_count);
360 	}
361 	if (db_bitmap == 0) {
362 		device_printf(dev, "At least one doorbell required.\n");
363 		return (ENXIO);
364 	}
365 
366 	nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T,
367 	    M_WAITOK | M_ZERO);
368 	for (i = 0; i < nt->mw_count; i++) {
369 		mw = &nt->mw_vec[i];
370 
371 		rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase,
372 		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
373 		    &mw->addr_limit);
374 		if (rc != 0)
375 			goto err;
376 
377 		mw->buff_size = 0;
378 		mw->virt_addr = NULL;
379 		mw->dma_addr = 0;
380 
381 		rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING);
382 		if (rc)
383 			ntb_printf(0, "Unable to set mw%d caching\n", i);
384 
385 		/*
386 		 * Try to preallocate receive memory early, since there may
387 		 * not be enough contiguous memory later.  It is quite likely
388 		 * that NTB windows are symmetric and this allocation will
389 		 * remain, but even if not, we will just reallocate it later.
390 		 */
391 		size = mw->phys_size;
392 		if (max_mw_size != 0 && size > max_mw_size)
393 			size = max_mw_size;
394 		ntb_set_mw(nt, i, size);
395 	}
396 
397 	qpu = 0;
398 	qpo = imin(db_count, nt->mw_count);
399 	qpt = db_count;
400 
401 	snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev),
402 	    device_get_unit(dev));
403 	TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg));
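	/*
	 * The config hint is a comma-separated list of consumers, each of
	 * the form "name:queues" with either part optional; for example
	 * (hypothetical names) hint.ntb_transport.0.config="ntb_net:2"
	 * would create one child "ntb_net" owning two queue pairs.
	 */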
404 	n = cfg;
405 	i = 0;
406 	while ((c = strsep(&n, ",")) != NULL) {
407 		np = c;
408 		name = strsep(&np, ":");
409 		if (name != NULL && name[0] == 0)
410 			name = NULL;
411 		qp = (np && np[0] != 0) ? strtol(np, NULL, 10) : qpo - qpu;
412 		if (qp <= 0)
413 			qp = 1;
414 
415 		if (qp > qpt - qpu) {
416 			device_printf(dev, "Not enough resources for config\n");
417 			break;
418 		}
419 
420 		nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO);
421 		nc->consumer = i;
422 		nc->qpoff = qpu;
423 		nc->qpcnt = qp;
424 		nc->dev = device_add_child(dev, name, -1);
425 		if (nc->dev == NULL) {
426 			device_printf(dev, "Cannot add child.\n");
427 			break;
428 		}
429 		device_set_ivars(nc->dev, nc);
430 		*cpp = nc;
431 		cpp = &nc->next;
432 
433 		if (bootverbose) {
434 			device_printf(dev, "%d \"%s\": queues %d",
435 			    i, name != NULL ? name : "", qpu);
436 			if (qp > 1)
437 				printf("-%d", qpu + qp - 1);
438 			printf("\n");
439 		}
440 
441 		qpu += qp;
442 		i++;
443 	}
444 	nt->qp_count = qpu;
445 
446 	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T,
447 	    M_WAITOK | M_ZERO);
448 
449 	for (i = 0; i < nt->qp_count; i++)
450 		ntb_transport_init_queue(nt, i);
451 
452 	callout_init(&nt->link_work, 0);
453 	callout_init(&nt->link_watchdog, 0);
454 	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);
455 	nt->link_is_up = false;
456 
457 	rc = ntb_set_ctx(dev, nt, &ntb_transport_ops);
458 	if (rc != 0)
459 		goto err;
460 
461 	ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
462 
463 	for (i = 0; i < nt->mw_count; i++) {
464 		mw = &nt->mw_vec[i];
465 		rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr, mw->buff_size);
466 		if (rc != 0)
467 			ntb_printf(0, "load time mw%d xlat fails, rc %d\n", i, rc);
468 	}
469 
470 	if (enable_xeon_watchdog != 0)
471 		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
472 
473 	bus_generic_attach(dev);
474 	return (0);
475 
476 err:
477 	free(nt->qp_vec, M_NTB_T);
478 	free(nt->mw_vec, M_NTB_T);
479 	return (rc);
480 }
481 
482 static int
483 ntb_transport_detach(device_t dev)
484 {
485 	struct ntb_transport_ctx *nt = device_get_softc(dev);
486 	struct ntb_transport_child **cpp = &nt->child;
487 	struct ntb_transport_child *nc;
488 	int error = 0, i;
489 
490 	while ((nc = *cpp) != NULL) {
491 		*cpp = (*cpp)->next;
492 		error = device_delete_child(dev, nc->dev);
493 		if (error)
494 			break;
495 		free(nc, M_DEVBUF);
496 	}
497 	KASSERT(nt->qp_bitmap == 0,
498 	    ("Some queues not freed on detach (%jx)", nt->qp_bitmap));
499 
500 	ntb_transport_link_cleanup(nt);
501 	taskqueue_drain(taskqueue_swi, &nt->link_cleanup);
502 	callout_drain(&nt->link_work);
503 	callout_drain(&nt->link_watchdog);
504 
505 	ntb_link_disable(dev);
506 	ntb_clear_ctx(dev);
507 
508 	for (i = 0; i < nt->mw_count; i++)
509 		ntb_free_mw(nt, i);
510 
511 	free(nt->qp_vec, M_NTB_T);
512 	free(nt->mw_vec, M_NTB_T);
513 	return (0);
514 }
515 
516 static int
517 ntb_transport_print_child(device_t dev, device_t child)
518 {
519 	struct ntb_transport_child *nc = device_get_ivars(child);
520 	int retval;
521 
522 	retval = bus_print_child_header(dev, child);
523 	if (nc->qpcnt > 0) {
524 		retval += printf(" queue %d", nc->qpoff);
525 		if (nc->qpcnt > 1)
526 			retval += printf("-%d", nc->qpoff + nc->qpcnt - 1);
527 	}
528 	retval += printf(" at consumer %d", nc->consumer);
529 	retval += bus_print_child_domain(dev, child);
530 	retval += bus_print_child_footer(dev, child);
531 
532 	return (retval);
533 }
534 
535 static int
536 ntb_transport_child_location_str(device_t dev, device_t child, char *buf,
537     size_t buflen)
538 {
539 	struct ntb_transport_child *nc = device_get_ivars(child);
540 
541 	snprintf(buf, buflen, "consumer=%d", nc->consumer);
542 	return (0);
543 }
544 
545 int
546 ntb_transport_queue_count(device_t dev)
547 {
548 	struct ntb_transport_child *nc = device_get_ivars(dev);
549 
550 	return (nc->qpcnt);
551 }
552 
553 static void
554 ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
555 {
556 	struct ntb_transport_mw *mw;
557 	struct ntb_transport_qp *qp;
558 	vm_paddr_t mw_base;
559 	uint64_t mw_size, qp_offset;
560 	size_t tx_size;
561 	unsigned num_qps_mw, mw_num, mw_count;
562 
563 	mw_count = nt->mw_count;
564 	mw_num = QP_TO_MW(nt, qp_num);
565 	mw = &nt->mw_vec[mw_num];
566 
567 	qp = &nt->qp_vec[qp_num];
568 	qp->qp_num = qp_num;
569 	qp->transport = nt;
570 	qp->dev = nt->dev;
571 	qp->client_ready = false;
572 	qp->event_handler = NULL;
573 	ntb_qp_link_down_reset(qp);
574 
575 	if (mw_num < nt->qp_count % mw_count)
576 		num_qps_mw = nt->qp_count / mw_count + 1;
577 	else
578 		num_qps_mw = nt->qp_count / mw_count;
579 
580 	mw_base = mw->phys_addr;
581 	mw_size = mw->phys_size;
582 
583 	tx_size = mw_size / num_qps_mw;
584 	qp_offset = tx_size * (qp_num / mw_count);
585 
586 	qp->tx_mw = mw->vbase + qp_offset;
587 	KASSERT(qp->tx_mw != NULL, ("qp%u: NULL tx_mw", qp_num));
588 
589 	/* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */
590 	qp->tx_mw_phys = mw_base + qp_offset;
591 	KASSERT(qp->tx_mw_phys != 0, ("qp%u: zero tx_mw_phys", qp_num));
592 
593 	tx_size -= sizeof(struct ntb_rx_info);
594 	qp->rx_info = (void *)(qp->tx_mw + tx_size);
595 
596 	/* Due to house-keeping, there must be at least 2 buffs */
597 	qp->tx_max_frame = qmin(transport_mtu, tx_size / 2);
598 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
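	/*
	 * Resulting layout of this queue's slice of the window (a sketch):
	 *
	 *   [frame 0][frame 1]...[frame N-1][struct ntb_rx_info]
	 *
	 * where each tx_max_frame-sized frame carries its payload followed
	 * by a trailing struct ntb_payload_header (see ntb_async_tx()).
	 */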
599 
600 	callout_init(&qp->link_work, 0);
601 	callout_init(&qp->rx_full, 1);
602 
603 	mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN);
604 	mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
605 	mtx_init(&qp->tx_lock, "ntb transport tx", NULL, MTX_DEF);
606 	TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp);
607 	qp->rxc_tq = taskqueue_create("ntbt_rx", M_WAITOK,
608 	    taskqueue_thread_enqueue, &qp->rxc_tq);
609 	taskqueue_start_threads(&qp->rxc_tq, 1, PI_NET, "%s rx%d",
610 	    device_get_nameunit(nt->dev), qp_num);
611 
612 	STAILQ_INIT(&qp->rx_post_q);
613 	STAILQ_INIT(&qp->rx_pend_q);
614 	STAILQ_INIT(&qp->tx_free_q);
615 }
616 
617 void
618 ntb_transport_free_queue(struct ntb_transport_qp *qp)
619 {
620 	struct ntb_transport_ctx *nt = qp->transport;
621 	struct ntb_queue_entry *entry;
622 
623 	callout_drain(&qp->link_work);
624 
625 	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
626 	taskqueue_drain_all(qp->rxc_tq);
627 	taskqueue_free(qp->rxc_tq);
628 
629 	qp->cb_data = NULL;
630 	qp->rx_handler = NULL;
631 	qp->tx_handler = NULL;
632 	qp->event_handler = NULL;
633 
634 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q)))
635 		free(entry, M_NTB_T);
636 
637 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q)))
638 		free(entry, M_NTB_T);
639 
640 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
641 		free(entry, M_NTB_T);
642 
643 	nt->qp_bitmap &= ~(1ull << qp->qp_num);
644 }
645 
646 /**
647  * ntb_transport_create_queue - Create a new NTB transport layer queue
648  * @dev: NTB transport child device; @q: queue index within the child
649  * @handlers: receive, transmit and event callback functions
650  * @data: user context passed back to the callbacks
651  *
652  * Create a new NTB transport layer queue and provide the queue with a callback
653  * routine for both transmit and receive.  The receive callback routine will be
654  * used to pass up data when the transport has received it on the queue.  The
655  * transmit callback routine will be called when the transport has completed the
656  * transmission of the data on the queue and the data is ready to be freed.
657  *
658  * RETURNS: pointer to newly created ntb_queue, NULL on error.
659  */
660 struct ntb_transport_qp *
661 ntb_transport_create_queue(device_t dev, int q,
662     const struct ntb_queue_handlers *handlers, void *data)
663 {
664 	struct ntb_transport_child *nc = device_get_ivars(dev);
665 	struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev));
666 	struct ntb_queue_entry *entry;
667 	struct ntb_transport_qp *qp;
668 	int i;
669 
670 	if (q < 0 || q >= nc->qpcnt)
671 		return (NULL);
672 
673 	qp = &nt->qp_vec[nc->qpoff + q];
674 	nt->qp_bitmap |= (1ull << qp->qp_num);
675 	qp->cb_data = data;
676 	qp->rx_handler = handlers->rx_handler;
677 	qp->tx_handler = handlers->tx_handler;
678 	qp->event_handler = handlers->event_handler;
679 
680 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
681 		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
682 		entry->cb_data = data;
683 		entry->buf = NULL;
684 		entry->len = transport_mtu;
685 		entry->qp = qp;
686 		ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q);
687 	}
688 
689 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
690 		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
691 		entry->qp = qp;
692 		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
693 	}
694 
695 	ntb_db_clear(dev, 1ull << qp->qp_num);
696 	return (qp);
697 }
698 
699 /**
700  * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
701  * @qp: NTB transport layer queue to be enabled
702  *
703  * Notify NTB transport layer of client readiness to use queue
704  */
705 void
706 ntb_transport_link_up(struct ntb_transport_qp *qp)
707 {
708 	struct ntb_transport_ctx *nt = qp->transport;
709 
710 	qp->client_ready = true;
711 
712 	ntb_printf(2, "qp %d client ready\n", qp->qp_num);
713 
714 	if (nt->link_is_up)
715 		callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
716 }
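
/*
 * A minimal consumer sketch (hypothetical callback names, error
 * handling omitted) showing how queue creation and link-up pair up:
 *
 *	static const struct ntb_queue_handlers handlers = {
 *		.rx_handler = my_rx,		(per received frame)
 *		.tx_handler = my_tx_done,	(send completion)
 *		.event_handler = my_link_event,	(NTB_LINK_UP / NTB_LINK_DOWN)
 *	};
 *
 *	qp = ntb_transport_create_queue(dev, 0, &handlers, softc);
 *	if (qp != NULL)
 *		ntb_transport_link_up(qp);
 */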
717 
720 /* Transport Tx */
721 
722 /**
723  * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
724  * @qp: NTB transport layer queue the entry is to be enqueued on
725  * @cb: per buffer pointer for callback function to use
726  * @data: pointer to data buffer that will be sent
727  * @len: length of the data buffer
728  *
729  * Enqueue a new transmit buffer onto the transport queue from which an NTB
730  * payload will be transmitted.  This assumes that a lock is being held to
731  * serialize access to the qp.
732  *
733  * RETURNS: An appropriate ERRNO error value on error, or zero for success.
734  */
735 int
736 ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
737     unsigned int len)
738 {
739 	struct ntb_queue_entry *entry;
740 	int rc;
741 
742 	if (!qp->link_is_up || len == 0) {
743 		CTR0(KTR_NTB, "TX: link not up");
744 		return (EINVAL);
745 	}
746 
747 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
748 	if (entry == NULL) {
749 		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
750 		qp->tx_err_no_buf++;
751 		return (EBUSY);
752 	}
753 	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);
754 
755 	entry->cb_data = cb;
756 	entry->buf = data;
757 	entry->len = len;
758 	entry->flags = 0;
759 
760 	mtx_lock(&qp->tx_lock);
761 	rc = ntb_process_tx(qp, entry);
762 	mtx_unlock(&qp->tx_lock);
763 	if (rc != 0) {
764 		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
765 		CTR1(KTR_NTB,
766 		    "TX: process_tx failed. Returning entry %p to tx_free_q",
767 		    entry);
768 	}
769 	return (rc);
770 }
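
/*
 * Usage sketch for ntb_transport_tx_enqueue() above (hypothetical):
 * since ntb_memcpy_tx() copies the payload with m_copydata(), the data
 * argument must be an mbuf chain, e.g.:
 *
 *	if (m->m_pkthdr.len > ntb_transport_max_size(qp))
 *		return (EMSGSIZE);
 *	rc = ntb_transport_tx_enqueue(qp, m, m, m->m_pkthdr.len);
 *
 * rc is EINVAL if the link is down, EBUSY if no free entries remain,
 * EAGAIN if the ring is full; on success the mbuf belongs to the
 * transport until tx_handler (or m_freem(), absent one) runs.
 */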
771 
772 static void
773 ntb_tx_copy_callback(void *data)
774 {
775 	struct ntb_queue_entry *entry = data;
776 	struct ntb_transport_qp *qp = entry->qp;
777 	struct ntb_payload_header *hdr = entry->x_hdr;
778 
779 	iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags);
780 	CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);
781 
782 	ntb_peer_db_set(qp->dev, 1ull << qp->qp_num);
783 
784 	/*
785 	 * The entry length can only be zero if the packet is intended to be a
786 	 * "link down" or similar.  Since no payload is being sent in these
787 	 * cases, there is nothing to add to the completion queue.
788 	 */
789 	if (entry->len > 0) {
790 		qp->tx_bytes += entry->len;
791 
792 		if (qp->tx_handler)
793 			qp->tx_handler(qp, qp->cb_data, entry->buf,
794 			    entry->len);
795 		else
796 			m_freem(entry->buf);
797 		entry->buf = NULL;
798 	}
799 
800 	CTR3(KTR_NTB,
801 	    "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning "
802 	    "to tx_free_q", entry, hdr->ver, hdr->flags);
803 	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
804 }
805 
806 static void
807 ntb_memcpy_tx(struct ntb_queue_entry *entry, void *offset)
808 {
809 
810 	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
811 	if (entry->buf != NULL) {
812 		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);
813 
814 		/*
815 		 * Ensure that the data is fully copied before setting the
816 		 * flags
817 		 */
818 		wmb();
819 	}
820 
821 	ntb_tx_copy_callback(entry);
822 }
823 
824 static void
825 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
826 {
827 	struct ntb_payload_header *hdr;
828 	void *offset;
829 
830 	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
831 	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
832 	    sizeof(struct ntb_payload_header));
833 	entry->x_hdr = hdr;
834 
835 	iowrite32(entry->len, &hdr->len);
836 	iowrite32(qp->tx_pkts, &hdr->ver);
837 
838 	ntb_memcpy_tx(entry, offset);
839 }
840 
841 static int
842 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
843 {
844 
845 	CTR3(KTR_NTB,
846 	    "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u",
847 	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
848 	if (qp->tx_index == qp->remote_rx_info->entry) {
849 		CTR0(KTR_NTB, "TX: ring full");
850 		qp->tx_ring_full++;
851 		return (EAGAIN);
852 	}
853 
854 	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
855 		if (qp->tx_handler != NULL)
856 			qp->tx_handler(qp, qp->cb_data, entry->buf,
857 			    EIO);
858 		else
859 			m_freem(entry->buf);
860 
861 		entry->buf = NULL;
862 		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
863 		CTR1(KTR_NTB,
864 		    "TX: frame too big. returning entry %p to tx_free_q",
865 		    entry);
866 		return (0);
867 	}
868 	CTR2(KTR_NTB, "TX: copying entry %p to index %u", entry, qp->tx_index);
869 	ntb_async_tx(qp, entry);
870 
871 	qp->tx_index++;
872 	qp->tx_index %= qp->tx_max_entry;
873 
874 	qp->tx_pkts++;
875 
876 	return (0);
877 }
878 
879 /* Transport Rx */
880 static void
881 ntb_transport_rxc_db(void *arg, int pending __unused)
882 {
883 	struct ntb_transport_qp *qp = arg;
884 	uint64_t qp_mask = 1ull << qp->qp_num;
885 	int rc;
886 
887 	CTR0(KTR_NTB, "RX: transport_rx");
888 again:
889 	while ((rc = ntb_process_rxc(qp)) == 0)
890 		;
891 	CTR1(KTR_NTB, "RX: process_rxc returned %d", rc);
892 
893 	if ((ntb_db_read(qp->dev) & qp_mask) != 0) {
894 		/* If db is set, clear it and check queue once more. */
895 		ntb_db_clear(qp->dev, qp_mask);
896 		goto again;
897 	}
898 	if (qp->link_is_up)
899 		ntb_db_clear_mask(qp->dev, qp_mask);
900 }
901 
902 static int
903 ntb_process_rxc(struct ntb_transport_qp *qp)
904 {
905 	struct ntb_payload_header *hdr;
906 	struct ntb_queue_entry *entry;
907 	caddr_t offset;
908 
909 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
910 	hdr = (void *)(offset + qp->rx_max_frame -
911 	    sizeof(struct ntb_payload_header));
912 
913 	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
914 	if ((hdr->flags & NTBT_DESC_DONE_FLAG) == 0) {
915 		CTR0(KTR_NTB, "RX: hdr not done");
916 		qp->rx_ring_empty++;
917 		return (EAGAIN);
918 	}
919 
920 	if ((hdr->flags & NTBT_LINK_DOWN_FLAG) != 0) {
921 		CTR0(KTR_NTB, "RX: link down");
922 		ntb_qp_link_down(qp);
923 		hdr->flags = 0;
924 		return (EAGAIN);
925 	}
926 
927 	if (hdr->ver != (uint32_t)qp->rx_pkts) {
928 		CTR2(KTR_NTB, "RX: ver != rx_pkts (%x != %lx). "
929 		    "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts);
930 		qp->rx_err_ver++;
931 		return (EIO);
932 	}
933 
934 	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
935 	if (entry == NULL) {
936 		qp->rx_err_no_buf++;
937 		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
938 		return (EAGAIN);
939 	}
940 	callout_stop(&qp->rx_full);
941 	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);
942 
943 	entry->x_hdr = hdr;
944 	entry->index = qp->rx_index;
945 
946 	if (hdr->len > entry->len) {
947 		CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju",
948 		    (uintmax_t)hdr->len, (uintmax_t)entry->len);
949 		qp->rx_err_oflow++;
950 
951 		entry->len = -EIO;
952 		entry->flags |= NTBT_DESC_DONE_FLAG;
953 
954 		ntb_complete_rxc(qp);
955 	} else {
956 		qp->rx_bytes += hdr->len;
957 		qp->rx_pkts++;
958 
959 		CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);
960 
961 		entry->len = hdr->len;
962 
963 		ntb_memcpy_rx(qp, entry, offset);
964 	}
965 
966 	qp->rx_index++;
967 	qp->rx_index %= qp->rx_max_entry;
968 	return (0);
969 }
970 
971 static void
972 ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
973     void *offset)
974 {
975 	struct ifnet *ifp = entry->cb_data;
976 	unsigned int len = entry->len;
977 
978 	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
979 
980 	entry->buf = (void *)m_devget(offset, len, 0, ifp, NULL);
981 	if (entry->buf == NULL)
982 		entry->len = -ENOMEM;
983 
984 	/* Ensure that the data is globally visible before clearing the flag */
985 	wmb();
986 
987 	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, entry->buf);
988 	ntb_rx_copy_callback(qp, entry);
989 }
990 
991 static inline void
992 ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data)
993 {
994 	struct ntb_queue_entry *entry;
995 
996 	entry = data;
997 	entry->flags |= NTBT_DESC_DONE_FLAG;
998 	ntb_complete_rxc(qp);
999 }
1000 
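/*
 * Drain completed entries from the head of rx_post_q in ring order:
 * each slot is re-armed by zeroing its header (so the DESC_DONE check
 * above fires again) and advertising the freed index to the peer
 * through rx_info->entry, after which the mbuf is handed to the
 * client's rx_handler, or freed if no client is ready.
 */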
1001 static void
1002 ntb_complete_rxc(struct ntb_transport_qp *qp)
1003 {
1004 	struct ntb_queue_entry *entry;
1005 	struct mbuf *m;
1006 	unsigned len;
1007 
1008 	CTR0(KTR_NTB, "RX: rx_completion_task");
1009 
1010 	mtx_lock_spin(&qp->ntb_rx_q_lock);
1011 
1012 	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
1013 		entry = STAILQ_FIRST(&qp->rx_post_q);
1014 		if ((entry->flags & NTBT_DESC_DONE_FLAG) == 0)
1015 			break;
1016 
1017 		entry->x_hdr->flags = 0;
1018 		iowrite32(entry->index, &qp->rx_info->entry);
1019 
1020 		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);
1021 
1022 		len = entry->len;
1023 		m = entry->buf;
1024 
1025 		/*
1026 		 * Re-initialize queue_entry for reuse; rx_handler takes
1027 		 * ownership of the mbuf.
1028 		 */
1029 		entry->buf = NULL;
1030 		entry->len = transport_mtu;
1031 		entry->cb_data = qp->cb_data;
1032 
1033 		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);
1034 
1035 		mtx_unlock_spin(&qp->ntb_rx_q_lock);
1036 
1037 		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
1038 		if (qp->rx_handler != NULL && qp->client_ready)
1039 			qp->rx_handler(qp, qp->cb_data, m, len);
1040 		else
1041 			m_freem(m);
1042 
1043 		mtx_lock_spin(&qp->ntb_rx_q_lock);
1044 	}
1045 
1046 	mtx_unlock_spin(&qp->ntb_rx_q_lock);
1047 }
1048 
1049 static void
1050 ntb_transport_doorbell_callback(void *data, uint32_t vector)
1051 {
1052 	struct ntb_transport_ctx *nt = data;
1053 	struct ntb_transport_qp *qp;
1054 	uint64_t vec_mask;
1055 	unsigned qp_num;
1056 
1057 	vec_mask = ntb_db_vector_mask(nt->dev, vector);
1058 	vec_mask &= nt->qp_bitmap;
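	/*
	 * (x & (x - 1)) != 0 iff more than one bit is set: when several
	 * queues share this vector, narrow the mask to the doorbells
	 * actually asserted in hardware.
	 */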
1059 	if ((vec_mask & (vec_mask - 1)) != 0)
1060 		vec_mask &= ntb_db_read(nt->dev);
1061 	if (vec_mask != 0) {
1062 		ntb_db_set_mask(nt->dev, vec_mask);
1063 		ntb_db_clear(nt->dev, vec_mask);
1064 	}
1065 	while (vec_mask != 0) {
1066 		qp_num = ffsll(vec_mask) - 1;
1067 
1068 		qp = &nt->qp_vec[qp_num];
1069 		if (qp->link_is_up)
1070 			taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work);
1071 
1072 		vec_mask &= ~(1ull << qp_num);
1073 	}
1074 }
1075 
1076 /* Link Event handler */
1077 static void
1078 ntb_transport_event_callback(void *data)
1079 {
1080 	struct ntb_transport_ctx *nt = data;
1081 
1082 	if (ntb_link_is_up(nt->dev, &nt->link_speed, &nt->link_width)) {
1083 		ntb_printf(1, "HW link up\n");
1084 		callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
1085 	} else {
1086 		ntb_printf(1, "HW link down\n");
1087 		taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup);
1088 	}
1089 }
1090 
1091 /* Link bring up */
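/*
 * A sketch of the scratchpad handshake implemented below:
 *  1. Write our MW sizes, MW/QP counts and protocol version into the
 *     peer's scratchpads, in the reverse of the order they are read.
 *  2. Read the peer's values from our scratchpads; version, QP count
 *     and MW count must all match.
 *  3. Resize each receive buffer to the agreed value, program the MW
 *     translation, mark the link up and kick the per-queue link work.
 * On mismatch, retry from the callout for as long as the HW link is up.
 */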
1092 static void
1093 ntb_transport_link_work(void *arg)
1094 {
1095 	struct ntb_transport_ctx *nt = arg;
1096 	struct ntb_transport_mw *mw;
1097 	device_t dev = nt->dev;
1098 	struct ntb_transport_qp *qp;
1099 	uint64_t val64, size;
1100 	uint32_t val;
1101 	unsigned i;
1102 	int rc;
1103 
1104 	/* send the local info, in the opposite order of the way we read it */
1105 	for (i = 0; i < nt->mw_count; i++) {
1106 		size = nt->mw_vec[i].phys_size;
1107 
1108 		if (max_mw_size != 0 && size > max_mw_size)
1109 			size = max_mw_size;
1110 
1111 		ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2),
1112 		    size >> 32);
1113 		ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size);
1114 	}
1115 	ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count);
1116 	ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count);
1117 	ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0);
1118 	ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION);
1119 
1120 	/* Query the remote side for its info */
1121 	val = 0;
1122 	ntb_spad_read(dev, NTBT_VERSION, &val);
1123 	if (val != NTB_TRANSPORT_VERSION)
1124 		goto out;
1125 
1126 	ntb_spad_read(dev, NTBT_NUM_QPS, &val);
1127 	if (val != nt->qp_count)
1128 		goto out;
1129 
1130 	ntb_spad_read(dev, NTBT_NUM_MWS, &val);
1131 	if (val != nt->mw_count)
1132 		goto out;
1133 
1134 	for (i = 0; i < nt->mw_count; i++) {
1135 		ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val);
1136 		val64 = (uint64_t)val << 32;
1137 
1138 		ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val);
1139 		val64 |= val;
1140 
1141 		mw = &nt->mw_vec[i];
1142 		val64 = roundup(val64, mw->xlat_align_size);
1143 		if (mw->buff_size != val64) {
1145 			rc = ntb_set_mw(nt, i, val64);
1146 			if (rc != 0) {
1147 				ntb_printf(0, "link up set mw%d fails, rc %d\n",
1148 				    i, rc);
1149 				goto free_mws;
1150 			}
1151 
1152 			/* Notify the HW of the location of the receive buffer */
1153 			rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr,
1154 			    mw->buff_size);
1155 			if (rc != 0) {
1156 				ntb_printf(0, "link up mw%d xlat fails, rc %d\n",
1157 				     i, rc);
1158 				goto free_mws;
1159 			}
1160 		}
1161 	}
1162 
1163 	nt->link_is_up = true;
1164 	ntb_printf(1, "transport link up\n");
1165 
1166 	for (i = 0; i < nt->qp_count; i++) {
1167 		qp = &nt->qp_vec[i];
1168 
1169 		ntb_transport_setup_qp_mw(nt, i);
1170 
1171 		if (qp->client_ready)
1172 			callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
1173 	}
1174 
1175 	return;
1176 
1177 free_mws:
1178 	for (i = 0; i < nt->mw_count; i++)
1179 		ntb_free_mw(nt, i);
1180 out:
1181 	if (ntb_link_is_up(dev, &nt->link_speed, &nt->link_width))
1182 		callout_reset(&nt->link_work,
1183 		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
1184 }
1185 
1186 struct ntb_load_cb_args {
1187 	bus_addr_t addr;
1188 	int error;
1189 };
1190 
1191 static void
1192 ntb_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
1193 {
1194 	struct ntb_load_cb_args *cba = (struct ntb_load_cb_args *)xsc;
1195 
1196 	if (!(cba->error = error))
1197 		cba->addr = segs[0].ds_addr;
1198 }
1199 
1200 static int
1201 ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size)
1202 {
1203 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
1204 	struct ntb_load_cb_args cba;
1205 	size_t buff_size;
1206 
1207 	if (size == 0)
1208 		return (EINVAL);
1209 
1210 	buff_size = roundup(size, mw->xlat_align_size);
1211 
1212 	/* No need to re-setup */
1213 	if (mw->buff_size == buff_size)
1214 		return (0);
1215 
1216 	if (mw->buff_size != 0)
1217 		ntb_free_mw(nt, num_mw);
1218 
1219 	/* Alloc memory for receiving data.  Must be aligned */
1220 	mw->buff_size = buff_size;
1221 
1222 	if (bus_dma_tag_create(bus_get_dma_tag(nt->dev), mw->xlat_align, 0,
1223 	    mw->addr_limit, BUS_SPACE_MAXADDR,
1224 	    NULL, NULL, mw->buff_size, 1, mw->buff_size,
1225 	    0, NULL, NULL, &mw->dma_tag)) {
1226 		ntb_printf(0, "Unable to create MW tag of size %zu\n",
1227 		    mw->buff_size);
1228 		mw->buff_size = 0;
1229 		return (ENOMEM);
1230 	}
1231 	if (bus_dmamem_alloc(mw->dma_tag, (void **)&mw->virt_addr,
1232 	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &mw->dma_map)) {
1233 		bus_dma_tag_destroy(mw->dma_tag);
1234 		ntb_printf(0, "Unable to allocate MW buffer of size %zu\n",
1235 		    mw->buff_size);
1236 		mw->buff_size = 0;
1237 		return (ENOMEM);
1238 	}
1239 	if (bus_dmamap_load(mw->dma_tag, mw->dma_map, mw->virt_addr,
1240 	    mw->buff_size, ntb_load_cb, &cba, BUS_DMA_NOWAIT) || cba.error) {
1241 		bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
1242 		bus_dma_tag_destroy(mw->dma_tag);
1243 		ntb_printf(0, "Unable to load MW buffer of size %zu\n",
1244 		    mw->buff_size);
1245 		mw->buff_size = 0;
1246 		return (ENOMEM);
1247 	}
1248 	mw->dma_addr = cba.addr;
1249 
1250 	return (0);
1251 }
1252 
1253 static void
1254 ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
1255 {
1256 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
1257 
1258 	if (mw->virt_addr == NULL)
1259 		return;
1260 
1261 	ntb_mw_clear_trans(nt->dev, num_mw);
1262 	bus_dmamap_unload(mw->dma_tag, mw->dma_map);
1263 	bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
1264 	bus_dma_tag_destroy(mw->dma_tag);
1265 	mw->buff_size = 0;
1266 	mw->virt_addr = NULL;
1267 }
1268 
1269 static int
1270 ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num)
1271 {
1272 	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
1273 	struct ntb_transport_mw *mw;
1274 	void *offset;
1275 	ntb_q_idx_t i;
1276 	size_t rx_size;
1277 	unsigned num_qps_mw, mw_num, mw_count;
1278 
1279 	mw_count = nt->mw_count;
1280 	mw_num = QP_TO_MW(nt, qp_num);
1281 	mw = &nt->mw_vec[mw_num];
1282 
1283 	if (mw->virt_addr == NULL)
1284 		return (ENOMEM);
1285 
1286 	if (mw_num < nt->qp_count % mw_count)
1287 		num_qps_mw = nt->qp_count / mw_count + 1;
1288 	else
1289 		num_qps_mw = nt->qp_count / mw_count;
1290 
1291 	rx_size = mw->buff_size / num_qps_mw;
1292 	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
1293 	rx_size -= sizeof(struct ntb_rx_info);
1294 
1295 	qp->remote_rx_info = (void*)(qp->rx_buff + rx_size);
1296 
1297 	/* Due to house-keeping, there must be at least 2 buffs */
1298 	qp->rx_max_frame = qmin(transport_mtu, rx_size / 2);
1299 	qp->rx_max_entry = rx_size / qp->rx_max_frame;
1300 	qp->rx_index = 0;
1301 
1302 	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
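	/*
	 * Start the peer-visible tail at the last slot: the TX side treats
	 * tx_index == remote_rx_info->entry as "ring full", so one slot is
	 * always sacrificed to tell a full ring from an empty one.
	 */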
1303 
1304 	/* Zero the payload header at the end of each frame */
1305 	for (i = 0; i < qp->rx_max_entry; i++) {
1306 		offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) -
1307 		    sizeof(struct ntb_payload_header));
1308 		memset(offset, 0, sizeof(struct ntb_payload_header));
1309 	}
1310 
1311 	qp->rx_pkts = 0;
1312 	qp->tx_pkts = 0;
1313 	qp->tx_index = 0;
1314 
1315 	return (0);
1316 }
1317 
1318 static void
1319 ntb_qp_link_work(void *arg)
1320 {
1321 	struct ntb_transport_qp *qp = arg;
1322 	device_t dev = qp->dev;
1323 	struct ntb_transport_ctx *nt = qp->transport;
1324 	int i;
1325 	uint32_t val;
1326 
1327 	/* Report queues that are up on our side */
1328 	for (i = 0, val = 0; i < nt->qp_count; i++) {
1329 		if (nt->qp_vec[i].client_ready)
1330 			val |= (1 << i);
1331 	}
1332 	ntb_peer_spad_write(dev, NTBT_QP_LINKS, val);
1333 
1334 	/* See if the remote side is up */
1335 	ntb_spad_read(dev, NTBT_QP_LINKS, &val);
1336 	if ((val & (1ull << qp->qp_num)) != 0) {
1337 		ntb_printf(2, "qp %d link up\n", qp->qp_num);
1338 		qp->link_is_up = true;
1339 
1340 		if (qp->event_handler != NULL)
1341 			qp->event_handler(qp->cb_data, NTB_LINK_UP);
1342 
1343 		ntb_db_clear_mask(dev, 1ull << qp->qp_num);
1344 	} else if (nt->link_is_up)
1345 		callout_reset(&qp->link_work,
1346 		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
1347 }
1348 
1349 /* Link down event */
1350 static void
1351 ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
1352 {
1353 	struct ntb_transport_qp *qp;
1354 	int i;
1355 
1356 	callout_drain(&nt->link_work);
1357 	nt->link_is_up = 0;
1358 
1359 	/* Pass along the info to any clients */
1360 	for (i = 0; i < nt->qp_count; i++) {
1361 		if ((nt->qp_bitmap & (1ull << i)) != 0) {
1362 			qp = &nt->qp_vec[i];
1363 			ntb_qp_link_cleanup(qp);
1364 			callout_drain(&qp->link_work);
1365 		}
1366 	}
1367 
1368 	/*
1369 	 * The scratchpad registers retain their values if the remote side
1370 	 * goes down; blast them now to give them a sane value the next
1371 	 * time they are accessed.
1372 	 */
1373 	ntb_spad_clear(nt->dev);
1374 }
1375 
1376 static void
1377 ntb_transport_link_cleanup_work(void *arg, int pending __unused)
1378 {
1379 
1380 	ntb_transport_link_cleanup(arg);
1381 }
1382 
1383 static void
1384 ntb_qp_link_down(struct ntb_transport_qp *qp)
1385 {
1386 
1387 	ntb_qp_link_cleanup(qp);
1388 }
1389 
1390 static void
1391 ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
1392 {
1393 
1394 	qp->link_is_up = false;
1395 	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
1396 
1397 	qp->tx_index = qp->rx_index = 0;
1398 	qp->tx_bytes = qp->rx_bytes = 0;
1399 	qp->tx_pkts = qp->rx_pkts = 0;
1400 
1401 	qp->rx_ring_empty = 0;
1402 	qp->tx_ring_full = 0;
1403 
1404 	qp->rx_err_no_buf = qp->tx_err_no_buf = 0;
1405 	qp->rx_err_oflow = qp->rx_err_ver = 0;
1406 }
1407 
1408 static void
1409 ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
1410 {
1411 
1412 	callout_drain(&qp->link_work);
1413 	ntb_qp_link_down_reset(qp);
1414 
1415 	if (qp->event_handler != NULL)
1416 		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
1417 }
1418 
1419 /* Link commanded down */
1420 /**
1421  * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1422  * @qp: NTB transport layer queue to be disabled
1423  *
1424  * Notify NTB transport layer of client's desire to no longer receive data on
1425  * transport queue specified.  It is the client's responsibility to ensure all
1426  * entries on queue are purged or otherwise handled appropriately.
1427  */
1428 void
1429 ntb_transport_link_down(struct ntb_transport_qp *qp)
1430 {
1431 	struct ntb_transport_ctx *nt = qp->transport;
1432 	int i;
1433 	uint32_t val;
1434 
1435 	qp->client_ready = false;
1436 	for (i = 0, val = 0; i < nt->qp_count; i++) {
1437 		if (nt->qp_vec[i].client_ready)
1438 			val |= (1 << i);
1439 	}
1440 	ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val);
1441 
1442 	if (qp->link_is_up)
1443 		ntb_send_link_down(qp);
1444 	else
1445 		callout_drain(&qp->link_work);
1446 }
1447 
1448 /**
1449  * ntb_transport_link_query - Query transport link state
1450  * @qp: NTB transport layer queue to be queried
1451  *
1452  * Query connectivity to the remote system of the NTB transport queue
1453  *
1454  * RETURNS: true for link up or false for link down
1455  */
1456 bool
1457 ntb_transport_link_query(struct ntb_transport_qp *qp)
1458 {
1459 
1460 	return (qp->link_is_up);
1461 }
1462 
1463 /**
1464  * ntb_transport_link_speed - Query transport link speed
1465  * @qp: NTB transport layer queue to be queried
1466  *
1467  * Query connection speed to the remote system of the NTB transport queue
1468  *
1469  * RETURNS: link speed in bits per second
1470  */
1471 uint64_t
1472 ntb_transport_link_speed(struct ntb_transport_qp *qp)
1473 {
1474 	struct ntb_transport_ctx *nt = qp->transport;
1475 	uint64_t rate;
1476 
1477 	if (!nt->link_is_up)
1478 		return (0);
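	/*
	 * Per-lane raw bit rate, derated by the PCIe line encoding:
	 * 8b/10b for Gen1/Gen2, 128b/130b for Gen3 and later.
	 */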
1479 	switch (nt->link_speed) {
1480 	case NTB_SPEED_GEN1:
1481 		rate = 2500000000 * 8 / 10;
1482 		break;
1483 	case NTB_SPEED_GEN2:
1484 		rate = 5000000000 * 8 / 10;
1485 		break;
1486 	case NTB_SPEED_GEN3:
1487 		rate = 8000000000 * 128 / 130;
1488 		break;
1489 	case NTB_SPEED_GEN4:
1490 		rate = 16000000000 * 128 / 130;
1491 		break;
1492 	default:
1493 		return (0);
1494 	}
1495 	if (nt->link_width <= 0)
1496 		return (0);
1497 	return (rate * nt->link_width);
1498 }
1499 
1500 static void
1501 ntb_send_link_down(struct ntb_transport_qp *qp)
1502 {
1503 	struct ntb_queue_entry *entry;
1504 	int i, rc;
1505 
1506 	if (!qp->link_is_up)
1507 		return;
1508 
1509 	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1510 		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1511 		if (entry != NULL)
1512 			break;
1513 		pause("NTB Wait for link down", hz / 10);
1514 	}
1515 
1516 	if (entry == NULL)
1517 		return;
1518 
1519 	entry->cb_data = NULL;
1520 	entry->buf = NULL;
1521 	entry->len = 0;
1522 	entry->flags = NTBT_LINK_DOWN_FLAG;
1523 
1524 	mtx_lock(&qp->tx_lock);
1525 	rc = ntb_process_tx(qp, entry);
1526 	mtx_unlock(&qp->tx_lock);
1527 	if (rc != 0)
1528 		printf("ntb: Failed to send link down\n");
1529 
1530 	ntb_qp_link_down_reset(qp);
1531 }
1532 
1533 
1534 /* List Management */
1535 
1536 static void
1537 ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
1538     struct ntb_queue_list *list)
1539 {
1540 
1541 	mtx_lock_spin(lock);
1542 	STAILQ_INSERT_TAIL(list, entry, entry);
1543 	mtx_unlock_spin(lock);
1544 }
1545 
1546 static struct ntb_queue_entry *
1547 ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
1548 {
1549 	struct ntb_queue_entry *entry;
1550 
1551 	mtx_lock_spin(lock);
1552 	if (STAILQ_EMPTY(list)) {
1553 		entry = NULL;
1554 		goto out;
1555 	}
1556 	entry = STAILQ_FIRST(list);
1557 	STAILQ_REMOVE_HEAD(list, entry);
1558 out:
1559 	mtx_unlock_spin(lock);
1560 
1561 	return (entry);
1562 }
1563 
1564 static struct ntb_queue_entry *
1565 ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from,
1566     struct ntb_queue_list *to)
1567 {
1568 	struct ntb_queue_entry *entry;
1569 
1570 	mtx_lock_spin(lock);
1571 	if (STAILQ_EMPTY(from)) {
1572 		entry = NULL;
1573 		goto out;
1574 	}
1575 	entry = STAILQ_FIRST(from);
1576 	STAILQ_REMOVE_HEAD(from, entry);
1577 	STAILQ_INSERT_TAIL(to, entry, entry);
1578 
1579 out:
1580 	mtx_unlock_spin(lock);
1581 	return (entry);
1582 }
1583 
1584 /**
1585  * ntb_transport_qp_num - Query the qp number
1586  * @qp: NTB transport layer queue to be queried
1587  *
1588  * Query qp number of the NTB transport queue
1589  *
1590  * RETURNS: a zero based number specifying the qp number
1591  */
1592 unsigned char
ntb_transport_qp_num(struct ntb_transport_qp *qp)
1593 {
1594 
1595 	return (qp->qp_num);
1596 }
1597 
1598 /**
1599  * ntb_transport_max_size - Query the max payload size of a qp
1600  * @qp: NTB transport layer queue to be queried
1601  *
1602  * Query the maximum payload size permissible on the given qp
1603  *
1604  * RETURNS: the max payload size of a qp
1605  */
1606 unsigned int
1607 ntb_transport_max_size(struct ntb_transport_qp *qp)
1608 {
1609 
1610 	return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
1611 }
1612 
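/*
 * Free TX entries are those between the local head (tx_index) and the
 * peer-advertised tail (remote_rx_info->entry), modulo the ring size;
 * e.g. with tx_max_entry = 8, head = 6, tail = 2: 8 + 2 - 6 = 4 free.
 */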
1613 unsigned int
1614 ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
1615 {
1616 	unsigned int head = qp->tx_index;
1617 	unsigned int tail = qp->remote_rx_info->entry;
1618 
1619 	return (tail >= head ? tail - head : qp->tx_max_entry + tail - head);
1620 }
1621 
1622 static device_method_t ntb_transport_methods[] = {
1623 	/* Device interface */
1624 	DEVMETHOD(device_probe,     ntb_transport_probe),
1625 	DEVMETHOD(device_attach,    ntb_transport_attach),
1626 	DEVMETHOD(device_detach,    ntb_transport_detach),
1627 	/* Bus interface */
1628 	DEVMETHOD(bus_child_location_str, ntb_transport_child_location_str),
1629 	DEVMETHOD(bus_print_child,  ntb_transport_print_child),
1630 	DEVMETHOD_END
1631 };
1632 
1633 devclass_t ntb_transport_devclass;
1634 static DEFINE_CLASS_0(ntb_transport, ntb_transport_driver,
1635     ntb_transport_methods, sizeof(struct ntb_transport_ctx));
1636 DRIVER_MODULE(ntb_transport, ntb_hw, ntb_transport_driver,
1637     ntb_transport_devclass, NULL, NULL);
1638 MODULE_DEPEND(ntb_transport, ntb, 1, 1, 1);
1639 MODULE_VERSION(ntb_transport, 1);
1640