xref: /freebsd/sys/dev/ntb/ntb_transport.c (revision 5ffd83dbcc34f10e07f6d3e968ae6365869615f4)
1 /*-
2  * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
3  * Copyright (C) 2013 Intel Corporation
4  * Copyright (C) 2015 EMC Corporation
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /*
30  * The Non-Transparent Bridge (NTB) is a device that allows you to connect
31  * two or more systems using PCI-e links, providing remote memory access.
32  *
33  * This module contains a transport for sending and receiving messages by
34  * writing to remote memory window(s) provided by the underlying NTB device.
35  *
36  * NOTE: Much of the code in this module is shared with Linux. Any patches may
37  * be picked up and redistributed in Linux with a dual GPL/BSD license.
38  */
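
/*
 * Typical consumer flow (a minimal sketch, not taken from an in-tree
 * consumer; the callback names and softc pointer are hypothetical, while
 * the ntb_transport_*() calls and the ntb_queue_handlers fields match the
 * API implemented below):
 *
 *	static const struct ntb_queue_handlers handlers = {
 *		.rx_handler = my_rx_cb,
 *		.tx_handler = my_tx_cb,
 *		.event_handler = my_event_cb,
 *	};
 *
 *	qp = ntb_transport_create_queue(dev, 0, &handlers, sc);
 *	ntb_transport_link_up(qp);
 *	...
 *	error = ntb_transport_tx_enqueue(qp, m, m, m_length(m, NULL));
 *	...
 *	ntb_transport_link_down(qp);
 *	ntb_transport_free_queue(qp);
 */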
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42 
43 #include <sys/param.h>
44 #include <sys/kernel.h>
45 #include <sys/systm.h>
46 #include <sys/bus.h>
47 #include <sys/ktr.h>
48 #include <sys/limits.h>
49 #include <sys/lock.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/module.h>
53 #include <sys/mutex.h>
54 #include <sys/queue.h>
55 #include <sys/sysctl.h>
56 #include <sys/taskqueue.h>
57 
58 #include <vm/vm.h>
59 #include <vm/pmap.h>
60 
61 #include <machine/bus.h>
62 
63 #include "ntb.h"
64 #include "ntb_transport.h"
65 
66 #define KTR_NTB KTR_SPARE3
67 
68 #define NTB_TRANSPORT_VERSION	4
69 
70 static SYSCTL_NODE(_hw, OID_AUTO, ntb_transport,
71     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
72     "ntb_transport");
73 
74 static unsigned g_ntb_transport_debug_level;
75 SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, debug_level, CTLFLAG_RWTUN,
76     &g_ntb_transport_debug_level, 0,
77     "ntb_transport log level -- higher is more verbose");
78 #define ntb_printf(lvl, ...) do {			\
79 	if ((lvl) <= g_ntb_transport_debug_level) {	\
80 		printf(__VA_ARGS__);			\
81 	}						\
82 } while (0)
83 
84 static unsigned transport_mtu = 0x10000;
85 
86 static uint64_t max_mw_size = 256*1024*1024;
87 SYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0,
88     "If enabled (non-zero), limit the size of large memory windows. "
89     "Both sides of the NTB MUST set the same value here.");
90 
91 static unsigned enable_xeon_watchdog;
92 SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN,
93     &enable_xeon_watchdog, 0, "If non-zero, write a register every second to "
94     "keep a watchdog from tearing down the NTB link");
95 
96 STAILQ_HEAD(ntb_queue_list, ntb_queue_entry);
97 
98 typedef uint32_t ntb_q_idx_t;
99 
100 struct ntb_queue_entry {
101 	/* ntb_queue list reference */
102 	STAILQ_ENTRY(ntb_queue_entry) entry;
103 
104 	/* info on data to be transferred */
105 	void		*cb_data;
106 	void		*buf;
107 	uint32_t	len;
108 	uint32_t	flags;
109 
110 	struct ntb_transport_qp		*qp;
111 	struct ntb_payload_header	*x_hdr;
112 	ntb_q_idx_t	index;
113 };
114 
115 struct ntb_rx_info {
116 	ntb_q_idx_t	entry;
117 };
118 
119 struct ntb_transport_qp {
120 	struct ntb_transport_ctx	*transport;
121 	device_t		 dev;
122 
123 	void			*cb_data;
124 
125 	bool			client_ready;
126 	volatile bool		link_is_up;
127 	uint8_t			qp_num;	/* Only 64 QPs are allowed.  0-63 */
128 
129 	struct ntb_rx_info	*rx_info;
130 	struct ntb_rx_info	*remote_rx_info;
131 
132 	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
133 	    void *data, int len);
134 	struct ntb_queue_list	tx_free_q;
135 	struct mtx		ntb_tx_free_q_lock;
136 	caddr_t			tx_mw;
137 	bus_addr_t		tx_mw_phys;
138 	ntb_q_idx_t		tx_index;
139 	ntb_q_idx_t		tx_max_entry;
140 	uint64_t		tx_max_frame;
141 
142 	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
143 	    void *data, int len);
144 	struct ntb_queue_list	rx_post_q;
145 	struct ntb_queue_list	rx_pend_q;
146 	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
147 	struct mtx		ntb_rx_q_lock;
148 	struct task		rxc_db_work;
149 	struct taskqueue	*rxc_tq;
150 	caddr_t			rx_buff;
151 	ntb_q_idx_t		rx_index;
152 	ntb_q_idx_t		rx_max_entry;
153 	uint64_t		rx_max_frame;
154 
155 	void (*event_handler)(void *data, enum ntb_link_event status);
156 	struct callout		link_work;
157 	struct callout		rx_full;
158 
159 	uint64_t		last_rx_no_buf;
160 
161 	/* Stats */
162 	uint64_t		rx_bytes;
163 	uint64_t		rx_pkts;
164 	uint64_t		rx_ring_empty;
165 	uint64_t		rx_err_no_buf;
166 	uint64_t		rx_err_oflow;
167 	uint64_t		rx_err_ver;
168 	uint64_t		tx_bytes;
169 	uint64_t		tx_pkts;
170 	uint64_t		tx_ring_full;
171 	uint64_t		tx_err_no_buf;
172 
173 	struct mtx		tx_lock;
174 };
175 
176 struct ntb_transport_mw {
177 	vm_paddr_t	phys_addr;
178 	size_t		phys_size;
179 	size_t		xlat_align;
180 	size_t		xlat_align_size;
181 	bus_addr_t	addr_limit;
182 	/* Tx buff is vbase / phys_addr / tx_size */
183 	caddr_t		vbase;
184 	size_t		tx_size;
185 	/* Rx buff is virt_addr / dma_addr / rx_size */
186 	bus_dma_tag_t	dma_tag;
187 	bus_dmamap_t	dma_map;
188 	caddr_t		virt_addr;
189 	bus_addr_t	dma_addr;
190 	size_t		rx_size;
191 	/* rx_size rounded up to the size alignment requirements of the hardware. */
192 	size_t		buff_size;
193 };
194 
195 struct ntb_transport_child {
196 	device_t	dev;
197 	int		consumer;
198 	int		qpoff;
199 	int		qpcnt;
200 	struct ntb_transport_child *next;
201 };
202 
203 struct ntb_transport_ctx {
204 	device_t		 dev;
205 	struct ntb_transport_child *child;
206 	struct ntb_transport_mw	*mw_vec;
207 	struct ntb_transport_qp	*qp_vec;
208 	int			compact;
209 	unsigned		mw_count;
210 	unsigned		qp_count;
211 	uint64_t		qp_bitmap;
212 	volatile bool		link_is_up;
213 	enum ntb_speed		link_speed;
214 	enum ntb_width		link_width;
215 	struct callout		link_work;
216 	struct callout		link_watchdog;
217 	struct task		link_cleanup;
218 };
219 
220 enum {
221 	NTBT_DESC_DONE_FLAG = 1 << 0,
222 	NTBT_LINK_DOWN_FLAG = 1 << 1,
223 };
224 
225 struct ntb_payload_header {
226 	ntb_q_idx_t ver;
227 	uint32_t len;
228 	uint32_t flags;
229 };
230 
231 enum {
232 	/*
233 	 * The order of this enum is part of the remote protocol.  Do not
234 	 * reorder without bumping protocol version (and it's probably best
235 	 * to keep the protocol in lock-step with the Linux NTB driver).
236 	 */
237 	NTBT_VERSION = 0,
238 	NTBT_QP_LINKS,
239 	NTBT_NUM_QPS,
240 	NTBT_NUM_MWS,
241 	/*
242 	 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2.
243 	 */
244 	NTBT_MW0_SZ_HIGH,
245 	NTBT_MW0_SZ_LOW,
246 	NTBT_MW1_SZ_HIGH,
247 	NTBT_MW1_SZ_LOW,
248 
249 	/*
250 	 * Some NTB-using hardware has a watchdog to work around NTB hangs; if
251 	 * a register or doorbell isn't written every few seconds, the link is
252 	 * torn down.  Write an otherwise unused register every few seconds to
253 	 * work around this watchdog.
254 	 */
255 	NTBT_WATCHDOG_SPAD = 15
256 };
257 
258 /*
259  * Compact version of the scratchpad protocol, using half as many registers.
260  */
261 enum {
262 	NTBTC_PARAMS = 0,	/* (NUM_QPS << 24) | (NUM_MWS << 16) | VERSION */
263 	NTBTC_QP_LINKS,		/* QP links status */
264 	NTBTC_MW0_SZ,		/* MW size limited to 32 bits. */
265 };
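
/*
 * For example, with 4 QPs, 2 MWs and transport version 4, NTBTC_PARAMS
 * holds (4 << 24) | (2 << 16) | 4 == 0x04020004.
 */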
266 
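/*
 * QPs are striped across memory windows round-robin: with 2 MWs and
 * 4 QPs, QPs 0 and 2 use MW0 while QPs 1 and 3 use MW1.
 */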
267 #define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
268 #define NTB_QP_DEF_NUM_ENTRIES	100
269 #define NTB_LINK_DOWN_TIMEOUT	100
270 
271 static int ntb_transport_probe(device_t dev);
272 static int ntb_transport_attach(device_t dev);
273 static int ntb_transport_detach(device_t dev);
274 static void ntb_transport_init_queue(struct ntb_transport_ctx *nt,
275     unsigned int qp_num);
276 static int ntb_process_tx(struct ntb_transport_qp *qp,
277     struct ntb_queue_entry *entry);
278 static void ntb_transport_rxc_db(void *arg, int pending);
279 static int ntb_process_rxc(struct ntb_transport_qp *qp);
280 static void ntb_memcpy_rx(struct ntb_transport_qp *qp,
281     struct ntb_queue_entry *entry, void *offset);
282 static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp,
283     void *data);
284 static void ntb_complete_rxc(struct ntb_transport_qp *qp);
285 static void ntb_transport_doorbell_callback(void *data, uint32_t vector);
286 static void ntb_transport_event_callback(void *data);
287 static void ntb_transport_link_work(void *arg);
288 static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size);
289 static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw);
290 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
291     unsigned int qp_num);
292 static void ntb_qp_link_work(void *arg);
293 static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt);
294 static void ntb_transport_link_cleanup_work(void *, int);
295 static void ntb_qp_link_down(struct ntb_transport_qp *qp);
296 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp);
297 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp);
298 static void ntb_send_link_down(struct ntb_transport_qp *qp);
299 static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
300     struct ntb_queue_list *list);
301 static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock,
302     struct ntb_queue_list *list);
303 static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock,
304     struct ntb_queue_list *from, struct ntb_queue_list *to);
305 static void xeon_link_watchdog_hb(void *);
306 
307 static const struct ntb_ctx_ops ntb_transport_ops = {
308 	.link_event = ntb_transport_event_callback,
309 	.db_event = ntb_transport_doorbell_callback,
310 };
311 
312 MALLOC_DEFINE(M_NTB_T, "ntb_transport", "ntb transport driver");
313 
314 static inline void
315 iowrite32(uint32_t val, void *addr)
316 {
317 
318 	bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr,
319 	    val);
320 }
321 
322 /* Transport Init and teardown */
323 
324 static void
325 xeon_link_watchdog_hb(void *arg)
326 {
327 	struct ntb_transport_ctx *nt;
328 
329 	nt = arg;
330 	ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0);
331 	callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt);
332 }
333 
334 static int
335 ntb_transport_probe(device_t dev)
336 {
337 
338 	device_set_desc(dev, "NTB Transport");
339 	return (0);
340 }
341 
342 static int
343 ntb_transport_attach(device_t dev)
344 {
345 	struct ntb_transport_ctx *nt = device_get_softc(dev);
346 	struct ntb_transport_child **cpp = &nt->child;
347 	struct ntb_transport_child *nc;
348 	struct ntb_transport_mw *mw;
349 	uint64_t db_bitmap;
350 	int rc, i, db_count, spad_count, qp, qpu, qpo, qpt;
351 	char cfg[128] = "";
352 	char buf[32];
353 	char *n, *np, *c, *name;
354 
355 	nt->dev = dev;
356 	nt->mw_count = ntb_mw_count(dev);
357 	spad_count = ntb_spad_count(dev);
358 	db_bitmap = ntb_db_valid_mask(dev);
359 	db_count = flsll(db_bitmap);
360 	KASSERT(db_bitmap == (1ull << db_count) - 1,
361 	    ("Doorbells are not sequential (%jx).\n", db_bitmap));
362 
363 	if (nt->mw_count == 0) {
364 		device_printf(dev, "At least 1 memory window required.\n");
365 		return (ENXIO);
366 	}
367 	nt->compact = (spad_count < 4 + 2 * nt->mw_count);
368 	snprintf(buf, sizeof(buf), "hint.%s.%d.compact", device_get_name(dev),
369 	    device_get_unit(dev));
370 	TUNABLE_INT_FETCH(buf, &nt->compact);
371 	if (nt->compact) {
372 		if (spad_count < 3) {
373 			device_printf(dev, "At least 3 scratchpads required.\n");
374 			return (ENXIO);
375 		}
376 		if (spad_count < 2 + nt->mw_count) {
377 			nt->mw_count = spad_count - 2;
378 			device_printf(dev, "Only enough scratchpads for %d "
379 			    "memory windows.\n", nt->mw_count);
380 		}
381 	} else {
382 		if (spad_count < 6) {
383 			device_printf(dev, "At least 6 scratchpads required.\n");
384 			return (ENXIO);
385 		}
386 		if (spad_count < 4 + 2 * nt->mw_count) {
387 			nt->mw_count = (spad_count - 4) / 2;
388 			device_printf(dev, "Only enough scratchpads for %d "
389 			    "memory windows.\n", nt->mw_count);
390 		}
391 	}
392 	if (db_bitmap == 0) {
393 		device_printf(dev, "At least one doorbell required.\n");
394 		return (ENXIO);
395 	}
396 
397 	nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T,
398 	    M_WAITOK | M_ZERO);
399 	for (i = 0; i < nt->mw_count; i++) {
400 		mw = &nt->mw_vec[i];
401 
402 		rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase,
403 		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
404 		    &mw->addr_limit);
405 		if (rc != 0)
406 			goto err;
407 
408 		mw->tx_size = mw->phys_size;
409 		if (max_mw_size != 0 && mw->tx_size > max_mw_size) {
410 			device_printf(dev, "Memory window %d limited from "
411 			    "%ju to %ju\n", i, (uintmax_t)mw->tx_size,
412 			    max_mw_size);
413 			mw->tx_size = max_mw_size;
414 		}
415 		if (nt->compact && mw->tx_size > UINT32_MAX) {
416 			device_printf(dev, "Memory window %d is too big "
417 			    "(%ju)\n", i, (uintmax_t)mw->tx_size);
418 			rc = ENXIO;
419 			goto err;
420 		}
421 
422 		mw->rx_size = 0;
423 		mw->buff_size = 0;
424 		mw->virt_addr = NULL;
425 		mw->dma_addr = 0;
426 
427 		rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING);
428 		if (rc)
429 			ntb_printf(0, "Unable to set mw%d caching\n", i);
430 
431 		/*
432 		 * Try to preallocate receive memory early, since there may
433 		 * not be enough contiguous memory later.  It is quite likely
434 		 * that the NTB windows are symmetric and this allocation will
435 		 * remain, but even if not, we will just reallocate it later.
436 		 */
437 		ntb_set_mw(nt, i, mw->tx_size);
438 	}
439 
440 	qpu = 0;
441 	qpo = imin(db_count, nt->mw_count);
442 	qpt = db_count;
443 
444 	snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev),
445 	    device_get_unit(dev));
446 	TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg));
447 	n = cfg;
448 	i = 0;
449 	while ((c = strsep(&n, ",")) != NULL) {
450 		np = c;
451 		name = strsep(&np, ":");
452 		if (name != NULL && name[0] == 0)
453 			name = NULL;
454 		qp = (np && np[0] != 0) ? strtol(np, NULL, 10) : qpo - qpu;
455 		if (qp <= 0)
456 			qp = 1;
457 
458 		if (qp > qpt - qpu) {
459 			device_printf(dev, "Not enough resources for config\n");
460 			break;
461 		}
462 
463 		nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO);
464 		nc->consumer = i;
465 		nc->qpoff = qpu;
466 		nc->qpcnt = qp;
467 		nc->dev = device_add_child(dev, name, -1);
468 		if (nc->dev == NULL) {
469 			device_printf(dev, "Can not add child.\n");
470 			break;
471 		}
472 		device_set_ivars(nc->dev, nc);
473 		*cpp = nc;
474 		cpp = &nc->next;
475 
476 		if (bootverbose) {
477 			device_printf(dev, "%d \"%s\": queues %d",
478 			    i, name, qpu);
479 			if (qp > 1)
480 				printf("-%d", qpu + qp - 1);
481 			printf("\n");
482 		}
483 
484 		qpu += qp;
485 		i++;
486 	}
487 	nt->qp_count = qpu;
488 
489 	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T,
490 	    M_WAITOK | M_ZERO);
491 
492 	for (i = 0; i < nt->qp_count; i++)
493 		ntb_transport_init_queue(nt, i);
494 
495 	callout_init(&nt->link_work, 0);
496 	callout_init(&nt->link_watchdog, 0);
497 	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);
498 	nt->link_is_up = false;
499 
500 	rc = ntb_set_ctx(dev, nt, &ntb_transport_ops);
501 	if (rc != 0)
502 		goto err;
503 
504 	ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
505 
506 	for (i = 0; i < nt->mw_count; i++) {
507 		mw = &nt->mw_vec[i];
508 		rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr, mw->buff_size);
509 		if (rc != 0)
510 			ntb_printf(0, "load time mw%d xlat fails, rc %d\n", i, rc);
511 	}
512 
513 	if (enable_xeon_watchdog != 0)
514 		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
515 
516 	bus_generic_attach(dev);
517 	return (0);
518 
519 err:
520 	free(nt->qp_vec, M_NTB_T);
521 	free(nt->mw_vec, M_NTB_T);
522 	return (rc);
523 }
524 
525 static int
526 ntb_transport_detach(device_t dev)
527 {
528 	struct ntb_transport_ctx *nt = device_get_softc(dev);
529 	struct ntb_transport_child **cpp = &nt->child;
530 	struct ntb_transport_child *nc;
531 	int error = 0, i;
532 
533 	while ((nc = *cpp) != NULL) {
534 		*cpp = (*cpp)->next;
535 		error = device_delete_child(dev, nc->dev);
536 		if (error)
537 			break;
538 		free(nc, M_DEVBUF);
539 	}
540 	KASSERT(nt->qp_bitmap == 0,
541 	    ("Some queues not freed on detach (%jx)", nt->qp_bitmap));
542 
543 	ntb_transport_link_cleanup(nt);
544 	taskqueue_drain(taskqueue_swi, &nt->link_cleanup);
545 	callout_drain(&nt->link_work);
546 	callout_drain(&nt->link_watchdog);
547 
548 	ntb_link_disable(dev);
549 	ntb_clear_ctx(dev);
550 
551 	for (i = 0; i < nt->mw_count; i++)
552 		ntb_free_mw(nt, i);
553 
554 	free(nt->qp_vec, M_NTB_T);
555 	free(nt->mw_vec, M_NTB_T);
556 	return (0);
557 }
558 
559 static int
560 ntb_transport_print_child(device_t dev, device_t child)
561 {
562 	struct ntb_transport_child *nc = device_get_ivars(child);
563 	int retval;
564 
565 	retval = bus_print_child_header(dev, child);
566 	if (nc->qpcnt > 0) {
567 		printf(" queue %d", nc->qpoff);
568 		if (nc->qpcnt > 1)
569 			printf("-%d", nc->qpoff + nc->qpcnt - 1);
570 	}
571 	retval += printf(" at consumer %d", nc->consumer);
572 	retval += bus_print_child_domain(dev, child);
573 	retval += bus_print_child_footer(dev, child);
574 
575 	return (retval);
576 }
577 
578 static int
579 ntb_transport_child_location_str(device_t dev, device_t child, char *buf,
580     size_t buflen)
581 {
582 	struct ntb_transport_child *nc = device_get_ivars(child);
583 
584 	snprintf(buf, buflen, "consumer=%d", nc->consumer);
585 	return (0);
586 }
587 
588 int
589 ntb_transport_queue_count(device_t dev)
590 {
591 	struct ntb_transport_child *nc = device_get_ivars(dev);
592 
593 	return (nc->qpcnt);
594 }
595 
596 static void
597 ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
598 {
599 	struct ntb_transport_mw *mw;
600 	struct ntb_transport_qp *qp;
601 	vm_paddr_t mw_base;
602 	uint64_t qp_offset;
603 	size_t tx_size;
604 	unsigned num_qps_mw, mw_num, mw_count;
605 
606 	mw_count = nt->mw_count;
607 	mw_num = QP_TO_MW(nt, qp_num);
608 	mw = &nt->mw_vec[mw_num];
609 
610 	qp = &nt->qp_vec[qp_num];
611 	qp->qp_num = qp_num;
612 	qp->transport = nt;
613 	qp->dev = nt->dev;
614 	qp->client_ready = false;
615 	qp->event_handler = NULL;
616 	ntb_qp_link_down_reset(qp);
617 
618 	if (mw_num < nt->qp_count % mw_count)
619 		num_qps_mw = nt->qp_count / mw_count + 1;
620 	else
621 		num_qps_mw = nt->qp_count / mw_count;
622 
623 	mw_base = mw->phys_addr;
624 
625 	tx_size = mw->tx_size / num_qps_mw;
626 	qp_offset = tx_size * (qp_num / mw_count);
627 
628 	qp->tx_mw = mw->vbase + qp_offset;
629 	KASSERT(qp->tx_mw != NULL, ("uh oh?"));
630 
631 	/* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */
632 	qp->tx_mw_phys = mw_base + qp_offset;
633 	KASSERT(qp->tx_mw_phys != 0, ("uh oh?"));
634 
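	/*
	 * The tail of this QP's slice of the TX window holds an ntb_rx_info
	 * through which we publish our RX consumer index to the peer; the
	 * rest is carved into tx_max_entry frames of tx_max_frame bytes,
	 * each ending with an ntb_payload_header.
	 */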
635 	tx_size -= sizeof(struct ntb_rx_info);
636 	qp->rx_info = (void *)(qp->tx_mw + tx_size);
637 
638 	/* Due to house-keeping, there must be at least 2 buffs */
639 	qp->tx_max_frame = qmin(transport_mtu, tx_size / 2);
640 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
641 
642 	callout_init(&qp->link_work, 0);
643 	callout_init(&qp->rx_full, 1);
644 
645 	mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN);
646 	mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
647 	mtx_init(&qp->tx_lock, "ntb transport tx", NULL, MTX_DEF);
648 	TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp);
649 	qp->rxc_tq = taskqueue_create("ntbt_rx", M_WAITOK,
650 	    taskqueue_thread_enqueue, &qp->rxc_tq);
651 	taskqueue_start_threads(&qp->rxc_tq, 1, PI_NET, "%s rx%d",
652 	    device_get_nameunit(nt->dev), qp_num);
653 
654 	STAILQ_INIT(&qp->rx_post_q);
655 	STAILQ_INIT(&qp->rx_pend_q);
656 	STAILQ_INIT(&qp->tx_free_q);
657 }
658 
659 void
660 ntb_transport_free_queue(struct ntb_transport_qp *qp)
661 {
662 	struct ntb_transport_ctx *nt = qp->transport;
663 	struct ntb_queue_entry *entry;
664 
665 	callout_drain(&qp->link_work);
666 
667 	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
668 	taskqueue_drain_all(qp->rxc_tq);
669 	taskqueue_free(qp->rxc_tq);
670 
671 	qp->cb_data = NULL;
672 	qp->rx_handler = NULL;
673 	qp->tx_handler = NULL;
674 	qp->event_handler = NULL;
675 
676 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q)))
677 		free(entry, M_NTB_T);
678 
679 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q)))
680 		free(entry, M_NTB_T);
681 
682 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
683 		free(entry, M_NTB_T);
684 
685 	nt->qp_bitmap &= ~(1ull << qp->qp_num);
686 }
687 
688 /**
689  * ntb_transport_create_queue - Create a new NTB transport layer queue
690  * @rx_handler: receive callback function
691  * @tx_handler: transmit callback function
692  * @event_handler: event callback function
693  *
694  * Create a new NTB transport layer queue and provide the queue with a callback
695  * routine for both transmit and receive.  The receive callback routine will be
696  * used to pass up data when the transport has received it on the queue.   The
697  * transmit callback routine will be called when the transport has completed the
698  * transmission of the data on the queue and the data is ready to be freed.
699  *
700  * RETURNS: pointer to newly created ntb_queue, NULL on error.
701  */
702 struct ntb_transport_qp *
703 ntb_transport_create_queue(device_t dev, int q,
704     const struct ntb_queue_handlers *handlers, void *data)
705 {
706 	struct ntb_transport_child *nc = device_get_ivars(dev);
707 	struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev));
708 	struct ntb_queue_entry *entry;
709 	struct ntb_transport_qp *qp;
710 	int i;
711 
712 	if (q < 0 || q >= nc->qpcnt)
713 		return (NULL);
714 
715 	qp = &nt->qp_vec[nc->qpoff + q];
716 	nt->qp_bitmap |= (1ull << qp->qp_num);
717 	qp->cb_data = data;
718 	qp->rx_handler = handlers->rx_handler;
719 	qp->tx_handler = handlers->tx_handler;
720 	qp->event_handler = handlers->event_handler;
721 
722 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
723 		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
724 		entry->cb_data = data;
725 		entry->buf = NULL;
726 		entry->len = transport_mtu;
727 		entry->qp = qp;
728 		ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q);
729 	}
730 
731 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
732 		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
733 		entry->qp = qp;
734 		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
735 	}
736 
737 	ntb_db_clear(dev, 1ull << qp->qp_num);
738 	return (qp);
739 }
740 
741 /**
742  * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
743  * @qp: NTB transport layer queue to be enabled
744  *
745  * Notify NTB transport layer of client readiness to use queue
746  */
747 void
748 ntb_transport_link_up(struct ntb_transport_qp *qp)
749 {
750 	struct ntb_transport_ctx *nt = qp->transport;
751 
752 	qp->client_ready = true;
753 
754 	ntb_printf(2, "qp %d client ready\n", qp->qp_num);
755 
756 	if (nt->link_is_up)
757 		callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
758 }
759 
760 
761 
762 /* Transport Tx */
763 
764 /**
765  * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
766  * @qp: NTB transport layer queue the entry is to be enqueued on
767  * @cb: per buffer pointer for callback function to use
768  * @data: pointer to data buffer that will be sent
769  * @len: length of the data buffer
770  *
771  * Enqueue a new transmit buffer onto the transport queue from which an NTB
772  * payload will be transmitted.  This assumes that a lock is being held to
773  * serialize access to the qp.
774  *
775  * RETURNS: An appropriate ERRNO error value on error, or zero for success.
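 *
 * Example (a sketch; the payload is expected to be an mbuf chain, since
 * ntb_memcpy_tx() copies it out with m_copydata() and the default
 * completion path frees it with m_freem()):
 *
 *	error = ntb_transport_tx_enqueue(qp, m, m, m_length(m, NULL));
 *	if (error != 0)
 *		m_freem(m);		(caller still owns the mbuf on error)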
776  */
777 int
778 ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
779     unsigned int len)
780 {
781 	struct ntb_queue_entry *entry;
782 	int rc;
783 
784 	if (!qp->link_is_up || len == 0) {
785 		CTR0(KTR_NTB, "TX: link not up");
786 		return (EINVAL);
787 	}
788 
789 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
790 	if (entry == NULL) {
791 		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
792 		qp->tx_err_no_buf++;
793 		return (EBUSY);
794 	}
795 	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);
796 
797 	entry->cb_data = cb;
798 	entry->buf = data;
799 	entry->len = len;
800 	entry->flags = 0;
801 
802 	mtx_lock(&qp->tx_lock);
803 	rc = ntb_process_tx(qp, entry);
804 	mtx_unlock(&qp->tx_lock);
805 	if (rc != 0) {
806 		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
807 		CTR1(KTR_NTB,
808 		    "TX: process_tx failed. Returning entry %p to tx_free_q",
809 		    entry);
810 	}
811 	return (rc);
812 }
813 
814 static void
815 ntb_tx_copy_callback(void *data)
816 {
817 	struct ntb_queue_entry *entry = data;
818 	struct ntb_transport_qp *qp = entry->qp;
819 	struct ntb_payload_header *hdr = entry->x_hdr;
820 
821 	iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags);
822 	CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);
823 
824 	ntb_peer_db_set(qp->dev, 1ull << qp->qp_num);
825 
826 	/*
827 	 * The entry length can only be zero if the packet is intended to be a
828 	 * "link down" or similar.  Since no payload is being sent in these
829 	 * cases, there is nothing to add to the completion queue.
830 	 */
831 	if (entry->len > 0) {
832 		qp->tx_bytes += entry->len;
833 
834 		if (qp->tx_handler)
835 			qp->tx_handler(qp, qp->cb_data, entry->buf,
836 			    entry->len);
837 		else
838 			m_freem(entry->buf);
839 		entry->buf = NULL;
840 	}
841 
842 	CTR3(KTR_NTB,
843 	    "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning "
844 	    "to tx_free_q", entry, hdr->ver, hdr->flags);
845 	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
846 }
847 
848 static void
849 ntb_memcpy_tx(struct ntb_queue_entry *entry, void *offset)
850 {
851 
852 	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
853 	if (entry->buf != NULL) {
854 		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);
855 
856 		/*
857 		 * Ensure that the data is fully copied before setting the
858 		 * flags
859 		 */
860 		wmb();
861 	}
862 
863 	ntb_tx_copy_callback(entry);
864 }
865 
866 static void
867 ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
868 {
869 	struct ntb_payload_header *hdr;
870 	void *offset;
871 
872 	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
873 	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
874 	    sizeof(struct ntb_payload_header));
875 	entry->x_hdr = hdr;
876 
877 	iowrite32(entry->len, &hdr->len);
878 	iowrite32(qp->tx_pkts, &hdr->ver);
879 
880 	ntb_memcpy_tx(entry, offset);
881 }
882 
883 static int
884 ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
885 {
886 
887 	CTR3(KTR_NTB,
888 	    "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u",
889 	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
890 	if (qp->tx_index == qp->remote_rx_info->entry) {
891 		CTR0(KTR_NTB, "TX: ring full");
892 		qp->tx_ring_full++;
893 		return (EAGAIN);
894 	}
895 
896 	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
897 		if (qp->tx_handler != NULL)
898 			qp->tx_handler(qp, qp->cb_data, entry->buf,
899 			    EIO);
900 		else
901 			m_freem(entry->buf);
902 
903 		entry->buf = NULL;
904 		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
905 		CTR1(KTR_NTB,
906 		    "TX: frame too big. returning entry %p to tx_free_q",
907 		    entry);
908 		return (0);
909 	}
910 	CTR2(KTR_NTB, "TX: copying entry %p to index %u", entry, qp->tx_index);
911 	ntb_async_tx(qp, entry);
912 
913 	qp->tx_index++;
914 	qp->tx_index %= qp->tx_max_entry;
915 
916 	qp->tx_pkts++;
917 
918 	return (0);
919 }
920 
921 /* Transport Rx */
922 static void
923 ntb_transport_rxc_db(void *arg, int pending __unused)
924 {
925 	struct ntb_transport_qp *qp = arg;
926 	uint64_t qp_mask = 1ull << qp->qp_num;
927 	int rc;
928 
929 	CTR0(KTR_NTB, "RX: transport_rx");
930 again:
931 	while ((rc = ntb_process_rxc(qp)) == 0)
932 		;
933 	CTR1(KTR_NTB, "RX: process_rxc returned %d", rc);
934 
935 	if ((ntb_db_read(qp->dev) & qp_mask) != 0) {
936 		/* If db is set, clear it and check queue once more. */
937 		ntb_db_clear(qp->dev, qp_mask);
938 		goto again;
939 	}
940 	if (qp->link_is_up)
941 		ntb_db_clear_mask(qp->dev, qp_mask);
942 }
943 
944 static int
945 ntb_process_rxc(struct ntb_transport_qp *qp)
946 {
947 	struct ntb_payload_header *hdr;
948 	struct ntb_queue_entry *entry;
949 	caddr_t offset;
950 
951 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
952 	hdr = (void *)(offset + qp->rx_max_frame -
953 	    sizeof(struct ntb_payload_header));
954 
955 	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
956 	if ((hdr->flags & NTBT_DESC_DONE_FLAG) == 0) {
957 		CTR0(KTR_NTB, "RX: hdr not done");
958 		qp->rx_ring_empty++;
959 		return (EAGAIN);
960 	}
961 
962 	if ((hdr->flags & NTBT_LINK_DOWN_FLAG) != 0) {
963 		CTR0(KTR_NTB, "RX: link down");
964 		ntb_qp_link_down(qp);
965 		hdr->flags = 0;
966 		return (EAGAIN);
967 	}
968 
969 	if (hdr->ver != (uint32_t)qp->rx_pkts) {
970 		CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). "
971 		    "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts);
972 		qp->rx_err_ver++;
973 		return (EIO);
974 	}
975 
976 	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
977 	if (entry == NULL) {
978 		qp->rx_err_no_buf++;
979 		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
980 		return (EAGAIN);
981 	}
982 	callout_stop(&qp->rx_full);
983 	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);
984 
985 	entry->x_hdr = hdr;
986 	entry->index = qp->rx_index;
987 
988 	if (hdr->len > entry->len) {
989 		CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju",
990 		    (uintmax_t)hdr->len, (uintmax_t)entry->len);
991 		qp->rx_err_oflow++;
992 
993 		entry->len = -EIO;
994 		entry->flags |= NTBT_DESC_DONE_FLAG;
995 
996 		ntb_complete_rxc(qp);
997 	} else {
998 		qp->rx_bytes += hdr->len;
999 		qp->rx_pkts++;
1000 
1001 		CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);
1002 
1003 		entry->len = hdr->len;
1004 
1005 		ntb_memcpy_rx(qp, entry, offset);
1006 	}
1007 
1008 	qp->rx_index++;
1009 	qp->rx_index %= qp->rx_max_entry;
1010 	return (0);
1011 }
1012 
1013 static void
1014 ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
1015     void *offset)
1016 {
1017 	struct ifnet *ifp = entry->cb_data;
1018 	unsigned int len = entry->len;
1019 
1020 	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
1021 
1022 	entry->buf = (void *)m_devget(offset, len, 0, ifp, NULL);
1023 	if (entry->buf == NULL)
1024 		entry->len = -ENOMEM;
1025 
1026 	/* Ensure that the data is globally visible before clearing the flag */
1027 	wmb();
1028 
1029 	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, entry->buf);
1030 	ntb_rx_copy_callback(qp, entry);
1031 }
1032 
1033 static inline void
1034 ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data)
1035 {
1036 	struct ntb_queue_entry *entry;
1037 
1038 	entry = data;
1039 	entry->flags |= NTBT_DESC_DONE_FLAG;
1040 	ntb_complete_rxc(qp);
1041 }
1042 
1043 static void
1044 ntb_complete_rxc(struct ntb_transport_qp *qp)
1045 {
1046 	struct ntb_queue_entry *entry;
1047 	struct mbuf *m;
1048 	unsigned len;
1049 
1050 	CTR0(KTR_NTB, "RX: rx_completion_task");
1051 
1052 	mtx_lock_spin(&qp->ntb_rx_q_lock);
1053 
1054 	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
1055 		entry = STAILQ_FIRST(&qp->rx_post_q);
1056 		if ((entry->flags & NTBT_DESC_DONE_FLAG) == 0)
1057 			break;
1058 
1059 		entry->x_hdr->flags = 0;
1060 		iowrite32(entry->index, &qp->rx_info->entry);
1061 
1062 		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);
1063 
1064 		len = entry->len;
1065 		m = entry->buf;
1066 
1067 		/*
1068 		 * Re-initialize queue_entry for reuse; rx_handler takes
1069 		 * ownership of the mbuf.
1070 		 */
1071 		entry->buf = NULL;
1072 		entry->len = transport_mtu;
1073 		entry->cb_data = qp->cb_data;
1074 
1075 		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);
1076 
1077 		mtx_unlock_spin(&qp->ntb_rx_q_lock);
1078 
1079 		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
1080 		if (qp->rx_handler != NULL && qp->client_ready)
1081 			qp->rx_handler(qp, qp->cb_data, m, len);
1082 		else
1083 			m_freem(m);
1084 
1085 		mtx_lock_spin(&qp->ntb_rx_q_lock);
1086 	}
1087 
1088 	mtx_unlock_spin(&qp->ntb_rx_q_lock);
1089 }
1090 
1091 static void
1092 ntb_transport_doorbell_callback(void *data, uint32_t vector)
1093 {
1094 	struct ntb_transport_ctx *nt = data;
1095 	struct ntb_transport_qp *qp;
1096 	uint64_t vec_mask;
1097 	unsigned qp_num;
1098 
1099 	vec_mask = ntb_db_vector_mask(nt->dev, vector);
1100 	vec_mask &= nt->qp_bitmap;
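	/*
	 * If more than one QP shares this interrupt vector, narrow the set
	 * to the doorbell bits that are actually pending.
	 */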
1101 	if ((vec_mask & (vec_mask - 1)) != 0)
1102 		vec_mask &= ntb_db_read(nt->dev);
1103 	if (vec_mask != 0) {
1104 		ntb_db_set_mask(nt->dev, vec_mask);
1105 		ntb_db_clear(nt->dev, vec_mask);
1106 	}
1107 	while (vec_mask != 0) {
1108 		qp_num = ffsll(vec_mask) - 1;
1109 
1110 		qp = &nt->qp_vec[qp_num];
1111 		if (qp->link_is_up)
1112 			taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work);
1113 
1114 		vec_mask &= ~(1ull << qp_num);
1115 	}
1116 }
1117 
1118 /* Link Event handler */
1119 static void
1120 ntb_transport_event_callback(void *data)
1121 {
1122 	struct ntb_transport_ctx *nt = data;
1123 
1124 	if (ntb_link_is_up(nt->dev, &nt->link_speed, &nt->link_width)) {
1125 		ntb_printf(1, "HW link up\n");
1126 		callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
1127 	} else {
1128 		ntb_printf(1, "HW link down\n");
1129 		taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup);
1130 	}
1131 }
1132 
1133 /* Link bring up */
1134 static void
1135 ntb_transport_link_work(void *arg)
1136 {
1137 	struct ntb_transport_ctx *nt = arg;
1138 	struct ntb_transport_mw *mw;
1139 	device_t dev = nt->dev;
1140 	struct ntb_transport_qp *qp;
1141 	uint64_t val64, size;
1142 	uint32_t val;
1143 	unsigned i;
1144 	int rc;
1145 
1146 	/* send the local info, in the opposite order of the way we read it */
1147 	if (nt->compact) {
1148 		for (i = 0; i < nt->mw_count; i++) {
1149 			size = nt->mw_vec[i].tx_size;
1150 			KASSERT(size <= UINT32_MAX, ("size too big (%jx)", size));
1151 			ntb_peer_spad_write(dev, NTBTC_MW0_SZ + i, size);
1152 		}
1153 		ntb_peer_spad_write(dev, NTBTC_QP_LINKS, 0);
1154 		ntb_peer_spad_write(dev, NTBTC_PARAMS,
1155 		    (nt->qp_count << 24) | (nt->mw_count << 16) |
1156 		    NTB_TRANSPORT_VERSION);
1157 	} else {
1158 		for (i = 0; i < nt->mw_count; i++) {
1159 			size = nt->mw_vec[i].tx_size;
1160 			ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2),
1161 			    size >> 32);
1162 			ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size);
1163 		}
1164 		ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count);
1165 		ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count);
1166 		ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0);
1167 		ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION);
1168 	}
1169 
1170 	/* Query the remote side for its info */
1171 	val = 0;
1172 	if (nt->compact) {
1173 		ntb_spad_read(dev, NTBTC_PARAMS, &val);
1174 		if (val != ((nt->qp_count << 24) | (nt->mw_count << 16) |
1175 		    NTB_TRANSPORT_VERSION))
1176 			goto out;
1177 	} else {
1178 		ntb_spad_read(dev, NTBT_VERSION, &val);
1179 		if (val != NTB_TRANSPORT_VERSION)
1180 			goto out;
1181 
1182 		ntb_spad_read(dev, NTBT_NUM_QPS, &val);
1183 		if (val != nt->qp_count)
1184 			goto out;
1185 
1186 		ntb_spad_read(dev, NTBT_NUM_MWS, &val);
1187 		if (val != nt->mw_count)
1188 			goto out;
1189 	}
1190 
1191 	for (i = 0; i < nt->mw_count; i++) {
1192 		if (nt->compact) {
1193 			ntb_spad_read(dev, NTBTC_MW0_SZ + i, &val);
1194 			val64 = val;
1195 		} else {
1196 			ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val);
1197 			val64 = (uint64_t)val << 32;
1198 
1199 			ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val);
1200 			val64 |= val;
1201 		}
1202 
1203 		mw = &nt->mw_vec[i];
1204 		mw->rx_size = val64;
1205 		val64 = roundup(val64, mw->xlat_align_size);
1206 		if (mw->buff_size != val64) {
1207 
1208 			rc = ntb_set_mw(nt, i, val64);
1209 			if (rc != 0) {
1210 				ntb_printf(0, "link up set mw%d fails, rc %d\n",
1211 				    i, rc);
1212 				goto free_mws;
1213 			}
1214 
1215 			/* Notify HW of the memory location of the receive buffer */
1216 			rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr,
1217 			    mw->buff_size);
1218 			if (rc != 0) {
1219 				ntb_printf(0, "link up mw%d xlat fails, rc %d\n",
1220 				     i, rc);
1221 				goto free_mws;
1222 			}
1223 		}
1224 	}
1225 
1226 	nt->link_is_up = true;
1227 	ntb_printf(1, "transport link up\n");
1228 
1229 	for (i = 0; i < nt->qp_count; i++) {
1230 		qp = &nt->qp_vec[i];
1231 
1232 		ntb_transport_setup_qp_mw(nt, i);
1233 
1234 		if (qp->client_ready)
1235 			callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
1236 	}
1237 
1238 	return;
1239 
1240 free_mws:
1241 	for (i = 0; i < nt->mw_count; i++)
1242 		ntb_free_mw(nt, i);
1243 out:
1244 	if (ntb_link_is_up(dev, &nt->link_speed, &nt->link_width))
1245 		callout_reset(&nt->link_work,
1246 		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
1247 }
1248 
1249 struct ntb_load_cb_args {
1250 	bus_addr_t addr;
1251 	int error;
1252 };
1253 
1254 static void
1255 ntb_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
1256 {
1257 	struct ntb_load_cb_args *cba = (struct ntb_load_cb_args *)xsc;
1258 
1259 	if (!(cba->error = error))
1260 		cba->addr = segs[0].ds_addr;
1261 }
1262 
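/*
 * Allocate a physically contiguous receive buffer for memory window num_mw:
 * create a busdma tag honoring the window's alignment and address limit,
 * allocate the buffer, and load the map to learn the bus address that will
 * be programmed into the NTB translation.
 */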
1263 static int
1264 ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size)
1265 {
1266 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
1267 	struct ntb_load_cb_args cba;
1268 	size_t buff_size;
1269 
1270 	if (size == 0)
1271 		return (EINVAL);
1272 
1273 	buff_size = roundup(size, mw->xlat_align_size);
1274 
1275 	/* No need to re-setup */
1276 	if (mw->buff_size == buff_size)
1277 		return (0);
1278 
1279 	if (mw->buff_size != 0)
1280 		ntb_free_mw(nt, num_mw);
1281 
1282 	/* Alloc memory for receiving data.  Must be aligned */
1283 	mw->buff_size = buff_size;
1284 
1285 	if (bus_dma_tag_create(bus_get_dma_tag(nt->dev), mw->xlat_align, 0,
1286 	    mw->addr_limit, BUS_SPACE_MAXADDR,
1287 	    NULL, NULL, mw->buff_size, 1, mw->buff_size,
1288 	    0, NULL, NULL, &mw->dma_tag)) {
1289 		ntb_printf(0, "Unable to create MW tag of size %zu\n",
1290 		    mw->buff_size);
1291 		mw->buff_size = 0;
1292 		return (ENOMEM);
1293 	}
1294 	if (bus_dmamem_alloc(mw->dma_tag, (void **)&mw->virt_addr,
1295 	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &mw->dma_map)) {
1296 		bus_dma_tag_destroy(mw->dma_tag);
1297 		ntb_printf(0, "Unable to allocate MW buffer of size %zu\n",
1298 		    mw->buff_size);
1299 		mw->buff_size = 0;
1300 		return (ENOMEM);
1301 	}
1302 	if (bus_dmamap_load(mw->dma_tag, mw->dma_map, mw->virt_addr,
1303 	    mw->buff_size, ntb_load_cb, &cba, BUS_DMA_NOWAIT) || cba.error) {
1304 		bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
1305 		bus_dma_tag_destroy(mw->dma_tag);
1306 		ntb_printf(0, "Unable to load MW buffer of size %zu\n",
1307 		    mw->buff_size);
1308 		mw->buff_size = 0;
1309 		return (ENOMEM);
1310 	}
1311 	mw->dma_addr = cba.addr;
1312 
1313 	return (0);
1314 }
1315 
1316 static void
1317 ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
1318 {
1319 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
1320 
1321 	if (mw->virt_addr == NULL)
1322 		return;
1323 
1324 	ntb_mw_clear_trans(nt->dev, num_mw);
1325 	bus_dmamap_unload(mw->dma_tag, mw->dma_map);
1326 	bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
1327 	bus_dma_tag_destroy(mw->dma_tag);
1328 	mw->buff_size = 0;
1329 	mw->virt_addr = NULL;
1330 }
1331 
1332 static int
1333 ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num)
1334 {
1335 	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
1336 	struct ntb_transport_mw *mw;
1337 	void *offset;
1338 	ntb_q_idx_t i;
1339 	size_t rx_size;
1340 	unsigned num_qps_mw, mw_num, mw_count;
1341 
1342 	mw_count = nt->mw_count;
1343 	mw_num = QP_TO_MW(nt, qp_num);
1344 	mw = &nt->mw_vec[mw_num];
1345 
1346 	if (mw->virt_addr == NULL)
1347 		return (ENOMEM);
1348 
1349 	if (mw_num < nt->qp_count % mw_count)
1350 		num_qps_mw = nt->qp_count / mw_count + 1;
1351 	else
1352 		num_qps_mw = nt->qp_count / mw_count;
1353 
1354 	rx_size = mw->rx_size / num_qps_mw;
1355 	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
1356 	rx_size -= sizeof(struct ntb_rx_info);
1357 
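	/*
	 * The peer publishes its RX consumer index at the tail of this QP's
	 * slice of the RX buffer; ntb_process_tx() compares it against
	 * tx_index to detect a full TX ring.
	 */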
1358 	qp->remote_rx_info = (void*)(qp->rx_buff + rx_size);
1359 
1360 	/* Due to house-keeping, there must be at least 2 buffs */
1361 	qp->rx_max_frame = qmin(transport_mtu, rx_size / 2);
1362 	qp->rx_max_entry = rx_size / qp->rx_max_frame;
1363 	qp->rx_index = 0;
1364 
1365 	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
1366 
1367 	/* Set up the hdr offsets with 0s */
1368 	for (i = 0; i < qp->rx_max_entry; i++) {
1369 		offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) -
1370 		    sizeof(struct ntb_payload_header));
1371 		memset(offset, 0, sizeof(struct ntb_payload_header));
1372 	}
1373 
1374 	qp->rx_pkts = 0;
1375 	qp->tx_pkts = 0;
1376 	qp->tx_index = 0;
1377 
1378 	return (0);
1379 }
1380 
1381 static void
1382 ntb_qp_link_work(void *arg)
1383 {
1384 	struct ntb_transport_qp *qp = arg;
1385 	device_t dev = qp->dev;
1386 	struct ntb_transport_ctx *nt = qp->transport;
1387 	int i;
1388 	uint32_t val;
1389 
1390 	/* Report queues that are up on our side */
1391 	for (i = 0, val = 0; i < nt->qp_count; i++) {
1392 		if (nt->qp_vec[i].client_ready)
1393 			val |= (1 << i);
1394 	}
1395 	ntb_peer_spad_write(dev, NTBT_QP_LINKS, val);
1396 
1397 	/* See if the remote side is up */
1398 	ntb_spad_read(dev, NTBT_QP_LINKS, &val);
1399 	if ((val & (1ull << qp->qp_num)) != 0) {
1400 		ntb_printf(2, "qp %d link up\n", qp->qp_num);
1401 		qp->link_is_up = true;
1402 
1403 		if (qp->event_handler != NULL)
1404 			qp->event_handler(qp->cb_data, NTB_LINK_UP);
1405 
1406 		ntb_db_clear_mask(dev, 1ull << qp->qp_num);
1407 	} else if (nt->link_is_up)
1408 		callout_reset(&qp->link_work,
1409 		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
1410 }
1411 
1412 /* Link down event */
1413 static void
1414 ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
1415 {
1416 	struct ntb_transport_qp *qp;
1417 	int i;
1418 
1419 	callout_drain(&nt->link_work);
1420 	nt->link_is_up = 0;
1421 
1422 	/* Pass along the info to any clients */
1423 	for (i = 0; i < nt->qp_count; i++) {
1424 		if ((nt->qp_bitmap & (1ull << i)) != 0) {
1425 			qp = &nt->qp_vec[i];
1426 			ntb_qp_link_cleanup(qp);
1427 			callout_drain(&qp->link_work);
1428 		}
1429 	}
1430 
1431 	/*
1432 	 * The scratchpad registers keep their values if the remote side
1433 	 * goes down; blast them now to give them a sane value the next
1434 	 * time they are accessed.
1435 	 */
1436 	ntb_spad_clear(nt->dev);
1437 }
1438 
1439 static void
1440 ntb_transport_link_cleanup_work(void *arg, int pending __unused)
1441 {
1442 
1443 	ntb_transport_link_cleanup(arg);
1444 }
1445 
1446 static void
1447 ntb_qp_link_down(struct ntb_transport_qp *qp)
1448 {
1449 
1450 	ntb_qp_link_cleanup(qp);
1451 }
1452 
1453 static void
1454 ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
1455 {
1456 
1457 	qp->link_is_up = false;
1458 	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
1459 
1460 	qp->tx_index = qp->rx_index = 0;
1461 	qp->tx_bytes = qp->rx_bytes = 0;
1462 	qp->tx_pkts = qp->rx_pkts = 0;
1463 
1464 	qp->rx_ring_empty = 0;
1465 	qp->tx_ring_full = 0;
1466 
1467 	qp->rx_err_no_buf = qp->tx_err_no_buf = 0;
1468 	qp->rx_err_oflow = qp->rx_err_ver = 0;
1469 }
1470 
1471 static void
1472 ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
1473 {
1474 
1475 	callout_drain(&qp->link_work);
1476 	ntb_qp_link_down_reset(qp);
1477 
1478 	if (qp->event_handler != NULL)
1479 		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
1480 }
1481 
1482 /* Link commanded down */
1483 /**
1484  * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1485  * @qp: NTB transport layer queue to be disabled
1486  *
1487  * Notify NTB transport layer of client's desire to no longer receive data on
1488  * the specified transport queue.  It is the client's responsibility to ensure
1489  * all entries on the queue are purged or otherwise handled appropriately.
1490  */
1491 void
1492 ntb_transport_link_down(struct ntb_transport_qp *qp)
1493 {
1494 	struct ntb_transport_ctx *nt = qp->transport;
1495 	int i;
1496 	uint32_t val;
1497 
1498 	qp->client_ready = false;
1499 	for (i = 0, val = 0; i < nt->qp_count; i++) {
1500 		if (nt->qp_vec[i].client_ready)
1501 			val |= (1 << i);
1502 	}
1503 	ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val);
1504 
1505 	if (qp->link_is_up)
1506 		ntb_send_link_down(qp);
1507 	else
1508 		callout_drain(&qp->link_work);
1509 }
1510 
1511 /**
1512  * ntb_transport_link_query - Query transport link state
1513  * @qp: NTB transport layer queue to be queried
1514  *
1515  * Query connectivity to the remote system of the NTB transport queue
1516  *
1517  * RETURNS: true for link up or false for link down
1518  */
1519 bool
1520 ntb_transport_link_query(struct ntb_transport_qp *qp)
1521 {
1522 
1523 	return (qp->link_is_up);
1524 }
1525 
1526 /**
1527  * ntb_transport_link_speed - Query transport link speed
1528  * @qp: NTB transport layer queue to be queried
1529  *
1530  * Query connection speed to the remote system of the NTB transport queue
1531  *
1532  * RETURNS: link speed in bits per second
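 *
 * For example, a GEN3 x8 link yields 8.0 GT/s * 128/130 * 8 lanes,
 * i.e. roughly 63 Gb/s.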
1533  */
1534 uint64_t
1535 ntb_transport_link_speed(struct ntb_transport_qp *qp)
1536 {
1537 	struct ntb_transport_ctx *nt = qp->transport;
1538 	uint64_t rate;
1539 
1540 	if (!nt->link_is_up)
1541 		return (0);
1542 	switch (nt->link_speed) {
1543 	case NTB_SPEED_GEN1:
1544 		rate = 2500000000 * 8 / 10;
1545 		break;
1546 	case NTB_SPEED_GEN2:
1547 		rate = 5000000000 * 8 / 10;
1548 		break;
1549 	case NTB_SPEED_GEN3:
1550 		rate = 8000000000 * 128 / 130;
1551 		break;
1552 	case NTB_SPEED_GEN4:
1553 		rate = 16000000000 * 128 / 130;
1554 		break;
1555 	default:
1556 		return (0);
1557 	}
1558 	if (nt->link_width <= 0)
1559 		return (0);
1560 	return (rate * nt->link_width);
1561 }
1562 
1563 static void
1564 ntb_send_link_down(struct ntb_transport_qp *qp)
1565 {
1566 	struct ntb_queue_entry *entry;
1567 	int i, rc;
1568 
1569 	if (!qp->link_is_up)
1570 		return;
1571 
1572 	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1573 		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1574 		if (entry != NULL)
1575 			break;
1576 		pause("NTB Wait for link down", hz / 10);
1577 	}
1578 
1579 	if (entry == NULL)
1580 		return;
1581 
1582 	entry->cb_data = NULL;
1583 	entry->buf = NULL;
1584 	entry->len = 0;
1585 	entry->flags = NTBT_LINK_DOWN_FLAG;
1586 
1587 	mtx_lock(&qp->tx_lock);
1588 	rc = ntb_process_tx(qp, entry);
1589 	mtx_unlock(&qp->tx_lock);
1590 	if (rc != 0)
1591 		printf("ntb: Failed to send link down\n");
1592 
1593 	ntb_qp_link_down_reset(qp);
1594 }
1595 
1596 
1597 /* List Management */
1598 
1599 static void
1600 ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
1601     struct ntb_queue_list *list)
1602 {
1603 
1604 	mtx_lock_spin(lock);
1605 	STAILQ_INSERT_TAIL(list, entry, entry);
1606 	mtx_unlock_spin(lock);
1607 }
1608 
1609 static struct ntb_queue_entry *
1610 ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
1611 {
1612 	struct ntb_queue_entry *entry;
1613 
1614 	mtx_lock_spin(lock);
1615 	if (STAILQ_EMPTY(list)) {
1616 		entry = NULL;
1617 		goto out;
1618 	}
1619 	entry = STAILQ_FIRST(list);
1620 	STAILQ_REMOVE_HEAD(list, entry);
1621 out:
1622 	mtx_unlock_spin(lock);
1623 
1624 	return (entry);
1625 }
1626 
1627 static struct ntb_queue_entry *
1628 ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from,
1629     struct ntb_queue_list *to)
1630 {
1631 	struct ntb_queue_entry *entry;
1632 
1633 	mtx_lock_spin(lock);
1634 	if (STAILQ_EMPTY(from)) {
1635 		entry = NULL;
1636 		goto out;
1637 	}
1638 	entry = STAILQ_FIRST(from);
1639 	STAILQ_REMOVE_HEAD(from, entry);
1640 	STAILQ_INSERT_TAIL(to, entry, entry);
1641 
1642 out:
1643 	mtx_unlock_spin(lock);
1644 	return (entry);
1645 }
1646 
1647 /**
1648  * ntb_transport_qp_num - Query the qp number
1649  * @qp: NTB transport layer queue to be queried
1650  *
1651  * Query qp number of the NTB transport queue
1652  *
1653  * RETURNS: a zero based number specifying the qp number
1654  */
1655 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1656 {
1657 
1658 	return (qp->qp_num);
1659 }
1660 
1661 /**
1662  * ntb_transport_max_size - Query the max payload size of a qp
1663  * @qp: NTB transport layer queue to be queried
1664  *
1665  * Query the maximum payload size permissible on the given qp
1666  *
1667  * RETURNS: the max payload size of a qp
1668  */
1669 unsigned int
1670 ntb_transport_max_size(struct ntb_transport_qp *qp)
1671 {
1672 
1673 	return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
1674 }
1675 
1676 unsigned int
1677 ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
1678 {
1679 	unsigned int head = qp->tx_index;
1680 	unsigned int tail = qp->remote_rx_info->entry;
1681 
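	/*
	 * Free slots between our producer index (head) and the peer's
	 * consumer index (tail), modulo the ring size.
	 */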
1682 	return (tail >= head ? tail - head : qp->tx_max_entry + tail - head);
1683 }
1684 
1685 static device_method_t ntb_transport_methods[] = {
1686 	/* Device interface */
1687 	DEVMETHOD(device_probe,     ntb_transport_probe),
1688 	DEVMETHOD(device_attach,    ntb_transport_attach),
1689 	DEVMETHOD(device_detach,    ntb_transport_detach),
1690 	/* Bus interface */
1691 	DEVMETHOD(bus_child_location_str, ntb_transport_child_location_str),
1692 	DEVMETHOD(bus_print_child,  ntb_transport_print_child),
1693 	DEVMETHOD_END
1694 };
1695 
1696 devclass_t ntb_transport_devclass;
1697 static DEFINE_CLASS_0(ntb_transport, ntb_transport_driver,
1698     ntb_transport_methods, sizeof(struct ntb_transport_ctx));
1699 DRIVER_MODULE(ntb_transport, ntb_hw, ntb_transport_driver,
1700     ntb_transport_devclass, NULL, NULL);
1701 MODULE_DEPEND(ntb_transport, ntb, 1, 1, 1);
1702 MODULE_VERSION(ntb_transport, 1);
1703