/*-
 * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
 *
 * This module contains a driver for NTB hardware in Intel Xeon/Atom CPUs.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>

#include "ntb_hw_intel.h"
#include "../ntb.h"

#define MAX_MSIX_INTERRUPTS	\
	MAX(MAX(XEON_DB_COUNT, ATOM_DB_COUNT), XEON_GEN3_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */
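/* Keeps the high bits of a BAR address; clears the low 12 (4KB offset). */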
76 #define BAR_HIGH_MASK		(~((1ull << 12) - 1))
77 
78 #define	NTB_MSIX_VER_GUARD	0xaabbccdd
79 #define	NTB_MSIX_RECEIVED	0xe0f0e0f0
80 
81 /*
82  * PCI constants could be somewhere more generic, but aren't defined/used in
83  * pci.c.
84  */
85 #define	PCI_MSIX_ENTRY_SIZE		16
86 #define	PCI_MSIX_ENTRY_LOWER_ADDR	0
87 #define	PCI_MSIX_ENTRY_UPPER_ADDR	4
88 #define	PCI_MSIX_ENTRY_DATA		8
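/*
 * Each MSI-X table entry is 16 bytes: message address low/high words at
 * bytes 0/4, message data at byte 8, and vector control at byte 12 (the
 * last is unused here).
 */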

enum ntb_device_type {
	NTB_XEON_GEN1,
	NTB_XEON_GEN3,
	NTB_ATOM
};

/* ntb_conn_type are hardware numbers, cannot change. */
enum ntb_conn_type {
	NTB_CONN_TRANSPARENT = 0,
	NTB_CONN_B2B = 1,
	NTB_CONN_RP = 2,
};

enum ntb_b2b_direction {
	NTB_DEV_USD = 0,
	NTB_DEV_DSD = 1,
};

enum ntb_bar {
	NTB_CONFIG_BAR = 0,
	NTB_B2B_BAR_1,
	NTB_B2B_BAR_2,
	NTB_B2B_BAR_3,
	NTB_MAX_BARS
};

enum {
	NTB_MSIX_GUARD = 0,
	NTB_MSIX_DATA0,
	NTB_MSIX_DATA1,
	NTB_MSIX_DATA2,
	NTB_MSIX_OFS0,
	NTB_MSIX_OFS1,
	NTB_MSIX_OFS2,
	NTB_MSIX_DONE,
	NTB_MAX_MSIX_SPAD
};
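/*
 * Scratchpad layout used to exchange MSI-X setup with the peer for the
 * SB01BASE_LOCKUP workaround: a version guard word, three MSI-X data
 * words, three MSI-X table offsets, and a completion flag (see
 * intel_ntb_exchange_msix() and the NTB_MSIX_VER_GUARD/NTB_MSIX_RECEIVED
 * values above).
 */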

/* Device features and workarounds */
#define HAS_FEATURE(ntb, feature)	\
	(((ntb)->features & (feature)) != 0)

struct ntb_hw_info {
	uint32_t		device_id;
	const char		*desc;
	enum ntb_device_type	type;
	uint32_t		features;
};

struct ntb_pci_bar_info {
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	vm_paddr_t		pbase;
	caddr_t			vbase;
	vm_size_t		size;
	vm_memattr_t		map_mode;

	/* Configuration register offsets */
	uint32_t		psz_off;
	uint32_t		ssz_off;
	uint32_t		pbarxlat_off;
};

struct ntb_int_info {
	struct resource	*res;
	int		rid;
	void		*tag;
};

struct ntb_vec {
	struct ntb_softc	*ntb;
	uint32_t		num;
	unsigned		masked;
};

struct ntb_reg {
	uint32_t	ntb_ctl;
	uint32_t	lnk_sta;
	uint8_t		db_size;
	unsigned	mw_bar[NTB_MAX_BARS];
};

struct ntb_alt_reg {
	uint32_t	db_bell;
	uint32_t	db_mask;
	uint32_t	spad;
};

struct ntb_xlat_reg {
	uint32_t	bar0_base;
	uint32_t	bar2_base;
	uint32_t	bar4_base;
	uint32_t	bar5_base;

	uint32_t	bar2_xlat;
	uint32_t	bar4_xlat;
	uint32_t	bar5_xlat;

	uint32_t	bar2_limit;
	uint32_t	bar4_limit;
	uint32_t	bar5_limit;
};

struct ntb_b2b_addr {
	uint64_t	bar0_addr;
	uint64_t	bar2_addr64;
	uint64_t	bar4_addr64;
	uint64_t	bar4_addr32;
	uint64_t	bar5_addr32;
};

struct ntb_msix_data {
	uint32_t	nmd_ofs;
	uint32_t	nmd_data;
};

struct ntb_softc {
	/* ntb.c context. Do not move! Must go first! */
	void			*ntb_store;

	device_t		device;
	enum ntb_device_type	type;
	uint32_t		features;

	struct ntb_pci_bar_info	bar_info[NTB_MAX_BARS];
	struct ntb_int_info	int_info[MAX_MSIX_INTERRUPTS];
	uint32_t		allocated_interrupts;

	struct ntb_msix_data	peer_msix_data[XEON_NONLINK_DB_MSIX_BITS];
	struct ntb_msix_data	msix_data[XEON_NONLINK_DB_MSIX_BITS];
	bool			peer_msix_good;
	bool			peer_msix_done;
	struct ntb_pci_bar_info	*peer_lapic_bar;
	struct callout		peer_msix_work;

	bus_dma_tag_t		bar0_dma_tag;
	bus_dmamap_t		bar0_dma_map;

	struct callout		heartbeat_timer;
	struct callout		lr_timer;

	struct ntb_vec		*msix_vec;

	uint32_t		ppd;
	enum ntb_conn_type	conn_type;
	enum ntb_b2b_direction	dev_type;

	/* Offset of peer bar0 in B2B BAR */
	uint64_t			b2b_off;
	/* Memory window used to access peer bar0 */
#define B2B_MW_DISABLED			UINT8_MAX
	uint8_t				b2b_mw_idx;
	uint32_t			msix_xlat;
	uint8_t				msix_mw_idx;

	uint8_t				mw_count;
	uint8_t				spad_count;
	uint8_t				db_count;
	uint8_t				db_vec_count;
	uint8_t				db_vec_shift;

	/* Protects local db_mask. */
#define DB_MASK_LOCK(sc)	mtx_lock_spin(&(sc)->db_mask_lock)
#define DB_MASK_UNLOCK(sc)	mtx_unlock_spin(&(sc)->db_mask_lock)
#define DB_MASK_ASSERT(sc,f)	mtx_assert(&(sc)->db_mask_lock, (f))
	struct mtx			db_mask_lock;

	volatile uint32_t		ntb_ctl;
	volatile uint32_t		lnk_sta;

	uint64_t			db_valid_mask;
	uint64_t			db_link_mask;
	uint64_t			db_mask;
	uint64_t			fake_db;	/* NTB_SB01BASE_LOCKUP*/
	uint64_t			force_db;	/* NTB_SB01BASE_LOCKUP*/

	int				last_ts;	/* ticks @ last irq */

	const struct ntb_reg		*reg;
	const struct ntb_alt_reg	*self_reg;
	const struct ntb_alt_reg	*peer_reg;
	const struct ntb_xlat_reg	*xlat_reg;
};

#ifdef __i386__
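/*
 * i386 lacks native 8-byte bus_space accessors, so emulate them with two
 * 4-byte accesses.  Note these are not atomic: the device may observe the
 * two halves of a 64-bit update separately.
 */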
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif

#define intel_ntb_bar_read(SIZE, bar, offset) \
	    bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset))
#define intel_ntb_bar_write(SIZE, bar, offset, val) \
	    bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
#define intel_ntb_reg_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define intel_ntb_reg_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
#define intel_ntb_mw_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset)
#define intel_ntb_mw_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset, val)
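/*
 * All of the macros above implicitly use a local 'ntb' softc pointer and
 * expand to bus_space accessors of the given SIZE in bytes.  For example
 * (illustrative only), reading the 64-bit Atom doorbell mask register:
 *
 *	uint64_t mask = intel_ntb_reg_read(8, ATOM_PDBMSK_OFFSET);
 */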

static int intel_ntb_probe(device_t device);
static int intel_ntb_attach(device_t device);
static int intel_ntb_detach(device_t device);
static uint64_t intel_ntb_db_valid_mask(device_t dev);
static void intel_ntb_spad_clear(device_t dev);
static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector);
static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
    enum ntb_width *width);
static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed,
    enum ntb_width width);
static int intel_ntb_link_disable(device_t dev);
static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val);

static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
    uint32_t *base, uint32_t *xlat, uint32_t *lmt);
static int intel_ntb_map_pci_bars(struct ntb_softc *ntb);
static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
    vm_memattr_t);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
    const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
    struct ntb_pci_bar_info *bar);
static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
static int intel_ntb_init_isr(struct ntb_softc *ntb);
static int intel_ntb_xeon_gen3_init_isr(struct ntb_softc *ntb);
static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb);
static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
static void intel_ntb_free_msix_vec(struct ntb_softc *ntb);
static void intel_ntb_get_msix_info(struct ntb_softc *ntb);
static void intel_ntb_exchange_msix(void *);
static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id);
static void intel_ntb_detect_max_mw(struct ntb_softc *ntb);
static int intel_ntb_detect_xeon(struct ntb_softc *ntb);
static int intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb);
static int intel_ntb_detect_atom(struct ntb_softc *ntb);
static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb);
static int intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb);
static int intel_ntb_atom_init_dev(struct ntb_softc *ntb);
static void intel_ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
    enum ntb_bar regbar);
static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
    uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
    enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
    const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
static int xeon_gen3_setup_b2b_mw(struct ntb_softc *);
static int intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr,
    size_t size);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool _xeon_link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *);
static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
static void recover_atom_link(void *arg);
static bool intel_ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
static void intel_ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);

static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
#define intel_ntb_printf(lvl, ...) do {				\
	if ((lvl) <= g_ntb_hw_debug_level) {			\
		device_printf(ntb->device, __VA_ARGS__);	\
	}							\
} while (0)

#define	_NTB_PAT_UC	0
#define	_NTB_PAT_WC	1
#define	_NTB_PAT_WT	4
#define	_NTB_PAT_WP	5
#define	_NTB_PAT_WB	6
#define	_NTB_PAT_UCM	7
static unsigned g_ntb_mw_pat = _NTB_PAT_UC;
SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN,
    &g_ntb_mw_pat, 0, "Configure the default memory window cache flags (PAT): "
    "UC: "  __XSTRING(_NTB_PAT_UC) ", "
    "WC: "  __XSTRING(_NTB_PAT_WC) ", "
    "WT: "  __XSTRING(_NTB_PAT_WT) ", "
    "WP: "  __XSTRING(_NTB_PAT_WP) ", "
    "WB: "  __XSTRING(_NTB_PAT_WB) ", "
    "UC-: " __XSTRING(_NTB_PAT_UCM));

static inline vm_memattr_t
intel_ntb_pat_flags(void)
{

	switch (g_ntb_mw_pat) {
	case _NTB_PAT_WC:
		return (VM_MEMATTR_WRITE_COMBINING);
	case _NTB_PAT_WT:
		return (VM_MEMATTR_WRITE_THROUGH);
	case _NTB_PAT_WP:
		return (VM_MEMATTR_WRITE_PROTECTED);
	case _NTB_PAT_WB:
		return (VM_MEMATTR_WRITE_BACK);
	case _NTB_PAT_UCM:
		return (VM_MEMATTR_WEAK_UNCACHEABLE);
	case _NTB_PAT_UC:
		/* FALLTHROUGH */
	default:
		return (VM_MEMATTR_UNCACHEABLE);
	}
}

/*
 * Well, this obviously doesn't belong here, but it doesn't seem to exist
 * anywhere better yet.
 */
static inline const char *
intel_ntb_vm_memattr_to_str(vm_memattr_t pat)
{

	switch (pat) {
	case VM_MEMATTR_WRITE_COMBINING:
		return ("WRITE_COMBINING");
	case VM_MEMATTR_WRITE_THROUGH:
		return ("WRITE_THROUGH");
	case VM_MEMATTR_WRITE_PROTECTED:
		return ("WRITE_PROTECTED");
	case VM_MEMATTR_WRITE_BACK:
		return ("WRITE_BACK");
	case VM_MEMATTR_WEAK_UNCACHEABLE:
		return ("UNCACHED");
	case VM_MEMATTR_UNCACHEABLE:
		return ("UNCACHEABLE");
	default:
		return ("UNKNOWN");
	}
}

static int g_ntb_msix_idx = 1;
SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx,
    0, "Use this memory window to access the peer MSIX message complex on "
    "certain Xeon-based NTB systems, as a workaround for a hardware erratum.  "
    "Like b2b_mw_idx, negative values index from the last available memory "
    "window.  (Applies on Xeon platforms with the SB01BASE_LOCKUP erratum.)");

static int g_ntb_mw_idx = -1;
SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx,
    0, "Use this memory window to access the peer NTB registers.  A "
    "non-negative value starts from the first MW index; a negative value "
    "starts from the last MW index.  The default is -1, i.e., the last "
    "available memory window.  Both sides of the NTB MUST set the same "
    "value here!  (Applies on Xeon platforms with the SDOORBELL_LOCKUP "
    "erratum.)");

/* Hardware owns the low 16 bits of features. */
#define NTB_BAR_SIZE_4K		(1 << 0)
#define NTB_SDOORBELL_LOCKUP	(1 << 1)
#define NTB_SB01BASE_LOCKUP	(1 << 2)
#define NTB_B2BDOORBELL_BIT14	(1 << 3)
/* Software/configuration owns the top 16 bits. */
#define NTB_SPLIT_BAR		(1ull << 16)
#define NTB_ONE_MSIX		(1ull << 17)

#define NTB_FEATURES_STR \
    "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \
    "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K"
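/*
 * NTB_FEATURES_STR is a printf(9) "%b" format string: the leading '\20'
 * selects hexadecimal output, and each subsequent '\NN' is a 1-indexed bit
 * position followed by that bit's name.  For example, '\21SPLIT_BAR4'
 * labels bit 17, i.e., NTB_SPLIT_BAR (1ull << 16) above.
 */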

static struct ntb_hw_info pci_ids[] = {
	/* XXX: PS/SS IDs left out until they are supported. */
	{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
		NTB_ATOM, 0 },

	{ 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
	{ 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
	{ 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },

	{ 0x201C8086, "SKL Xeon E5 V5 Non-Transparent Bridge B2B",
		NTB_XEON_GEN3, 0 },
};

static const struct ntb_reg atom_reg = {
	.ntb_ctl = ATOM_NTBCNTL_OFFSET,
	.lnk_sta = ATOM_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint64_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg atom_pri_reg = {
	.db_bell = ATOM_PDOORBELL_OFFSET,
	.db_mask = ATOM_PDBMSK_OFFSET,
	.spad = ATOM_SPAD_OFFSET,
};

static const struct ntb_alt_reg atom_b2b_reg = {
	.db_bell = ATOM_B2B_DOORBELL_OFFSET,
	.spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg atom_sec_xlat = {
#if 0
	/* "FIXME" says the Linux driver. */
	.bar0_base = ATOM_SBAR0BASE_OFFSET,
	.bar2_base = ATOM_SBAR2BASE_OFFSET,
	.bar4_base = ATOM_SBAR4BASE_OFFSET,

	.bar2_limit = ATOM_SBAR2LMT_OFFSET,
	.bar4_limit = ATOM_SBAR4LMT_OFFSET,
#endif

	.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
	.bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
};

static const struct ntb_reg xeon_reg = {
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.lnk_sta = XEON_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint16_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
};

static const struct ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_base = XEON_SBAR2BASE_OFFSET,
	.bar4_base = XEON_SBAR4BASE_OFFSET,
	.bar5_base = XEON_SBAR5BASE_OFFSET,

	.bar2_limit = XEON_SBAR2LMT_OFFSET,
	.bar4_limit = XEON_SBAR4LMT_OFFSET,
	.bar5_limit = XEON_SBAR5LMT_OFFSET,

	.bar2_xlat = XEON_SBAR2XLAT_OFFSET,
	.bar4_xlat = XEON_SBAR4XLAT_OFFSET,
	.bar5_xlat = XEON_SBAR5XLAT_OFFSET,
};

static struct ntb_b2b_addr xeon_b2b_usd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static const struct ntb_reg xeon_gen3_reg = {
	.ntb_ctl = XEON_GEN3_REG_IMNTB_CTRL,
	.lnk_sta = XEON_GEN3_INT_LNK_STS_OFFSET,
	.db_size = sizeof(uint32_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg xeon_gen3_pri_reg = {
	.db_bell = XEON_GEN3_REG_EMDOORBELL,
	.db_mask = XEON_GEN3_REG_IMINT_DISABLE,
	.spad = XEON_GEN3_REG_IMSPAD,
};

static const struct ntb_alt_reg xeon_gen3_b2b_reg = {
	.db_bell = XEON_GEN3_REG_IMDOORBELL,
	.db_mask = XEON_GEN3_REG_EMINT_DISABLE,
	.spad = XEON_GEN3_REG_IMB2B_SSPAD,
};

static const struct ntb_xlat_reg xeon_gen3_sec_xlat = {
	.bar0_base = XEON_GEN3_EXT_REG_BAR0BASE,
	.bar2_base = XEON_GEN3_EXT_REG_BAR1BASE,
	.bar4_base = XEON_GEN3_EXT_REG_BAR2BASE,

	.bar2_limit = XEON_GEN3_REG_IMBAR1XLIMIT,
	.bar4_limit = XEON_GEN3_REG_IMBAR2XLIMIT,

	.bar2_xlat = XEON_GEN3_REG_IMBAR1XBASE,
	.bar4_xlat = XEON_GEN3_REG_IMBAR2XBASE,
};

SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "B2B MW segment overrides -- MUST be the same on both sides");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the upstream side of the link.  MUST be the same "
    "address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the downstream side of the link.  MUST be the same"
    " address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");
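/*
 * These are loader(8) tunables (CTLFLAG_RDTUN).  A hypothetical
 * /boot/loader.conf override might look like the following (the addresses
 * are made up; each value must be identical on both hosts):
 *
 *	hw.ntb.xeon_b2b.usd_bar2_addr64="0x1000000000"
 *	hw.ntb.xeon_b2b.dsd_bar2_addr64="0x2000000000"
 */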

/*
 * OS <-> Driver interface structures
 */
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");

/*
 * OS <-> Driver linkage functions
 */
static int
intel_ntb_probe(device_t device)
{
	struct ntb_hw_info *p;

	p = intel_ntb_get_device_info(pci_get_devid(device));
	if (p == NULL)
		return (ENXIO);

	device_set_desc(device, p->desc);
	return (0);
}

static int
intel_ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = device_get_softc(device);
	p = intel_ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;
	ntb->b2b_mw_idx = B2B_MW_DISABLED;
	ntb->msix_mw_idx = B2B_MW_DISABLED;

	/* Heartbeat timer for NTB_ATOM since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);
	callout_init(&ntb->peer_msix_work, 1);
	mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);

	if (ntb->type == NTB_ATOM)
		error = intel_ntb_detect_atom(ntb);
	else if (ntb->type == NTB_XEON_GEN3)
		error = intel_ntb_detect_xeon_gen3(ntb);
	else
		error = intel_ntb_detect_xeon(ntb);
	if (error != 0)
		goto out;

	intel_ntb_detect_max_mw(ntb);

	pci_enable_busmaster(ntb->device);

	error = intel_ntb_map_pci_bars(ntb);
	if (error != 0)
		goto out;
	if (ntb->type == NTB_ATOM)
		error = intel_ntb_atom_init_dev(ntb);
	else if (ntb->type == NTB_XEON_GEN3)
		error = intel_ntb_xeon_gen3_init_dev(ntb);
	else
		error = intel_ntb_xeon_init_dev(ntb);
	if (error != 0)
		goto out;

	intel_ntb_spad_clear(device);

	intel_ntb_poll_link(ntb);

	intel_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	if (error != 0)
		intel_ntb_detach(device);
	return (error);
}

static int
intel_ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = device_get_softc(device);

	/* Detach & delete all children */
	ntb_unregister_device(device);

	if (ntb->self_reg != NULL) {
		DB_MASK_LOCK(ntb);
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_valid_mask);
		DB_MASK_UNLOCK(ntb);
	}
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	callout_drain(&ntb->peer_msix_work);
	pci_disable_busmaster(ntb->device);
	if (ntb->type == NTB_XEON_GEN1)
		intel_ntb_teardown_xeon(ntb);
	intel_ntb_teardown_interrupts(ntb);

	mtx_destroy(&ntb->db_mask_lock);

	intel_ntb_unmap_pci_bar(ntb);

	return (0);
}

/*
 * Driver internal routines
 */
static inline enum ntb_bar
intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{

	KASSERT(mw < ntb->mw_count,
	    ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count));
	KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));

	return (ntb->reg->mw_bar[mw]);
}

static inline bool
bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
	/* XXX This assertion could be stronger. */
	KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
	return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(ntb, NTB_SPLIT_BAR));
}

static inline void
bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
    uint32_t *xlat, uint32_t *lmt)
{
	uint32_t basev, lmtv, xlatv;

	switch (bar) {
	case NTB_B2B_BAR_1:
		basev = ntb->xlat_reg->bar2_base;
		lmtv = ntb->xlat_reg->bar2_limit;
		xlatv = ntb->xlat_reg->bar2_xlat;
		break;
	case NTB_B2B_BAR_2:
		basev = ntb->xlat_reg->bar4_base;
		lmtv = ntb->xlat_reg->bar4_limit;
		xlatv = ntb->xlat_reg->bar4_xlat;
		break;
	case NTB_B2B_BAR_3:
		basev = ntb->xlat_reg->bar5_base;
		lmtv = ntb->xlat_reg->bar5_limit;
		xlatv = ntb->xlat_reg->bar5_xlat;
		break;
	default:
		KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
		    ("bad bar"));
		basev = lmtv = xlatv = 0;
		break;
	}

	if (base != NULL)
		*base = basev;
	if (xlat != NULL)
		*xlat = xlatv;
	if (lmt != NULL)
		*lmt = lmtv;
}

static int
intel_ntb_map_pci_bars(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *bar;
	int rc;

	bar = &ntb->bar_info[NTB_CONFIG_BAR];
	bar->pci_resource_id = PCIR_BAR(0);
	rc = map_mmr_bar(ntb, bar);
	if (rc != 0)
		goto out;

	/*
	 * At least on Xeon v4, the NTB device leaks some remote-side BAR0
	 * writes (those that are supposed to update the scratchpad
	 * registers) through to the host.  It is not clear why this happens,
	 * but it may be related to BAR0 being 32KB on the link side while it
	 * is 64KB on the host side.  Without this hack, DMAR blocks those
	 * accesses as not allowed.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(ntb->device), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    bar->size, 1, bar->size, 0, NULL, NULL, &ntb->bar0_dma_tag)) {
		device_printf(ntb->device, "Unable to create BAR0 tag\n");
		return (ENOMEM);
	}
	if (bus_dmamap_create(ntb->bar0_dma_tag, 0, &ntb->bar0_dma_map)) {
		device_printf(ntb->device, "Unable to create BAR0 map\n");
		return (ENOMEM);
	}
	if (bus_dma_iommu_load_ident(ntb->bar0_dma_tag, ntb->bar0_dma_map,
	    bar->pbase, bar->size, 0)) {
		device_printf(ntb->device, "Unable to load BAR0 map\n");
		return (ENOMEM);
	}

	bar = &ntb->bar_info[NTB_B2B_BAR_1];
	bar->pci_resource_id = PCIR_BAR(2);
	rc = map_memory_window_bar(ntb, bar);
	if (rc != 0)
		goto out;
	if (ntb->type == NTB_XEON_GEN3) {
		bar->psz_off = XEON_GEN3_INT_REG_IMBAR1SZ;
		bar->ssz_off = XEON_GEN3_INT_REG_EMBAR1SZ;
		bar->pbarxlat_off = XEON_GEN3_REG_EMBAR1XBASE;
	} else {
		bar->psz_off = XEON_PBAR23SZ_OFFSET;
		bar->ssz_off = XEON_SBAR23SZ_OFFSET;
		bar->pbarxlat_off = XEON_PBAR2XLAT_OFFSET;
	}

	bar = &ntb->bar_info[NTB_B2B_BAR_2];
	bar->pci_resource_id = PCIR_BAR(4);
	rc = map_memory_window_bar(ntb, bar);
	if (rc != 0)
		goto out;
	if (ntb->type == NTB_XEON_GEN3) {
		bar->psz_off = XEON_GEN3_INT_REG_IMBAR2SZ;
		bar->ssz_off = XEON_GEN3_INT_REG_EMBAR2SZ;
		bar->pbarxlat_off = XEON_GEN3_REG_EMBAR2XBASE;
	} else {
		bar->psz_off = XEON_PBAR4SZ_OFFSET;
		bar->ssz_off = XEON_SBAR4SZ_OFFSET;
		bar->pbarxlat_off = XEON_PBAR4XLAT_OFFSET;
	}

	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR))
		goto out;

	if (ntb->type == NTB_XEON_GEN3) {
		device_printf(ntb->device, "no split bar support\n");
		return (ENXIO);
	}

	bar = &ntb->bar_info[NTB_B2B_BAR_3];
	bar->pci_resource_id = PCIR_BAR(5);
	rc = map_memory_window_bar(ntb, bar);
	bar->psz_off = XEON_PBAR5SZ_OFFSET;
	bar->ssz_off = XEON_SBAR5SZ_OFFSET;
	bar->pbarxlat_off = XEON_PBAR5XLAT_OFFSET;

out:
	if (rc != 0)
		device_printf(ntb->device,
		    "unable to allocate pci resource\n");
	return (rc);
}

static void
print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
    const char *kind)
{

	device_printf(ntb->device,
	    "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1,
	    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
	    (uintmax_t)bar->size, kind);
}

static int
map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mmr");
	return (0);
}

static int
map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{
	int rc;
	vm_memattr_t mapmode;
	uint8_t bar_size_bits = 0;

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);

	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	/*
	 * Ivytown NTB BAR sizes are misreported by the hardware due to a
	 * hardware issue. To work around this, query the size it should be
	 * configured to by the device and modify the resource to correspond to
	 * this new size. The BIOS on systems with this problem is required to
	 * provide enough address space to allow the driver to make this change
	 * safely.
	 *
	 * Ideally I could have just specified the size when I allocated the
	 * resource like:
	 *  bus_alloc_resource(ntb->device,
	 *	SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
	 *	1ul << bar_size_bits, RF_ACTIVE);
	 * but the PCI driver does not honor the size in this call, so we have
	 * to modify it after the fact.
	 */
	if (HAS_FEATURE(ntb, NTB_BAR_SIZE_4K)) {
		if (bar->pci_resource_id == PCIR_BAR(2))
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR23SZ_OFFSET, 1);
		else
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR45SZ_OFFSET, 1);

		rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
		    bar->pci_resource, bar->pbase,
		    bar->pbase + (1ul << bar_size_bits) - 1);
		if (rc != 0) {
			device_printf(ntb->device,
			    "unable to resize bar\n");
			return (rc);
		}

		save_bar_parameters(bar);
	}

	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mw");

	/*
	 * Optionally, mark MW BARs as anything other than UC to improve
	 * performance.
	 */
	mapmode = intel_ntb_pat_flags();
	if (mapmode == bar->map_mode)
		return (0);

	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mapmode);
	if (rc == 0) {
		bar->map_mode = mapmode;
		device_printf(ntb->device,
		    "Marked BAR%d v:[%p-%p] p:[%p-%p] as "
		    "%s.\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
		    intel_ntb_vm_memattr_to_str(mapmode));
	} else {
		device_printf(ntb->device,
		    "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as "
		    "%s: %d\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
		    intel_ntb_vm_memattr_to_str(mapmode), rc);
		/* Proceed anyway. */
	}
	return (0);
}

static void
intel_ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *bar;
	int i;

	if (ntb->bar0_dma_map != NULL) {
		bus_dmamap_unload(ntb->bar0_dma_tag, ntb->bar0_dma_map);
		bus_dmamap_destroy(ntb->bar0_dma_tag, ntb->bar0_dma_map);
	}
	if (ntb->bar0_dma_tag != NULL)
		bus_dma_tag_destroy(ntb->bar0_dma_tag);
	for (i = 0; i < NTB_MAX_BARS; i++) {
		bar = &ntb->bar_info[i];
		if (bar->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    bar->pci_resource_id, bar->pci_resource);
	}
}

static int
intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;
	int rc;

	for (i = 0; i < num_vectors; i++) {
		ntb->int_info[i].rid = i + 1;
		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE);
		if (ntb->int_info[i].res == NULL) {
			device_printf(ntb->device,
			    "bus_alloc_resource failed\n");
			return (ENOMEM);
		}
		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;
		rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
		    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr,
		    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		if (rc != 0) {
			device_printf(ntb->device, "bus_setup_intr failed\n");
			return (ENXIO);
		}
	}
	return (0);
}

/*
 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector
 * cannot be allocated for each MSI-X message.  JHB seems to think remapping
 * should be okay.  This tunable should enable us to test that hypothesis
 * when someone gets their hands on some Xeon hardware.
 */
static int ntb_force_remap_mode;
SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN,
    &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped"
    " to a smaller number of ithreads, even if the desired number are "
    "available");

/*
 * In case it is NOT ok, give consumers an abort button.
 */
static int ntb_prefer_intx;
SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
    &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather "
    "than remapping MSI-X messages over available slots (match Linux driver "
    "behavior)");

/*
 * Remap the desired number of MSI-X messages to available ithreads in a simple
 * round-robin fashion.
 */
static int
intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
	u_int *vectors;
	uint32_t i;
	int rc;

	if (ntb_prefer_intx != 0)
		return (ENXIO);

	vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK);

	for (i = 0; i < desired; i++)
		vectors[i] = (i % avail) + 1;

	rc = pci_remap_msix(dev, desired, vectors);
	free(vectors, M_NTB);
	return (rc);
}
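/*
 * Example of the mapping above: with desired == 4 and avail == 2, the
 * vectors array becomes { 1, 2, 1, 2 }, i.e., the four messages are spread
 * round-robin over the two available ithreads.
 */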

static int
intel_ntb_xeon_gen3_init_isr(struct ntb_softc *ntb)
{
	uint64_t i, reg;
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/* Mask all the interrupts, including hardware interrupt */
	intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, ~0ULL);

	/* Clear Interrupt Status */
	reg = intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS);
	intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS, reg);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    XEON_GEN3_DB_MSIX_VECTOR_COUNT);

	rc = pci_alloc_msix(ntb->device, &num_vectors);
	if (rc != 0) {
		device_printf(ntb->device,
		    "Interrupt allocation failed %d\n", rc);
		return (rc);
	}
	if (desired_vectors != num_vectors) {
		device_printf(ntb->device, "Couldn't get %d vectors\n",
		    desired_vectors);
		return (ENXIO);
	}
	/* 32 db + 1 hardware */
	if (num_vectors == XEON_GEN3_DB_MSIX_VECTOR_COUNT) {
		/* Program INTVECXX source register */
		for (i = 0; i < XEON_GEN3_DB_MSIX_VECTOR_COUNT; i++) {
			/* interrupt source i for vector i */
			intel_ntb_reg_write(1, XEON_GEN3_REG_IMINTVEC00 + i, i);
			if (i == (XEON_GEN3_DB_MSIX_VECTOR_COUNT - 1)) {
				intel_ntb_reg_write(1,
				    XEON_GEN3_REG_IMINTVEC00 + i,
				    XEON_GEN3_LINK_VECTOR_INDEX);
			}
		}

		intel_ntb_create_msix_vec(ntb, num_vectors);
		rc = intel_ntb_setup_msix(ntb, num_vectors);

		/* enable all interrupts */
		intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, 0ULL);
	} else {
		device_printf(ntb->device, "need to remap interrupts, giving up.\n");
		return (ENXIO);
	}

	return (0);
}

static int
intel_ntb_init_isr(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/*
	 * Mask all doorbell interrupts.  (Except link events!)
	 */
	DB_MASK_LOCK(ntb);
	ntb->db_mask = ntb->db_valid_mask;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->db_count);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			rc = intel_ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	if (ntb->type == NTB_XEON_GEN1 && num_vectors < ntb->db_vec_count) {
		if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
			device_printf(ntb->device,
			    "Errata workaround does not support MSI or INTX\n");
			return (EINVAL);
		}

		ntb->db_vec_count = 1;
		ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
		rc = intel_ntb_setup_legacy_interrupt(ntb);
	} else {
		if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS &&
		    HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
			device_printf(ntb->device,
			    "Errata workaround expects %d doorbell bits\n",
			    XEON_NONLINK_DB_MSIX_BITS);
			return (EINVAL);
		}

		intel_ntb_create_msix_vec(ntb, num_vectors);
		rc = intel_ntb_setup_msix(ntb, num_vectors);
	}
	if (rc != 0) {
		device_printf(ntb->device,
		    "Error allocating interrupts: %d\n", rc);
		intel_ntb_free_msix_vec(ntb);
	}

	return (rc);
}

static int
intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
	int rc;

	ntb->int_info[0].rid = 0;
	ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
	    &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
	if (ntb->int_info[0].res == NULL) {
		device_printf(ntb->device, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ntb->int_info[0].tag = NULL;
	ntb->allocated_interrupts = 1;

	rc = bus_setup_intr(ntb->device, ntb->int_info[0].res,
	    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr,
	    ntb, &ntb->int_info[0].tag);
	if (rc != 0) {
		device_printf(ntb->device, "bus_setup_intr failed\n");
		return (ENXIO);
	}

	return (0);
}

static void
intel_ntb_teardown_interrupts(struct ntb_softc *ntb)
{
	struct ntb_int_info *current_int;
	int i;

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	intel_ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

	switch (ntb->type) {
	case NTB_ATOM:
	case NTB_XEON_GEN3:
		return (intel_ntb_reg_read(8, regoff));
	case NTB_XEON_GEN1:
		return (intel_ntb_reg_read(2, regoff));
	}
	__assert_unreachable();
}

static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(val & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);
	db_iowrite_raw(ntb, regoff, val);
}

static inline void
db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	switch (ntb->type) {
	case NTB_ATOM:
	case NTB_XEON_GEN3:
		intel_ntb_reg_write(8, regoff, val);
		break;
	case NTB_XEON_GEN1:
		intel_ntb_reg_write(2, regoff, (uint16_t)val);
		break;
	}
}

static void
intel_ntb_db_set_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	if (!HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

static void
intel_ntb_db_clear_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);
	uint64_t ibits;
	int i;

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ibits = ntb->fake_db & ntb->db_mask & bits;
	ntb->db_mask &= ~bits;
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/* Simulate fake interrupts if unmasked DB bits are set. */
		ntb->force_db |= ibits;
		for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
			if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0)
				swi_sched(ntb->int_info[i].tag, 0);
		}
	} else {
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	}
	DB_MASK_UNLOCK(ntb);
}

static uint64_t
intel_ntb_db_read(device_t dev)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		return (ntb->fake_db);
	if (ntb->type == NTB_XEON_GEN3)
		return (intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS));
	else
		return (db_ioread(ntb, ntb->self_reg->db_bell));
}

static void
intel_ntb_db_clear(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		DB_MASK_LOCK(ntb);
		ntb->fake_db &= ~bits;
		DB_MASK_UNLOCK(ntb);
		return;
	}

	if (ntb->type == NTB_XEON_GEN3)
		intel_ntb_reg_write(4, XEON_GEN3_REG_IMINT_STATUS,
		    (uint32_t)bits);
	else
		db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

static inline uint64_t
intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/*
		 * Remap vectors in a custom way so that at least the first
		 * three doorbells do not generate stray events.  This breaks
		 * Linux compatibility (if such compatibility ever existed)
		 * when more than one DB is used (not the case for if_ntb).
		 */
		if (db_vector < XEON_NONLINK_DB_MSIX_BITS - 1)
			return (1 << db_vector);
		if (db_vector == XEON_NONLINK_DB_MSIX_BITS - 1)
			return (0x7ffc);
	}

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}
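/*
 * In the generic (non-errata) case above, each vector owns a contiguous
 * run of db_vec_shift doorbell bits.  For example, with db_vec_shift == 5,
 * vector 2 maps to mask 0x1f << 10, i.e., doorbell bits 10..14.
 */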

static void
intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = intel_ntb_vec_mask(ntb, vec);

	if (ntb->type == NTB_XEON_GEN3 && vec == XEON_GEN3_LINK_VECTOR_INDEX)
		vec_mask |= ntb->db_link_mask;
	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (intel_ntb_poll_link(ntb))
			ntb_link_event(ntb->device);
		if (ntb->type == NTB_XEON_GEN3)
			intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS,
			    intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS));
	}

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    (vec_mask & ntb->db_link_mask) == 0) {
		DB_MASK_LOCK(ntb);

		/*
		 * Do not report DB events that have not been cleared yet,
		 * unless their mask was just cleared and this interrupt
		 * handler call may be a consequence of that.
		 */
		vec_mask &= ~ntb->fake_db | ntb->force_db;
		ntb->force_db &= ~vec_mask;

		/* Update our internal doorbell register. */
		ntb->fake_db |= vec_mask;

		/* Do not report masked DB events. */
		vec_mask &= ~ntb->db_mask;

		DB_MASK_UNLOCK(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb->device, vec);
}

static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	intel_ntb_interrupt(nvec->ntb, nvec->num);
}

static void
ndev_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	intel_ntb_interrupt(arg, 0);
}

static int
intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;

	ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < num_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

static void
intel_ntb_free_msix_vec(struct ntb_softc *ntb)
{

	if (ntb->msix_vec == NULL)
		return;

	free(ntb->msix_vec, M_NTB);
	ntb->msix_vec = NULL;
}

static void
intel_ntb_get_msix_info(struct ntb_softc *ntb)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_msix *msix;
	uint32_t laddr, data, i, offset;

	dinfo = device_get_ivars(ntb->device);
	msix = &dinfo->cfg.msix;

	CTASSERT(XEON_NONLINK_DB_MSIX_BITS == nitems(ntb->msix_data));

	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
		offset = msix->msix_table_offset + i * PCI_MSIX_ENTRY_SIZE;

		laddr = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_LOWER_ADDR);
		intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);

		KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE,
		    ("local MSIX addr 0x%x not in MSI base 0x%x", laddr,
		     MSI_INTEL_ADDR_BASE));
		ntb->msix_data[i].nmd_ofs = laddr;

		data = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_DATA);
		intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);

		ntb->msix_data[i].nmd_data = data;
	}
}

static struct ntb_hw_info *
intel_ntb_get_device_info(uint32_t device_id)
{
	struct ntb_hw_info *ep;

	for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
		if (ep->device_id == device_id)
			return (ep);
	}
	return (NULL);
}

static void
intel_ntb_teardown_xeon(struct ntb_softc *ntb)
{

	if (ntb->reg != NULL)
		intel_ntb_link_disable(ntb->device);
}

static void
intel_ntb_detect_max_mw(struct ntb_softc *ntb)
{

	switch (ntb->type) {
	case NTB_ATOM:
		ntb->mw_count = ATOM_MW_COUNT;
		break;
	case NTB_XEON_GEN1:
		if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
			ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
		else
			ntb->mw_count = XEON_SNB_MW_COUNT;
		break;
	case NTB_XEON_GEN3:
		if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
			ntb->mw_count = XEON_GEN3_SPLIT_MW_COUNT;
		else
			ntb->mw_count = XEON_GEN3_MW_COUNT;
		break;
	}
}

static int
intel_ntb_detect_xeon(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
	ntb->ppd = ppd;

	if ((ppd & XEON_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
		ntb->features |= NTB_SPLIT_BAR;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
		device_printf(ntb->device,
		    "Can not apply SB01BASE_LOCKUP workaround "
		    "with split BARs disabled!\n");
		device_printf(ntb->device,
		    "Expect system hangs under heavy NTB traffic!\n");
		ntb->features &= ~NTB_SB01BASE_LOCKUP;
	}

	/*
	 * SDOORBELL errata workaround gets in the way of SB01BASE_LOCKUP
	 * errata workaround; only do one at a time.
	 */
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		ntb->features &= ~NTB_SDOORBELL_LOCKUP;

	conn_type = ppd & XEON_PPD_CONN_TYPE;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	case NTB_CONN_RP:
	case NTB_CONN_TRANSPARENT:
	default:
		device_printf(ntb->device, "Unsupported connection type: %u\n",
		    (unsigned)conn_type);
		return (ENXIO);
	}
	return (0);
}

static int
intel_ntb_detect_atom(struct ntb_softc *ntb)
{
	uint32_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
	ntb->ppd = ppd;

	if ((ppd & ATOM_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	default:
		device_printf(ntb->device, "Unsupported NTB configuration\n");
		return (ENXIO);
	}
	return (0);
}

static int
intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, XEON_GEN3_INT_REG_PPD, 1);
	ntb->ppd = ppd;

	/* check port definition */
	conn_type = XEON_GEN3_REG_PPD_PORT_DEF_F(ppd);
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	default:
		device_printf(ntb->device, "Unsupported connection type: %u\n",
		    conn_type);
		return (ENXIO);
	}

	/* check cross link configuration status */
	if (XEON_GEN3_REG_PPD_CONF_STS_F(ppd)) {
		/* NTB Port is configured as DSD/USP */
		ntb->dev_type = NTB_DEV_DSD;
	} else {
		/* NTB Port is configured as USD/DSP */
		ntb->dev_type = NTB_DEV_USD;
	}

	if (XEON_GEN3_REG_PPD_ONE_MSIX_F(ppd)) {
		/*
		 * This bit, when set, causes only a single MSI-X message to
		 * be generated if MSI-X is enabled.
		 */
		ntb->features |= NTB_ONE_MSIX;
	}

	if (XEON_GEN3_REG_PPD_BAR45_SPL_F(ppd)) {
		/* BARs 4 and 5 are presented as two 32b non-prefetchable BARs */
		ntb->features |= NTB_SPLIT_BAR;
	}

	device_printf(ntb->device, "conn type 0x%02x, dev type 0x%02x, "
	    "features 0x%02x\n", ntb->conn_type, ntb->dev_type, ntb->features);

	return (0);
}

static int
intel_ntb_xeon_init_dev(struct ntb_softc *ntb)
{
	int rc;

	ntb->spad_count		= XEON_SPAD_COUNT;
	ntb->db_count		= XEON_DB_COUNT;
	ntb->db_link_mask	= XEON_DB_LINK_BIT;
	ntb->db_vec_count	= XEON_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	= XEON_DB_MSIX_VECTOR_SHIFT;

	if (ntb->conn_type != NTB_CONN_B2B) {
		device_printf(ntb->device, "Connection type %d not supported\n",
		    ntb->conn_type);
		return (ENXIO);
	}

	ntb->reg = &xeon_reg;
	ntb->self_reg = &xeon_pri_reg;
	ntb->peer_reg = &xeon_b2b_reg;
	ntb->xlat_reg = &xeon_sec_xlat;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		ntb->force_db = ntb->fake_db = 0;
		ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) %
		    ntb->mw_count;
		intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
		    g_ntb_msix_idx, ntb->msix_mw_idx);
		rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
		    VM_MEMATTR_UNCACHEABLE);
		KASSERT(rc == 0, ("shouldn't fail"));
	} else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
1756 		/*
		 * There is a Xeon hardware erratum related to writes to
		 * SDOORBELL or B2BDOORBELL in conjunction with inbound access
		 * to NTB MMIO space, which may hang the system.  To work
		 * around this, use a memory window to access the interrupt and
		 * scratch pad registers on the remote system instead; see
		 * intel_ntb_peer_spad_write() and intel_ntb_peer_db_set().
1762 		 */
1763 		ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) %
1764 		    ntb->mw_count;
1765 		intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
1766 		    g_ntb_mw_idx, ntb->b2b_mw_idx);
1767 		rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
1768 		    VM_MEMATTR_UNCACHEABLE);
1769 		KASSERT(rc == 0, ("shouldn't fail"));
1770 	} else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14))
1771 		/*
		 * HW erratum on bit 14 of the b2bdoorbell register: writes
		 * will not be mirrored to the remote system.  Shrink the
		 * number of doorbell bits by one, since bit 14 is the last
		 * bit.
		 *
		 * In REGS_THRU_MW errata mode we don't use the b2bdoorbell
		 * register anyway, nor do we for non-B2B connection types.
1778 		 */
1779 		ntb->db_count = XEON_DB_COUNT - 1;
1780 
1781 	ntb->db_valid_mask = (1ull << ntb->db_count) - 1;
1782 
1783 	if (ntb->dev_type == NTB_DEV_USD)
1784 		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr,
1785 		    &xeon_b2b_usd_addr);
1786 	else
1787 		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr,
1788 		    &xeon_b2b_dsd_addr);
1789 	if (rc != 0)
1790 		return (rc);
1791 
1792 	/* Enable Bus Master and Memory Space on the secondary side */
1793 	intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET,
1794 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1795 
1796 	/*
1797 	 * Mask all doorbell interrupts.
1798 	 */
1799 	DB_MASK_LOCK(ntb);
1800 	ntb->db_mask = ntb->db_valid_mask;
1801 	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
1802 	DB_MASK_UNLOCK(ntb);
1803 
1804 	rc = intel_ntb_init_isr(ntb);
1805 	return (rc);
1806 }
1807 
1808 static int
1809 intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb)
1810 {
1811 	int rc;
1812 
1813 	ntb->spad_count = XEON_GEN3_SPAD_COUNT;
1814 	ntb->db_count = XEON_GEN3_DB_COUNT;
1815 	ntb->db_link_mask = XEON_GEN3_DB_LINK_BIT;
1816 	ntb->db_vec_count = XEON_GEN3_DB_MSIX_VECTOR_COUNT;
1817 	ntb->db_vec_shift = XEON_GEN3_DB_MSIX_VECTOR_SHIFT;
1818 
1819 	if (ntb->conn_type != NTB_CONN_B2B) {
1820 		device_printf(ntb->device, "Connection type %d not supported\n",
1821 		    ntb->conn_type);
1822 		return (ENXIO);
1823 	}
1824 
1825 	ntb->reg = &xeon_gen3_reg;
1826 	ntb->self_reg = &xeon_gen3_pri_reg;
1827 	ntb->peer_reg = &xeon_gen3_b2b_reg;
1828 	ntb->xlat_reg = &xeon_gen3_sec_xlat;
1829 
1830 	ntb->db_valid_mask = (1ULL << ntb->db_count) - 1;
1831 
1832 	xeon_gen3_setup_b2b_mw(ntb);
1833 
1834 	/* Enable Bus Master and Memory Space on the External Side */
1835 	intel_ntb_reg_write(2, XEON_GEN3_EXT_REG_PCI_CMD,
1836 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1837 
	/* Set up interrupts. */
1839 	rc = intel_ntb_xeon_gen3_init_isr(ntb);
1840 
1841 	return (rc);
1842 }
1843 
1844 static int
1845 intel_ntb_atom_init_dev(struct ntb_softc *ntb)
1846 {
1847 	int error;
1848 
1849 	KASSERT(ntb->conn_type == NTB_CONN_B2B,
1850 	    ("Unsupported NTB configuration (%d)\n", ntb->conn_type));
1851 
1852 	ntb->spad_count		 = ATOM_SPAD_COUNT;
1853 	ntb->db_count		 = ATOM_DB_COUNT;
1854 	ntb->db_vec_count	 = ATOM_DB_MSIX_VECTOR_COUNT;
1855 	ntb->db_vec_shift	 = ATOM_DB_MSIX_VECTOR_SHIFT;
1856 	ntb->db_valid_mask	 = (1ull << ntb->db_count) - 1;
1857 
1858 	ntb->reg = &atom_reg;
1859 	ntb->self_reg = &atom_pri_reg;
1860 	ntb->peer_reg = &atom_b2b_reg;
1861 	ntb->xlat_reg = &atom_sec_xlat;
1862 
1863 	/*
1864 	 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is
1865 	 * resolved.  Mask transaction layer internal parity errors.
1866 	 */
1867 	pci_write_config(ntb->device, 0xFC, 0x4, 4);
1868 
1869 	configure_atom_secondary_side_bars(ntb);
1870 
1871 	/* Enable Bus Master and Memory Space on the secondary side */
1872 	intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
1873 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1874 
1875 	error = intel_ntb_init_isr(ntb);
1876 	if (error != 0)
1877 		return (error);
1878 
1879 	/* Initiate PCI-E link training */
1880 	intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
1881 
1882 	callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);
1883 
1884 	return (0);
1885 }
1886 
/*
 * XXX: The Linux driver doesn't seem to do any of this for Atom.  Note that
 * the USD and DSD branches below currently program identical values.
 */
1888 static void
1889 configure_atom_secondary_side_bars(struct ntb_softc *ntb)
1890 {
1891 
1892 	if (ntb->dev_type == NTB_DEV_USD) {
1893 		intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
1894 		    XEON_B2B_BAR2_ADDR64);
1895 		intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
1896 		    XEON_B2B_BAR4_ADDR64);
1897 		intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
1898 		intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
1899 	} else {
1900 		intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
1901 		    XEON_B2B_BAR2_ADDR64);
1902 		intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
1903 		    XEON_B2B_BAR4_ADDR64);
1904 		intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
1905 		intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
1906 	}
1907 }
1908 
1909 /*
 * When working around the Xeon SDOORBELL erratum by remapping remote
 * registers in an MW, limit the B2B MW to half the MW.  By sharing the MW,
 * the other half remains for use by a higher layer.
 *
 * This is only used when the SDOORBELL workaround is active and the
 * BIOS-configured MW size is sufficiently large.
1916  */
1917 static unsigned int ntb_b2b_mw_share;
1918 SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share,
1919     0, "If enabled (non-zero), prefer to share half of the B2B peer register "
1920     "MW with higher level consumers.  Both sides of the NTB MUST set the same "
1921     "value here.");
1922 
1923 static void
1924 xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
1925     enum ntb_bar regbar)
1926 {
1927 	struct ntb_pci_bar_info *bar;
1928 	uint8_t bar_sz;
1929 
1930 	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
1931 		return;
1932 
1933 	bar = &ntb->bar_info[idx];
1934 	bar_sz = pci_read_config(ntb->device, bar->psz_off, 1);
1935 	if (idx == regbar) {
1936 		if (ntb->b2b_off != 0)
1937 			bar_sz--;
1938 		else
1939 			bar_sz = 0;
1940 	}
1941 	pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1);
1942 	bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1);
1943 	(void)bar_sz;
1944 }
1945 
1946 static void
1947 xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
1948     enum ntb_bar idx, enum ntb_bar regbar)
1949 {
1950 	uint64_t reg_val;
1951 	uint32_t base_reg, lmt_reg;
1952 
1953 	bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
1954 	if (idx == regbar) {
1955 		if (ntb->b2b_off)
1956 			bar_addr += ntb->b2b_off;
1957 		else
1958 			bar_addr = 0;
1959 	}
1960 
1961 	if (!bar_is_64bit(ntb, idx)) {
1962 		intel_ntb_reg_write(4, base_reg, bar_addr);
1963 		reg_val = intel_ntb_reg_read(4, base_reg);
1964 		(void)reg_val;
1965 
1966 		intel_ntb_reg_write(4, lmt_reg, bar_addr);
1967 		reg_val = intel_ntb_reg_read(4, lmt_reg);
1968 		(void)reg_val;
1969 	} else {
1970 		intel_ntb_reg_write(8, base_reg, bar_addr);
1971 		reg_val = intel_ntb_reg_read(8, base_reg);
1972 		(void)reg_val;
1973 
1974 		intel_ntb_reg_write(8, lmt_reg, bar_addr);
1975 		reg_val = intel_ntb_reg_read(8, lmt_reg);
1976 		(void)reg_val;
1977 	}
1978 }
1979 
1980 static void
1981 xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
1982 {
1983 	struct ntb_pci_bar_info *bar;
1984 
1985 	bar = &ntb->bar_info[idx];
1986 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
1987 		intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr);
1988 		base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off);
1989 	} else {
1990 		intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr);
1991 		base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off);
1992 	}
1993 	(void)base_addr;
1994 }
1995 
1996 static int
1997 xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
1998     const struct ntb_b2b_addr *peer_addr)
1999 {
2000 	struct ntb_pci_bar_info *b2b_bar;
2001 	vm_size_t bar_size;
2002 	uint64_t bar_addr;
2003 	enum ntb_bar b2b_bar_num, i;
2004 
2005 	if (ntb->b2b_mw_idx == B2B_MW_DISABLED) {
2006 		b2b_bar = NULL;
2007 		b2b_bar_num = NTB_CONFIG_BAR;
2008 		ntb->b2b_off = 0;
2009 	} else {
2010 		b2b_bar_num = intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
2011 		KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
2012 		    ("invalid b2b mw bar"));
2013 
2014 		b2b_bar = &ntb->bar_info[b2b_bar_num];
2015 		bar_size = b2b_bar->size;
2016 
		if (ntb_b2b_mw_share != 0 &&
		    (bar_size >> 1) >= XEON_B2B_MIN_SIZE) {
			ntb->b2b_off = bar_size >> 1;
		} else if (bar_size >= XEON_B2B_MIN_SIZE) {
			ntb->b2b_off = 0;
		} else {
			device_printf(ntb->device,
			    "B2B bar size is too small!\n");
			return (EIO);
		}
2027 	}
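
	/*
	 * At this point, b2b_off is either 0 (the whole BAR is consumed by
	 * the peer register window) or bar_size / 2 when hw.ntb.b2b_mw_share
	 * is set and the BAR is large enough.  In the latter case one half of
	 * the BAR carries the B2B peer registers and the other half remains
	 * available to consumers; see intel_ntb_mw_get_range().
	 */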
2028 
2029 	/*
2030 	 * Reset the secondary bar sizes to match the primary bar sizes.
	 * (Except the B2B secondary bar, which is disabled or halved in size.)
2032 	 */
2033 	for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++)
2034 		xeon_reset_sbar_size(ntb, i, b2b_bar_num);
2035 
2036 	bar_addr = 0;
2037 	if (b2b_bar_num == NTB_CONFIG_BAR)
2038 		bar_addr = addr->bar0_addr;
2039 	else if (b2b_bar_num == NTB_B2B_BAR_1)
2040 		bar_addr = addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2042 		bar_addr = addr->bar4_addr64;
2043 	else if (b2b_bar_num == NTB_B2B_BAR_2)
2044 		bar_addr = addr->bar4_addr32;
2045 	else if (b2b_bar_num == NTB_B2B_BAR_3)
2046 		bar_addr = addr->bar5_addr32;
2047 	else
2048 		KASSERT(false, ("invalid bar"));
2049 
2050 	intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
2051 
2052 	/*
2053 	 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
2054 	 * register BAR.  The B2B BAR is either disabled above or configured
2055 	 * half-size.  It starts at PBAR xlat + offset.
2056 	 *
2057 	 * Also set up incoming BAR limits == base (zero length window).
2058 	 */
2059 	xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
2060 	    b2b_bar_num);
2061 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2062 		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
2063 		    NTB_B2B_BAR_2, b2b_bar_num);
2064 		xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
2065 		    NTB_B2B_BAR_3, b2b_bar_num);
2066 	} else
2067 		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64,
2068 		    NTB_B2B_BAR_2, b2b_bar_num);
2069 
2070 	/* Zero incoming translation addrs */
2071 	intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
2072 	intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
2073 
2074 	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2075 		uint32_t xlat_reg, lmt_reg;
2076 		enum ntb_bar bar_num;
2077 
2078 		/*
		 * Point the chosen MSI-X MW BAR xlat at the remote LAPIC MSI
		 * address range for the SB01BASE_LOCKUP workaround.
2081 		 */
2082 		bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
2083 		bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg);
2084 		if (bar_is_64bit(ntb, bar_num)) {
2085 			intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE);
2086 			ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg);
2087 			intel_ntb_reg_write(8, lmt_reg, 0);
2088 		} else {
2089 			intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE);
2090 			ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg);
2091 			intel_ntb_reg_write(4, lmt_reg, 0);
2092 		}
2093 
		ntb->peer_lapic_bar = &ntb->bar_info[bar_num];
2095 	}
2096 	(void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
2097 	(void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
2098 
2099 	/* Zero outgoing translation limits (whole bar size windows) */
2100 	intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
2101 	intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
2102 
2103 	/* Set outgoing translation offsets */
2104 	xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
2105 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2106 		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
2107 		xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
2108 	} else
2109 		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2);
2110 
2111 	/* Set the translation offset for B2B registers */
2112 	bar_addr = 0;
2113 	if (b2b_bar_num == NTB_CONFIG_BAR)
2114 		bar_addr = peer_addr->bar0_addr;
2115 	else if (b2b_bar_num == NTB_B2B_BAR_1)
2116 		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2118 		bar_addr = peer_addr->bar4_addr64;
2119 	else if (b2b_bar_num == NTB_B2B_BAR_2)
2120 		bar_addr = peer_addr->bar4_addr32;
2121 	else if (b2b_bar_num == NTB_B2B_BAR_3)
2122 		bar_addr = peer_addr->bar5_addr32;
2123 	else
2124 		KASSERT(false, ("invalid bar"));
2125 
2126 	/*
2127 	 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
2128 	 * at a time.
2129 	 */
2130 	intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
2131 	intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
2132 	return (0);
2133 }
2134 
2135 static int
2136 xeon_gen3_setup_b2b_mw(struct ntb_softc *ntb)
2137 {
2138 	uint64_t reg;
2139 	uint32_t embarsz, imbarsz;
2140 
2141 	/* IMBAR1SZ should be equal to EMBAR1SZ */
2142 	embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR1SZ, 1);
2143 	imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR1SZ, 1);
2144 	if (embarsz != imbarsz) {
2145 		device_printf(ntb->device,
2146 		    "IMBAR1SZ (%u) should be equal to EMBAR1SZ (%u)\n",
2147 		    imbarsz, embarsz);
2148 		return (EIO);
2149 	}
2150 
2151 	/* IMBAR2SZ should be equal to EMBAR2SZ */
2152 	embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR2SZ, 1);
2153 	imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR2SZ, 1);
2154 	if (embarsz != imbarsz) {
2155 		device_printf(ntb->device,
2156 		    "IMBAR2SZ (%u) should be equal to EMBAR2SZ (%u)\n",
2157 		    imbarsz, embarsz);
2158 		return (EIO);
2159 	}
2160 
	/* The client will provide the incoming IMBAR1/2XBASE; zero for now. */
2162 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XBASE, 0);
2163 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XBASE, 0);
2164 
2165 	/*
	 * Setting IMBAR1XLIMIT equal to IMBAR1XBASE disables the local memory
	 * window exposure from EMBAR1.  This is needed to prevent malicious
	 * access until a client configures the window.
2169 	 */
2170 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XLIMIT, 0);
2171 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XLIMIT, 0);
2172 
2173 	/* Config outgoing translation limits (whole bar size windows) */
2174 	reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR1XBASE);
2175 	reg += ntb->bar_info[NTB_B2B_BAR_1].size;
2176 	intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR1XLIMIT, reg);
2177 
2178 	reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR2XBASE);
2179 	reg += ntb->bar_info[NTB_B2B_BAR_2].size;
2180 	intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR2XLIMIT, reg);
2181 
2182 	return (0);
2183 }
2184 
2185 static inline bool
2186 _xeon_link_is_up(struct ntb_softc *ntb)
2187 {
2188 
2189 	if (ntb->conn_type == NTB_CONN_TRANSPARENT)
2190 		return (true);
2191 	return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
2192 }
2193 
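/*
 * On Xeon, when the SB01BASE_LOCKUP workaround is active, the link is only
 * reported up once the MSI-X exchange with the peer has completed
 * (peer_msix_good); see intel_ntb_exchange_msix().
 */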
2194 static inline bool
2195 link_is_up(struct ntb_softc *ntb)
2196 {
2197 
2198 	if (ntb->type == NTB_XEON_GEN1 || ntb->type == NTB_XEON_GEN3)
2199 		return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good ||
2200 		    !HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)));
2201 
2202 	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
2203 	return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
2204 }
2205 
2206 static inline bool
2207 atom_link_is_err(struct ntb_softc *ntb)
2208 {
2209 	uint32_t status;
2210 
2211 	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
2212 
2213 	status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
2214 	if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
2215 		return (true);
2216 
2217 	status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
2218 	return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
2219 }
2220 
/* Atom has no link status interrupt; poll the link on that platform. */
2222 static void
2223 atom_link_hb(void *arg)
2224 {
2225 	struct ntb_softc *ntb = arg;
2226 	sbintime_t timo, poll_ts;
2227 
2228 	timo = NTB_HB_TIMEOUT * hz;
2229 	poll_ts = ntb->last_ts + timo;
2230 
2231 	/*
2232 	 * Delay polling the link status if an interrupt was received, unless
2233 	 * the cached link status says the link is down.
2234 	 */
2235 	if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
2236 		timo = poll_ts - ticks;
2237 		goto out;
2238 	}
2239 
2240 	if (intel_ntb_poll_link(ntb))
2241 		ntb_link_event(ntb->device);
2242 
2243 	if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
2244 		/* Link is down with error, proceed with recovery */
2245 		callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
2246 		return;
2247 	}
2248 
2249 out:
2250 	callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
2251 }
2252 
2253 static void
2254 atom_perform_link_restart(struct ntb_softc *ntb)
2255 {
2256 	uint32_t status;
2257 
2258 	/* Driver resets the NTB ModPhy lanes - magic! */
2259 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
2260 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
2261 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
2262 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
2263 
2264 	/* Driver waits 100ms to allow the NTB ModPhy to settle */
2265 	pause("ModPhy", hz / 10);
2266 
2267 	/* Clear AER Errors, write to clear */
2268 	status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
2269 	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
2270 	intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
2271 
2272 	/* Clear unexpected electrical idle event in LTSSM, write to clear */
2273 	status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
2274 	status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
2275 	intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
2276 
2277 	/* Clear DeSkew Buffer error, write to clear */
2278 	status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
2279 	status |= ATOM_DESKEWSTS_DBERR;
2280 	intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
2281 
2282 	status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
2283 	status &= ATOM_IBIST_ERR_OFLOW;
2284 	intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
2285 
2286 	/* Releases the NTB state machine to allow the link to retrain */
2287 	status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
2288 	status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
2289 	intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
2290 }
2291 
2292 static int
2293 intel_ntb_port_number(device_t dev)
2294 {
2295 	struct ntb_softc *ntb = device_get_softc(dev);
2296 
2297 	return (ntb->dev_type == NTB_DEV_USD ? 0 : 1);
2298 }
2299 
2300 static int
2301 intel_ntb_peer_port_count(device_t dev)
2302 {
2303 
2304 	return (1);
2305 }
2306 
2307 static int
2308 intel_ntb_peer_port_number(device_t dev, int pidx)
2309 {
2310 	struct ntb_softc *ntb = device_get_softc(dev);
2311 
2312 	if (pidx != 0)
2313 		return (-EINVAL);
2314 
2315 	return (ntb->dev_type == NTB_DEV_USD ? 1 : 0);
2316 }
2317 
2318 static int
2319 intel_ntb_peer_port_idx(device_t dev, int port)
2320 {
2321 	int peer_port;
2322 
2323 	peer_port = intel_ntb_peer_port_number(dev, 0);
2324 	if (peer_port == -EINVAL || port != peer_port)
2325 		return (-EINVAL);
2326 
2327 	return (0);
2328 }
2329 
2330 static int
2331 intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
2332     enum ntb_width width __unused)
2333 {
2334 	struct ntb_softc *ntb = device_get_softc(dev);
2335 	uint32_t cntl;
2336 
2337 	intel_ntb_printf(2, "%s\n", __func__);
2338 
2339 	if (ntb->type == NTB_ATOM) {
2340 		pci_write_config(ntb->device, NTB_PPD_OFFSET,
2341 		    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
2342 		return (0);
2343 	}
2344 
2345 	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
2346 		ntb_link_event(dev);
2347 		return (0);
2348 	}
2349 
2350 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2351 	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
2352 	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
2353 	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
2354 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2355 		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
2356 	intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2357 	return (0);
2358 }
2359 
2360 static int
2361 intel_ntb_link_disable(device_t dev)
2362 {
2363 	struct ntb_softc *ntb = device_get_softc(dev);
2364 	uint32_t cntl;
2365 
2366 	intel_ntb_printf(2, "%s\n", __func__);
2367 
2368 	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
2369 		ntb_link_event(dev);
2370 		return (0);
2371 	}
2372 
2373 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2374 	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
2375 	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
2376 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2377 		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
2378 	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
2379 	intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2380 	return (0);
2381 }
2382 
2383 static bool
2384 intel_ntb_link_enabled(device_t dev)
2385 {
2386 	struct ntb_softc *ntb = device_get_softc(dev);
2387 	uint32_t cntl;
2388 
2389 	if (ntb->type == NTB_ATOM) {
2390 		cntl = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
2391 		return ((cntl & ATOM_PPD_INIT_LINK) != 0);
2392 	}
2393 
2394 	if (ntb->conn_type == NTB_CONN_TRANSPARENT)
2395 		return (true);
2396 
2397 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2398 	return ((cntl & NTB_CNTL_LINK_DISABLE) == 0);
2399 }
2400 
2401 static void
2402 recover_atom_link(void *arg)
2403 {
2404 	struct ntb_softc *ntb = arg;
2405 	unsigned speed, width, oldspeed, oldwidth;
2406 	uint32_t status32;
2407 
2408 	atom_perform_link_restart(ntb);
2409 
2410 	/*
	 * There is a potential race if both NTB devices attempt recovery at
	 * the same time: if their recovery intervals coincide exactly, the
	 * link will never recover and the driver will be stuck in this loop
	 * forever.  Add a random interval to the recovery time to break the
	 * tie; the resulting pause below lasts between one and two recovery
	 * intervals.
2415 	 */
2416 	status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
2417 	pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);
2418 
2419 	if (atom_link_is_err(ntb))
2420 		goto retry;
2421 
2422 	status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2423 	if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
2424 		goto out;
2425 
2426 	status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2427 	width = NTB_LNK_STA_WIDTH(status32);
2428 	speed = status32 & NTB_LINK_SPEED_MASK;
2429 
2430 	oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
2431 	oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
2432 	if (oldwidth != width || oldspeed != speed)
2433 		goto retry;
2434 
2435 out:
2436 	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
2437 	    ntb);
2438 	return;
2439 
2440 retry:
2441 	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
2442 	    ntb);
2443 }
2444 
2445 /*
2446  * Polls the HW link status register(s); returns true if something has changed.
2447  */
2448 static bool
2449 intel_ntb_poll_link(struct ntb_softc *ntb)
2450 {
2451 	uint32_t ntb_cntl;
2452 	uint16_t reg_val;
2453 
2454 	if (ntb->type == NTB_ATOM) {
2455 		ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2456 		if (ntb_cntl == ntb->ntb_ctl)
2457 			return (false);
2458 
2459 		ntb->ntb_ctl = ntb_cntl;
2460 		ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2461 	} else {
2462 		if (ntb->type == NTB_XEON_GEN1)
2463 			db_iowrite_raw(ntb, ntb->self_reg->db_bell,
2464 			    ntb->db_link_mask);
2465 
2466 		reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
2467 		if (reg_val == ntb->lnk_sta)
2468 			return (false);
2469 
2470 		ntb->lnk_sta = reg_val;
2471 
2472 		if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2473 			if (_xeon_link_is_up(ntb)) {
2474 				if (!ntb->peer_msix_good) {
2475 					callout_reset(&ntb->peer_msix_work, 0,
2476 					    intel_ntb_exchange_msix, ntb);
2477 					return (false);
2478 				}
2479 			} else {
2480 				ntb->peer_msix_good = false;
2481 				ntb->peer_msix_done = false;
2482 			}
2483 		}
2484 	}
2485 	return (true);
2486 }
2487 
2488 static inline enum ntb_speed
2489 intel_ntb_link_sta_speed(struct ntb_softc *ntb)
2490 {
2491 
2492 	if (!link_is_up(ntb))
2493 		return (NTB_SPEED_NONE);
2494 	return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
2495 }
2496 
2497 static inline enum ntb_width
2498 intel_ntb_link_sta_width(struct ntb_softc *ntb)
2499 {
2500 
2501 	if (!link_is_up(ntb))
2502 		return (NTB_WIDTH_NONE);
2503 	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
2504 }
2505 
2506 SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
2507     "Driver state, statistics, and HW registers");
2508 
2509 #define NTB_REGSZ_MASK	(3ul << 30)
2510 #define NTB_REG_64	(1ul << 30)
2511 #define NTB_REG_32	(2ul << 30)
2512 #define NTB_REG_16	(3ul << 30)
2513 #define NTB_REG_8	(0ul << 30)
2514 
2515 #define NTB_DB_READ	(1ul << 29)
2516 #define NTB_PCI_REG	(1ul << 28)
2517 #define NTB_REGFLAGS_MASK	(NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG)
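
/*
 * The sysctl arg2 word for sysctl_handle_register() packs an access size
 * (NTB_REG_*), access-type flags (NTB_DB_READ, NTB_PCI_REG), and the
 * register offset into a single value.  For example, an entry registered
 * below with
 *
 *	NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET
 *
 * decodes to a 4-byte pci_read_config() of XEON_UNCERRSTS_OFFSET, while a
 * plain NTB_REG_64 offset is an 8-byte MMIO read via intel_ntb_reg_read().
 */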
2518 
2519 static void
2520 intel_ntb_sysctl_init(struct ntb_softc *ntb)
2521 {
2522 	struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar;
2523 	struct sysctl_ctx_list *ctx;
2524 	struct sysctl_oid *tree, *tmptree;
2525 
2526 	ctx = device_get_sysctl_ctx(ntb->device);
2527 	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
2528 
2529 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "link_status",
2530 	    CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, ntb, 0,
2531 	    sysctl_handle_link_status_human, "A",
2532 	    "Link status (human readable)");
2533 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "active",
2534 	    CTLFLAG_RD | CTLTYPE_UINT | CTLFLAG_MPSAFE, ntb, 0,
2535 	    sysctl_handle_link_status, "IU",
2536 	    "Link status (1=active, 0=inactive)");
2537 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "admin_up",
2538 	    CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, ntb, 0,
2539 	    sysctl_handle_link_admin, "IU",
2540 	    "Set/get interface status (1=UP, 0=DOWN)");
2541 
2542 	tree = SYSCTL_ADD_NODE(ctx, globals, OID_AUTO, "debug_info",
2543 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2544 	    "Driver state, statistics, and HW registers");
2545 	tree_par = SYSCTL_CHILDREN(tree);
2546 
2547 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD,
2548 	    &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port");
2549 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD,
2550 	    &ntb->dev_type, 0, "0 - USD; 1 - DSD");
2551 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD,
2552 	    &ntb->ppd, 0, "Raw PPD register (cached)");
2553 
2554 	if (ntb->b2b_mw_idx != B2B_MW_DISABLED) {
2555 		SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD,
2556 		    &ntb->b2b_mw_idx, 0,
2557 		    "Index of the MW used for B2B remote register access");
2558 		SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off",
2559 		    CTLFLAG_RD, &ntb->b2b_off,
2560 		    "If non-zero, offset of B2B register region in shared MW");
2561 	}
2562 
2563 	SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features",
2564 	    CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, ntb, 0,
2565 	    sysctl_handle_features, "A", "Features/errata of this NTB device");
2566 
2567 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD,
2568 	    __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0,
2569 	    "NTB CTL register (cached)");
2570 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD,
2571 	    __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0,
2572 	    "LNK STA register (cached)");
2573 
2574 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD,
2575 	    &ntb->mw_count, 0, "MW count");
2576 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD,
2577 	    &ntb->spad_count, 0, "Scratchpad count");
2578 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD,
2579 	    &ntb->db_count, 0, "Doorbell count");
2580 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD,
2581 	    &ntb->db_vec_count, 0, "Doorbell vector count");
2582 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD,
2583 	    &ntb->db_vec_shift, 0, "Doorbell vector shift");
2584 
2585 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD,
2586 	    &ntb->db_valid_mask, "Doorbell valid mask");
2587 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD,
2588 	    &ntb->db_link_mask, "Doorbell link mask");
2589 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD,
2590 	    &ntb->db_mask, "Doorbell mask (cached)");
2591 
2592 	tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers",
2593 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2594 	    "Raw HW registers (big-endian)");
2595 	regpar = SYSCTL_CHILDREN(tmptree);
2596 
2597 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl",
2598 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2599 	    NTB_REG_32 | ntb->reg->ntb_ctl, sysctl_handle_register, "IU",
2600 	    "NTB Control register");
2601 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap",
2602 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2603 	    NTB_REG_32 | 0x19c, sysctl_handle_register, "IU",
2604 	    "NTB Link Capabilities");
2605 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon",
2606 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2607 	    NTB_REG_32 | 0x1a0, sysctl_handle_register, "IU",
2608 	    "NTB Link Control register");
2609 
2610 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask",
2611 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2612 	    NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask,
2613 	    sysctl_handle_register, "QU", "Doorbell mask register");
2614 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell",
2615 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2616 	    NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell,
2617 	    sysctl_handle_register, "QU", "Doorbell register");
2618 
2619 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23",
2620 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2621 	    NTB_REG_64 | ntb->xlat_reg->bar2_xlat,
2622 	    sysctl_handle_register, "QU", "Incoming XLAT23 register");
2623 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2624 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4",
2625 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2626 		    NTB_REG_32 | ntb->xlat_reg->bar4_xlat,
2627 		    sysctl_handle_register, "IU", "Incoming XLAT4 register");
2628 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5",
2629 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2630 		    NTB_REG_32 | ntb->xlat_reg->bar5_xlat,
2631 		    sysctl_handle_register, "IU", "Incoming XLAT5 register");
2632 	} else {
2633 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45",
2634 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2635 		    NTB_REG_64 | ntb->xlat_reg->bar4_xlat,
2636 		    sysctl_handle_register, "QU", "Incoming XLAT45 register");
2637 	}
2638 
2639 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23",
2640 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2641 	    NTB_REG_64 | ntb->xlat_reg->bar2_limit,
2642 	    sysctl_handle_register, "QU", "Incoming LMT23 register");
2643 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2644 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4",
2645 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2646 		    NTB_REG_32 | ntb->xlat_reg->bar4_limit,
2647 		    sysctl_handle_register, "IU", "Incoming LMT4 register");
2648 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5",
2649 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2650 		    NTB_REG_32 | ntb->xlat_reg->bar5_limit,
2651 		    sysctl_handle_register, "IU", "Incoming LMT5 register");
2652 	} else {
2653 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45",
2654 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2655 		    NTB_REG_64 | ntb->xlat_reg->bar4_limit,
2656 		    sysctl_handle_register, "QU", "Incoming LMT45 register");
2657 	}
2658 
2659 	if (ntb->type == NTB_ATOM)
2660 		return;
2661 
2662 	tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats",
2663 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW statistics");
2664 	statpar = SYSCTL_CHILDREN(tmptree);
2665 	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss",
2666 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2667 	    NTB_REG_16 | XEON_USMEMMISS_OFFSET,
2668 	    sysctl_handle_register, "SU", "Upstream Memory Miss");
2669 
2670 	tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err",
2671 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW errors");
2672 	errpar = SYSCTL_CHILDREN(tmptree);
2673 
2674 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd",
2675 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2676 	    NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET,
2677 	    sysctl_handle_register, "CU", "PPD");
2678 
2679 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz",
2680 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2681 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET,
2682 	    sysctl_handle_register, "CU", "PBAR23 SZ (log2)");
2683 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz",
2684 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2685 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET,
2686 	    sysctl_handle_register, "CU", "PBAR4 SZ (log2)");
2687 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz",
2688 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2689 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET,
2690 	    sysctl_handle_register, "CU", "PBAR5 SZ (log2)");
2691 
2692 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz",
2693 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2694 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET,
2695 	    sysctl_handle_register, "CU", "SBAR23 SZ (log2)");
2696 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz",
2697 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2698 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET,
2699 	    sysctl_handle_register, "CU", "SBAR4 SZ (log2)");
2700 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz",
2701 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2702 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET,
2703 	    sysctl_handle_register, "CU", "SBAR5 SZ (log2)");
2704 
2705 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts",
2706 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2707 	    NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET,
2708 	    sysctl_handle_register, "SU", "DEVSTS");
2709 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts",
2710 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2711 	    NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET,
2712 	    sysctl_handle_register, "SU", "LNKSTS");
2713 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts",
2714 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2715 	    NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET,
2716 	    sysctl_handle_register, "SU", "SLNKSTS");
2717 
2718 	SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts",
2719 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2720 	    NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET,
2721 	    sysctl_handle_register, "IU", "UNCERRSTS");
2722 	SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts",
2723 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2724 	    NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET,
2725 	    sysctl_handle_register, "IU", "CORERRSTS");
2726 
2727 	if (ntb->conn_type != NTB_CONN_B2B)
2728 		return;
2729 
2730 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01l",
2731 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2732 	    NTB_REG_32 | XEON_B2B_XLAT_OFFSETL,
2733 	    sysctl_handle_register, "IU", "Outgoing XLAT0L register");
2734 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01u",
2735 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2736 	    NTB_REG_32 | XEON_B2B_XLAT_OFFSETU,
2737 	    sysctl_handle_register, "IU", "Outgoing XLAT0U register");
2738 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23",
2739 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2740 	    NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off,
2741 	    sysctl_handle_register, "QU", "Outgoing XLAT23 register");
2742 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2743 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4",
2744 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2745 		    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2746 		    sysctl_handle_register, "IU", "Outgoing XLAT4 register");
2747 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5",
2748 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2749 		    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off,
2750 		    sysctl_handle_register, "IU", "Outgoing XLAT5 register");
2751 	} else {
2752 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45",
2753 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2754 		    NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2755 		    sysctl_handle_register, "QU", "Outgoing XLAT45 register");
2756 	}
2757 
2758 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23",
2759 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2760 	    NTB_REG_64 | XEON_PBAR2LMT_OFFSET,
2761 	    sysctl_handle_register, "QU", "Outgoing LMT23 register");
2762 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2763 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4",
2764 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2765 		    NTB_REG_32 | XEON_PBAR4LMT_OFFSET,
2766 		    sysctl_handle_register, "IU", "Outgoing LMT4 register");
2767 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5",
2768 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2769 		    NTB_REG_32 | XEON_PBAR5LMT_OFFSET,
2770 		    sysctl_handle_register, "IU", "Outgoing LMT5 register");
2771 	} else {
2772 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45",
2773 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2774 		    NTB_REG_64 | XEON_PBAR4LMT_OFFSET,
2775 		    sysctl_handle_register, "QU", "Outgoing LMT45 register");
2776 	}
2777 
2778 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base",
2779 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2780 	    NTB_REG_64 | ntb->xlat_reg->bar0_base,
2781 	    sysctl_handle_register, "QU", "Secondary BAR01 base register");
2782 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base",
2783 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2784 	    NTB_REG_64 | ntb->xlat_reg->bar2_base,
2785 	    sysctl_handle_register, "QU", "Secondary BAR23 base register");
2786 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2787 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base",
2788 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2789 		    NTB_REG_32 | ntb->xlat_reg->bar4_base,
2790 		    sysctl_handle_register, "IU",
2791 		    "Secondary BAR4 base register");
2792 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base",
2793 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2794 		    NTB_REG_32 | ntb->xlat_reg->bar5_base,
2795 		    sysctl_handle_register, "IU",
2796 		    "Secondary BAR5 base register");
2797 	} else {
2798 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
2799 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2800 		    NTB_REG_64 | ntb->xlat_reg->bar4_base,
2801 		    sysctl_handle_register, "QU",
2802 		    "Secondary BAR45 base register");
2803 	}
2804 }
2805 
2806 static int
2807 sysctl_handle_features(SYSCTL_HANDLER_ARGS)
2808 {
2809 	struct ntb_softc *ntb = arg1;
2810 	struct sbuf sb;
2811 	int error;
2812 
2813 	sbuf_new_for_sysctl(&sb, NULL, 256, req);
2814 
2815 	sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
2816 	error = sbuf_finish(&sb);
2817 	sbuf_delete(&sb);
2818 
2819 	if (error || !req->newptr)
2820 		return (error);
2821 	return (EINVAL);
2822 }
2823 
2824 static int
2825 sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
2826 {
2827 	struct ntb_softc *ntb = arg1;
2828 	unsigned old, new;
2829 	int error;
2830 
2831 	old = intel_ntb_link_enabled(ntb->device);
2832 
2833 	error = SYSCTL_OUT(req, &old, sizeof(old));
2834 	if (error != 0 || req->newptr == NULL)
2835 		return (error);
2836 
2837 	error = SYSCTL_IN(req, &new, sizeof(new));
2838 	if (error != 0)
2839 		return (error);
2840 
2841 	intel_ntb_printf(0, "Admin set interface state to '%sabled'\n",
	    (new != 0) ? "en" : "dis");
2843 
2844 	if (new != 0)
		error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO,
		    NTB_WIDTH_AUTO);
2846 	else
2847 		error = intel_ntb_link_disable(ntb->device);
2848 	return (error);
2849 }
2850 
2851 static int
2852 sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)
2853 {
2854 	struct ntb_softc *ntb = arg1;
2855 	struct sbuf sb;
2856 	enum ntb_speed speed;
2857 	enum ntb_width width;
2858 	int error;
2859 
2860 	sbuf_new_for_sysctl(&sb, NULL, 32, req);
2861 
2862 	if (intel_ntb_link_is_up(ntb->device, &speed, &width))
2863 		sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
2864 		    (unsigned)speed, (unsigned)width);
2865 	else
2866 		sbuf_printf(&sb, "down");
2867 
2868 	error = sbuf_finish(&sb);
2869 	sbuf_delete(&sb);
2870 
2871 	if (error || !req->newptr)
2872 		return (error);
2873 	return (EINVAL);
2874 }
2875 
2876 static int
2877 sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
2878 {
2879 	struct ntb_softc *ntb = arg1;
2880 	unsigned res;
2881 	int error;
2882 
2883 	res = intel_ntb_link_is_up(ntb->device, NULL, NULL);
2884 
2885 	error = SYSCTL_OUT(req, &res, sizeof(res));
2886 	if (error || !req->newptr)
2887 		return (error);
2888 	return (EINVAL);
2889 }
2890 
2891 static int
2892 sysctl_handle_register(SYSCTL_HANDLER_ARGS)
2893 {
2894 	struct ntb_softc *ntb;
2895 	const void *outp;
2896 	uintptr_t sz;
2897 	uint64_t umv;
2898 	char be[sizeof(umv)];
2899 	size_t outsz;
2900 	uint32_t reg;
2901 	bool db, pci;
2902 	int error;
2903 
2904 	ntb = arg1;
2905 	reg = arg2 & ~NTB_REGFLAGS_MASK;
2906 	sz = arg2 & NTB_REGSZ_MASK;
2907 	db = (arg2 & NTB_DB_READ) != 0;
2908 	pci = (arg2 & NTB_PCI_REG) != 0;
2909 
2910 	KASSERT(!(db && pci), ("bogus"));
2911 
2912 	if (db) {
2913 		KASSERT(sz == NTB_REG_64, ("bogus"));
2914 		umv = db_ioread(ntb, reg);
2915 		outsz = sizeof(uint64_t);
2916 	} else {
2917 		switch (sz) {
2918 		case NTB_REG_64:
2919 			if (pci)
2920 				umv = pci_read_config(ntb->device, reg, 8);
2921 			else
2922 				umv = intel_ntb_reg_read(8, reg);
2923 			outsz = sizeof(uint64_t);
2924 			break;
2925 		case NTB_REG_32:
2926 			if (pci)
2927 				umv = pci_read_config(ntb->device, reg, 4);
2928 			else
2929 				umv = intel_ntb_reg_read(4, reg);
2930 			outsz = sizeof(uint32_t);
2931 			break;
2932 		case NTB_REG_16:
2933 			if (pci)
2934 				umv = pci_read_config(ntb->device, reg, 2);
2935 			else
2936 				umv = intel_ntb_reg_read(2, reg);
2937 			outsz = sizeof(uint16_t);
2938 			break;
2939 		case NTB_REG_8:
2940 			if (pci)
2941 				umv = pci_read_config(ntb->device, reg, 1);
2942 			else
2943 				umv = intel_ntb_reg_read(1, reg);
2944 			outsz = sizeof(uint8_t);
2945 			break;
2946 		default:
2947 			panic("bogus");
2948 			break;
2949 		}
2950 	}
2951 
	/*
	 * Encode big-endian so that sysctl -x output is legible; e.g., a
	 * 32-bit value 0x12345678 is emitted as the byte sequence 12 34 56 78.
	 */
2953 	be64enc(be, umv);
2954 	outp = ((char *)be) + sizeof(umv) - outsz;
2955 
2956 	error = SYSCTL_OUT(req, outp, outsz);
2957 	if (error || !req->newptr)
2958 		return (error);
2959 	return (EINVAL);
2960 }
2961 
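/*
 * Translate a consumer-visible memory window index into the raw hardware MW
 * index, skipping over any MWs consumed internally by the SDOORBELL_LOCKUP
 * workaround (b2b_mw_idx, when the MW is not shared) and the SB01BASE_LOCKUP
 * workaround (msix_mw_idx).  For example, with mw_count = 3, b2b_mw_idx = 1,
 * b2b_off == 0, and MSI-X remapping disabled, user index 0 maps to 0 and
 * user index 1 maps to 2.
 */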
2962 static unsigned
2963 intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
2964 {
2965 
2966 	if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2967 	    uidx >= ntb->b2b_mw_idx) ||
2968 	    (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2969 		uidx++;
2970 	if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2971 	    uidx >= ntb->b2b_mw_idx) &&
2972 	    (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2973 		uidx++;
2974 	return (uidx);
2975 }
2976 
2977 #ifndef EARLY_AP_STARTUP
2978 static int msix_ready;
2979 
2980 static void
2981 intel_ntb_msix_ready(void *arg __unused)
2982 {
2983 
2984 	msix_ready = 1;
2985 }
2986 SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
2987     intel_ntb_msix_ready, NULL);
2988 #endif
2989 
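/*
 * Exchange MSI-X vector data with the peer through scratchpads: each side
 * publishes its MSI-X message data and xlat-relative addresses in the
 * NTB_MSIX_DATA0/NTB_MSIX_OFS0 scratchpad ranges, guarded by
 * NTB_MSIX_VER_GUARD, and then acknowledges the peer's set by writing
 * NTB_MSIX_RECEIVED to NTB_MSIX_DONE.  The callout reschedules itself until
 * both sides have seen the acknowledgement.
 */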
2990 static void
2991 intel_ntb_exchange_msix(void *ctx)
2992 {
2993 	struct ntb_softc *ntb;
2994 	uint32_t val;
2995 	unsigned i;
2996 
2997 	ntb = ctx;
2998 
2999 	if (ntb->peer_msix_good)
3000 		goto msix_good;
3001 	if (ntb->peer_msix_done)
3002 		goto msix_done;
3003 
3004 #ifndef EARLY_AP_STARTUP
	/*
	 * Block MSI-X negotiation until SMP has started and IRQs have been
	 * reshuffled.
	 */
3006 	if (!msix_ready)
3007 		goto reschedule;
3008 #endif
3009 
3010 	intel_ntb_get_msix_info(ntb);
3011 	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3012 		intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
3013 		    ntb->msix_data[i].nmd_data);
3014 		intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
3015 		    ntb->msix_data[i].nmd_ofs - ntb->msix_xlat);
3016 	}
3017 	intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD);
3018 
3019 	intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
3020 	if (val != NTB_MSIX_VER_GUARD)
3021 		goto reschedule;
3022 
3023 	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3024 		intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
3025 		intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
3026 		ntb->peer_msix_data[i].nmd_data = val;
3027 		intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
3028 		intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
3029 		ntb->peer_msix_data[i].nmd_ofs = val;
3030 	}
3031 
3032 	ntb->peer_msix_done = true;
3033 
3034 msix_done:
3035 	intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE, NTB_MSIX_RECEIVED);
3036 	intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
3037 	if (val != NTB_MSIX_RECEIVED)
3038 		goto reschedule;
3039 
3040 	intel_ntb_spad_clear(ntb->device);
3041 	ntb->peer_msix_good = true;
3042 	/* Give peer time to see our NTB_MSIX_RECEIVED. */
3043 	goto reschedule;
3044 
3045 msix_good:
3046 	intel_ntb_poll_link(ntb);
3047 	ntb_link_event(ntb->device);
3048 	return;
3049 
3050 reschedule:
3051 	ntb->lnk_sta = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
3052 	if (_xeon_link_is_up(ntb)) {
3053 		callout_reset(&ntb->peer_msix_work,
3054 		    hz * (ntb->peer_msix_good ? 2 : 1) / 10,
3055 		    intel_ntb_exchange_msix, ntb);
3056 	} else
3057 		intel_ntb_spad_clear(ntb->device);
3058 }
3059 
3060 /*
3061  * Public API to the rest of the OS
3062  */
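
/*
 * A minimal consumer sketch (hypothetical, for illustration only), assuming
 * the ntb(4) wrapper functions from ../ntb.h that dispatch to the DEVMETHODs
 * registered at the bottom of this file:
 *
 *	uint32_t val;
 *
 *	ntb_spad_write(dev, 0, 0xdeadbeef);	(publish a word locally)
 *	ntb_peer_db_set(dev, 1);		(ring peer doorbell bit 0)
 *	ntb_peer_spad_read(dev, 0, &val);	(read the peer's word)
 */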
3063 
3064 static uint8_t
3065 intel_ntb_spad_count(device_t dev)
3066 {
3067 	struct ntb_softc *ntb = device_get_softc(dev);
3068 
3069 	return (ntb->spad_count);
3070 }
3071 
3072 static uint8_t
3073 intel_ntb_mw_count(device_t dev)
3074 {
3075 	struct ntb_softc *ntb = device_get_softc(dev);
3076 	uint8_t res;
3077 
3078 	res = ntb->mw_count;
3079 	if (ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0)
3080 		res--;
3081 	if (ntb->msix_mw_idx != B2B_MW_DISABLED)
3082 		res--;
3083 	return (res);
3084 }
3085 
3086 static int
3087 intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
3088 {
3089 	struct ntb_softc *ntb = device_get_softc(dev);
3090 
3091 	if (idx >= ntb->spad_count)
3092 		return (EINVAL);
3093 
3094 	intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
3095 
3096 	return (0);
3097 }
3098 
3099 /*
 * Zeroes all local scratchpad registers.
3101  */
3102 static void
3103 intel_ntb_spad_clear(device_t dev)
3104 {
3105 	struct ntb_softc *ntb = device_get_softc(dev);
3106 	unsigned i;
3107 
3108 	for (i = 0; i < ntb->spad_count; i++)
3109 		intel_ntb_spad_write(dev, i, 0);
3110 }
3111 
3112 static int
3113 intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
3114 {
3115 	struct ntb_softc *ntb = device_get_softc(dev);
3116 
3117 	if (idx >= ntb->spad_count)
3118 		return (EINVAL);
3119 
3120 	*val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
3121 
3122 	return (0);
3123 }
3124 
3125 static int
3126 intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
3127 {
3128 	struct ntb_softc *ntb = device_get_softc(dev);
3129 
3130 	if (idx >= ntb->spad_count)
3131 		return (EINVAL);
3132 
3133 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
3134 		intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
3135 	else
3136 		intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
3137 
3138 	return (0);
3139 }
3140 
3141 static int
3142 intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
3143 {
3144 	struct ntb_softc *ntb = device_get_softc(dev);
3145 
3146 	if (idx >= ntb->spad_count)
3147 		return (EINVAL);
3148 
3149 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
3150 		*val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
3151 	else
3152 		*val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
3153 
3154 	return (0);
3155 }
3156 
3157 static int
3158 intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
3159     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
3160     bus_addr_t *plimit)
3161 {
3162 	struct ntb_softc *ntb = device_get_softc(dev);
3163 	struct ntb_pci_bar_info *bar;
3164 	bus_addr_t limit;
3165 	size_t bar_b2b_off;
3166 	enum ntb_bar bar_num;
3167 
3168 	if (mw_idx >= intel_ntb_mw_count(dev))
3169 		return (EINVAL);
3170 	mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx);
3171 
3172 	bar_num = intel_ntb_mw_to_bar(ntb, mw_idx);
3173 	bar = &ntb->bar_info[bar_num];
3174 	bar_b2b_off = 0;
3175 	if (mw_idx == ntb->b2b_mw_idx) {
3176 		KASSERT(ntb->b2b_off != 0,
3177 		    ("user shouldn't get non-shared b2b mw"));
3178 		bar_b2b_off = ntb->b2b_off;
3179 	}
3180 
3181 	if (bar_is_64bit(ntb, bar_num))
3182 		limit = BUS_SPACE_MAXADDR;
3183 	else
3184 		limit = BUS_SPACE_MAXADDR_32BIT;
3185 
3186 	if (base != NULL)
3187 		*base = bar->pbase + bar_b2b_off;
3188 	if (vbase != NULL)
3189 		*vbase = bar->vbase + bar_b2b_off;
3190 	if (size != NULL)
3191 		*size = bar->size - bar_b2b_off;
3192 	if (align != NULL)
3193 		*align = bar->size;
3194 	if (align_size != NULL)
3195 		*align_size = 1;
3196 	if (plimit != NULL)
3197 		*plimit = limit;
3198 	return (0);
3199 }
3200 
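/*
 * Program the incoming translation for a memory window so that peer accesses
 * through the corresponding BAR land at local address "addr".  Each register
 * write is verified by read-back; on a mismatch the translation is torn down
 * again and EIO is returned.
 */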
3201 static int
3202 intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
3203 {
3204 	struct ntb_softc *ntb = device_get_softc(dev);
3205 	struct ntb_pci_bar_info *bar;
3206 	uint64_t base, limit, reg_val;
3207 	size_t bar_size, mw_size;
3208 	uint32_t base_reg, xlat_reg, limit_reg;
3209 	enum ntb_bar bar_num;
3210 
3211 	if (idx >= intel_ntb_mw_count(dev))
3212 		return (EINVAL);
3213 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
3214 
3215 	bar_num = intel_ntb_mw_to_bar(ntb, idx);
3216 	bar = &ntb->bar_info[bar_num];
3217 
3218 	bar_size = bar->size;
3219 	if (idx == ntb->b2b_mw_idx)
3220 		mw_size = bar_size - ntb->b2b_off;
3221 	else
3222 		mw_size = bar_size;
3223 
	/* The hardware requires that addr be aligned to the BAR size. */
3225 	if ((addr & (bar_size - 1)) != 0)
3226 		return (EINVAL);
3227 
3228 	if (size > mw_size)
3229 		return (EINVAL);
3230 
3231 	bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);
3232 
3233 	limit = 0;
3234 	if (bar_is_64bit(ntb, bar_num)) {
3235 		if (ntb->type == NTB_XEON_GEN3)
3236 			base = addr;
3237 		else
3238 			base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
3239 
3240 		if (limit_reg != 0 && size != mw_size)
3241 			limit = base + size;
3242 		else
3243 			limit = base + mw_size;
3244 
3245 		/* Set and verify translation address */
3246 		intel_ntb_reg_write(8, xlat_reg, addr);
3247 		reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
3248 		if (reg_val != addr) {
3249 			intel_ntb_reg_write(8, xlat_reg, 0);
3250 			return (EIO);
3251 		}
3252 
3253 		/* Set and verify the limit */
3254 		intel_ntb_reg_write(8, limit_reg, limit);
3255 		reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
3256 		if (reg_val != limit) {
3257 			intel_ntb_reg_write(8, limit_reg, base);
3258 			intel_ntb_reg_write(8, xlat_reg, 0);
3259 			return (EIO);
3260 		}
3261 	} else {
3262 		/* Configure 32-bit (split) BAR MW */
3263 		if (ntb->type == NTB_XEON_GEN3)
3264 			return (EIO);
3265 
3266 		if ((addr & UINT32_MAX) != addr)
3267 			return (ERANGE);
3268 		if (((addr + size) & UINT32_MAX) != (addr + size))
3269 			return (ERANGE);
3270 
3271 		base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
3272 
3273 		if (limit_reg != 0 && size != mw_size)
3274 			limit = base + size;
3275 
3276 		/* Set and verify translation address */
3277 		intel_ntb_reg_write(4, xlat_reg, addr);
3278 		reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
3279 		if (reg_val != addr) {
3280 			intel_ntb_reg_write(4, xlat_reg, 0);
3281 			return (EIO);
3282 		}
3283 
3284 		/* Set and verify the limit */
3285 		intel_ntb_reg_write(4, limit_reg, limit);
3286 		reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
3287 		if (reg_val != limit) {
3288 			intel_ntb_reg_write(4, limit_reg, base);
3289 			intel_ntb_reg_write(4, xlat_reg, 0);
3290 			return (EIO);
3291 		}
3292 	}
3293 	return (0);
3294 }
3295 
3296 static int
3297 intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
3298 {
3299 
3300 	return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0));
3301 }
3302 
3303 static int
3304 intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
3305 {
3306 	struct ntb_softc *ntb = device_get_softc(dev);
3307 	struct ntb_pci_bar_info *bar;
3308 
3309 	if (idx >= intel_ntb_mw_count(dev))
3310 		return (EINVAL);
3311 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
3312 
3313 	bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
3314 	*mode = bar->map_mode;
3315 	return (0);
3316 }
3317 
3318 static int
3319 intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
3320 {
3321 	struct ntb_softc *ntb = device_get_softc(dev);
3322 
3323 	if (idx >= intel_ntb_mw_count(dev))
3324 		return (EINVAL);
3325 
3326 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
3327 	return (intel_ntb_mw_set_wc_internal(ntb, idx, mode));
3328 }
3329 
3330 static int
3331 intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
3332 {
3333 	struct ntb_pci_bar_info *bar;
3334 	int rc;
3335 
3336 	bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
3337 	if (bar->map_mode == mode)
3338 		return (0);
3339 
3340 	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mode);
3341 	if (rc == 0)
3342 		bar->map_mode = mode;
3343 
3344 	return (rc);
3345 }
3346 
3347 static void
3348 intel_ntb_peer_db_set(device_t dev, uint64_t bits)
3349 {
3350 	struct ntb_softc *ntb = device_get_softc(dev);
3351 	uint64_t db;
3352 
3353 	if ((bits & ~ntb->db_valid_mask) != 0) {
3354 		device_printf(ntb->device, "Invalid doorbell bits %#jx\n",
3355 		    (uintmax_t)bits);
3356 		return;
3357 	}
3358 
3359 	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
3360 		struct ntb_pci_bar_info *lapic;
3361 		unsigned i;
3362 
3363 		lapic = ntb->peer_lapic_bar;
3364 
3365 		for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3366 			if ((bits & intel_ntb_db_vector_mask(dev, i)) != 0)
3367 				bus_space_write_4(lapic->pci_bus_tag,
3368 				    lapic->pci_bus_handle,
3369 				    ntb->peer_msix_data[i].nmd_ofs,
3370 				    ntb->peer_msix_data[i].nmd_data);
3371 		}
3372 		return;
3373 	}
3374 
3375 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
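		/*
		 * Errata workaround: reach the peer doorbell register
		 * remotely through the B2B memory window rather than via the
		 * local register.
		 */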
3376 		intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bits);
3377 		return;
3378 	}
3379 
3380 	if (ntb->type == NTB_XEON_GEN3) {
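		/*
		 * Gen3 exposes one byte-wide doorbell register per bit at a
		 * stride of 4 bytes; ring each requested doorbell in turn,
		 * clearing the lowest set bit on every iteration.
		 */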
3381 		while (bits != 0) {
3382 			db = ffsll(bits);
3383 
3384 			intel_ntb_reg_write(1,
3385 			    ntb->peer_reg->db_bell + (db - 1) * 4, 0x1);
3386 
3387 			bits = bits & (bits - 1);
3388 		}
3389 	} else {
3390 		db_iowrite(ntb, ntb->peer_reg->db_bell, bits);
3391 	}
3392 }
3393 
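/*
 * Return the bus address and size of the peer doorbell register, so that
 * a DMA engine, rather than the CPU, can ring the peer.  Under the
 * SDOORBELL_LOCKUP workaround the register lives in the B2B memory window
 * instead of the config BAR.
 */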
3394 static int
3395 intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
3396 {
3397 	struct ntb_softc *ntb = device_get_softc(dev);
3398 	struct ntb_pci_bar_info *bar;
3399 	uint64_t regoff;
3400 
3401 	KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL"));
3402 
3403 	if (!HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
3404 		bar = &ntb->bar_info[NTB_CONFIG_BAR];
3405 		regoff = ntb->peer_reg->db_bell;
3406 	} else {
3407 		KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
3408 		    ("invalid b2b idx"));
3409 
3410 		bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
3411 		regoff = XEON_PDOORBELL_OFFSET;
3412 	}
3413 	KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO,
	    ("doorbell BAR must be memory-mapped, not port I/O"));
3414 
3415 	/* HACK: Specific to current x86 bus implementation. */
3416 	*db_addr = ((uint64_t)bar->pci_bus_handle + regoff);
3417 	*db_size = ntb->reg->db_size;
3418 	return (0);
3419 }
3420 
3421 static uint64_t
3422 intel_ntb_db_valid_mask(device_t dev)
3423 {
3424 	struct ntb_softc *ntb = device_get_softc(dev);
3425 
3426 	return (ntb->db_valid_mask);
3427 }
3428 
3429 static int
3430 intel_ntb_db_vector_count(device_t dev)
3431 {
3432 	struct ntb_softc *ntb = device_get_softc(dev);
3433 
3434 	return (ntb->db_vec_count);
3435 }
3436 
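/*
 * Return the subset of valid doorbell bits serviced by the given
 * (zero-based) interrupt vector.
 */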
3437 static uint64_t
3438 intel_ntb_db_vector_mask(device_t dev, uint32_t vector)
3439 {
3440 	struct ntb_softc *ntb = device_get_softc(dev);
3441 
3442 	if (vector >= ntb->db_vec_count)
3443 		return (0);
3444 	return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector));
3445 }
3446 
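/*
 * Report whether the NTB link is up, optionally also returning the
 * negotiated link speed and width.
 */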
3447 static bool
3448 intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
3449 {
3450 	struct ntb_softc *ntb = device_get_softc(dev);
3451 
3452 	if (speed != NULL)
3453 		*speed = intel_ntb_link_sta_speed(ntb);
3454 	if (width != NULL)
3455 		*width = intel_ntb_link_sta_width(ntb);
3456 	return (link_is_up(ntb));
3457 }
3458 
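/*
 * Cache the bus-space tag/handle, physical base, size and KVA of an
 * allocated BAR resource for cheap access later.
 */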
3459 static void
3460 save_bar_parameters(struct ntb_pci_bar_info *bar)
3461 {
3462 
3463 	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
3464 	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
3465 	bar->pbase = rman_get_start(bar->pci_resource);
3466 	bar->size = rman_get_size(bar->pci_resource);
3467 	bar->vbase = rman_get_virtual(bar->pci_resource);
3468 }
3469 
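/*
 * Glue between the generic ntb(4) KPI and this driver: each DEVMETHOD()
 * entry binds one interface method to its intel_ntb_* implementation.
 */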
3470 static device_method_t ntb_intel_methods[] = {
3471 	/* Device interface */
3472 	DEVMETHOD(device_probe,		intel_ntb_probe),
3473 	DEVMETHOD(device_attach,	intel_ntb_attach),
3474 	DEVMETHOD(device_detach,	intel_ntb_detach),
3475 	/* Bus interface */
3476 	DEVMETHOD(bus_child_location,	ntb_child_location),
3477 	DEVMETHOD(bus_print_child,	ntb_print_child),
3478 	DEVMETHOD(bus_get_dma_tag,	ntb_get_dma_tag),
3479 	/* NTB interface */
3480 	DEVMETHOD(ntb_port_number,	intel_ntb_port_number),
3481 	DEVMETHOD(ntb_peer_port_count,	intel_ntb_peer_port_count),
3482 	DEVMETHOD(ntb_peer_port_number,	intel_ntb_peer_port_number),
3483 	DEVMETHOD(ntb_peer_port_idx,	intel_ntb_peer_port_idx),
3484 	DEVMETHOD(ntb_link_is_up,	intel_ntb_link_is_up),
3485 	DEVMETHOD(ntb_link_enable,	intel_ntb_link_enable),
3486 	DEVMETHOD(ntb_link_disable,	intel_ntb_link_disable),
3487 	DEVMETHOD(ntb_link_enabled,	intel_ntb_link_enabled),
3488 	DEVMETHOD(ntb_mw_count,		intel_ntb_mw_count),
3489 	DEVMETHOD(ntb_mw_get_range,	intel_ntb_mw_get_range),
3490 	DEVMETHOD(ntb_mw_set_trans,	intel_ntb_mw_set_trans),
3491 	DEVMETHOD(ntb_mw_clear_trans,	intel_ntb_mw_clear_trans),
3492 	DEVMETHOD(ntb_mw_get_wc,	intel_ntb_mw_get_wc),
3493 	DEVMETHOD(ntb_mw_set_wc,	intel_ntb_mw_set_wc),
3494 	DEVMETHOD(ntb_spad_count,	intel_ntb_spad_count),
3495 	DEVMETHOD(ntb_spad_clear,	intel_ntb_spad_clear),
3496 	DEVMETHOD(ntb_spad_write,	intel_ntb_spad_write),
3497 	DEVMETHOD(ntb_spad_read,	intel_ntb_spad_read),
3498 	DEVMETHOD(ntb_peer_spad_write,	intel_ntb_peer_spad_write),
3499 	DEVMETHOD(ntb_peer_spad_read,	intel_ntb_peer_spad_read),
3500 	DEVMETHOD(ntb_db_valid_mask,	intel_ntb_db_valid_mask),
3501 	DEVMETHOD(ntb_db_vector_count,	intel_ntb_db_vector_count),
3502 	DEVMETHOD(ntb_db_vector_mask,	intel_ntb_db_vector_mask),
3503 	DEVMETHOD(ntb_db_clear,		intel_ntb_db_clear),
3504 	DEVMETHOD(ntb_db_clear_mask,	intel_ntb_db_clear_mask),
3505 	DEVMETHOD(ntb_db_read,		intel_ntb_db_read),
3506 	DEVMETHOD(ntb_db_set_mask,	intel_ntb_db_set_mask),
3507 	DEVMETHOD(ntb_peer_db_addr,	intel_ntb_peer_db_addr),
3508 	DEVMETHOD(ntb_peer_db_set,	intel_ntb_peer_db_set),
3509 	DEVMETHOD_END
3510 };
3511 
3512 static DEFINE_CLASS_0(ntb_hw, ntb_intel_driver, ntb_intel_methods,
3513     sizeof(struct ntb_softc));
3514 DRIVER_MODULE(ntb_hw_intel, pci, ntb_intel_driver, ntb_hw_devclass, NULL, NULL);
3515 MODULE_DEPEND(ntb_hw_intel, ntb, 1, 1, 1);
3516 MODULE_VERSION(ntb_hw_intel, 1);
3517 MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ntb_hw_intel, pci_ids,
3518     nitems(pci_ids));
3519