xref: /freebsd/sys/dev/ntb/ntb_hw/ntb_hw_intel.c (revision 069ac18495ad8fde2748bc94b0f80a50250bb01d)
/*-
 * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
 *
 * This module contains a driver for NTB hardware in Intel Xeon/Atom CPUs.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/iommu/iommu.h>

#include "ntb_hw_intel.h"
#include "../ntb.h"

#define MAX_MSIX_INTERRUPTS	\
	MAX(MAX(XEON_DB_COUNT, ATOM_DB_COUNT), XEON_GEN3_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */
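/*
 * Clears the low 12 bits of a BAR address, presumably because BAR addresses
 * are programmed with at least 4KB granularity and the low bits are reserved.
 */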
#define BAR_HIGH_MASK		(~((1ull << 12) - 1))

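/*
 * Magic values passed through scratchpad registers by the SB01BASE_LOCKUP
 * MSI-X exchange protocol (see intel_ntb_exchange_msix()): the guard checks
 * that the peer speaks the same protocol version; RECEIVED acknowledges that
 * the peer's MSI-X data has been consumed.
 */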
#define	NTB_MSIX_VER_GUARD	0xaabbccdd
#define	NTB_MSIX_RECEIVED	0xe0f0e0f0

/*
 * PCI constants could be somewhere more generic, but aren't defined/used in
 * pci.c.
 */
#define	PCI_MSIX_ENTRY_SIZE		16
#define	PCI_MSIX_ENTRY_LOWER_ADDR	0
#define	PCI_MSIX_ENTRY_UPPER_ADDR	4
#define	PCI_MSIX_ENTRY_DATA		8

enum ntb_device_type {
	NTB_XEON_GEN1,
	NTB_XEON_GEN3,
	NTB_ATOM
};

/* ntb_conn_type are hardware numbers, cannot change. */
enum ntb_conn_type {
	NTB_CONN_TRANSPARENT = 0,
	NTB_CONN_B2B = 1,
	NTB_CONN_RP = 2,
};

enum ntb_b2b_direction {
	NTB_DEV_USD = 0,
	NTB_DEV_DSD = 1,
};

enum ntb_bar {
	NTB_CONFIG_BAR = 0,
	NTB_B2B_BAR_1,
	NTB_B2B_BAR_2,
	NTB_B2B_BAR_3,
	NTB_MAX_BARS
};

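/* Scratchpad register indices used by the MSI-X exchange handshake. */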
enum {
	NTB_MSIX_GUARD = 0,
	NTB_MSIX_DATA0,
	NTB_MSIX_DATA1,
	NTB_MSIX_DATA2,
	NTB_MSIX_OFS0,
	NTB_MSIX_OFS1,
	NTB_MSIX_OFS2,
	NTB_MSIX_DONE,
	NTB_MAX_MSIX_SPAD
};

/* Device features and workarounds */
#define HAS_FEATURE(ntb, feature)	\
	(((ntb)->features & (feature)) != 0)

struct ntb_hw_info {
	uint32_t		device_id;
	const char		*desc;
	enum ntb_device_type	type;
	uint32_t		features;
};

struct ntb_pci_bar_info {
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	vm_paddr_t		pbase;
	caddr_t			vbase;
	vm_size_t		size;
	vm_memattr_t		map_mode;

	/* Configuration register offsets */
	uint32_t		psz_off;
	uint32_t		ssz_off;
	uint32_t		pbarxlat_off;
};

struct ntb_int_info {
	struct resource	*res;
	int		rid;
	void		*tag;
};

struct ntb_vec {
	struct ntb_softc	*ntb;
	uint32_t		num;
	unsigned		masked;
};

struct ntb_reg {
	uint32_t	ntb_ctl;
	uint32_t	lnk_sta;
	uint8_t		db_size;
	unsigned	mw_bar[NTB_MAX_BARS];
};

struct ntb_alt_reg {
	uint32_t	db_bell;
	uint32_t	db_mask;
	uint32_t	spad;
};

struct ntb_xlat_reg {
	uint32_t	bar0_base;
	uint32_t	bar2_base;
	uint32_t	bar4_base;
	uint32_t	bar5_base;

	uint32_t	bar2_xlat;
	uint32_t	bar4_xlat;
	uint32_t	bar5_xlat;

	uint32_t	bar2_limit;
	uint32_t	bar4_limit;
	uint32_t	bar5_limit;
};

struct ntb_b2b_addr {
	uint64_t	bar0_addr;
	uint64_t	bar2_addr64;
	uint64_t	bar4_addr64;
	uint64_t	bar4_addr32;
	uint64_t	bar5_addr32;
};

struct ntb_msix_data {
	uint32_t	nmd_ofs;
	uint32_t	nmd_data;
};

struct ntb_softc {
	/* ntb.c context. Do not move! Must go first! */
	void			*ntb_store;

	device_t		device;
	enum ntb_device_type	type;
	uint32_t		features;

	struct ntb_pci_bar_info	bar_info[NTB_MAX_BARS];
	struct ntb_int_info	int_info[MAX_MSIX_INTERRUPTS];
	uint32_t		allocated_interrupts;

	struct ntb_msix_data	peer_msix_data[XEON_NONLINK_DB_MSIX_BITS];
	struct ntb_msix_data	msix_data[XEON_NONLINK_DB_MSIX_BITS];
	bool			peer_msix_good;
	bool			peer_msix_done;
	struct ntb_pci_bar_info	*peer_lapic_bar;
	struct callout		peer_msix_work;

	bus_dma_tag_t		bar0_dma_tag;
	bus_dmamap_t		bar0_dma_map;

	struct callout		heartbeat_timer;
	struct callout		lr_timer;

	struct ntb_vec		*msix_vec;

	uint32_t		ppd;
	enum ntb_conn_type	conn_type;
	enum ntb_b2b_direction	dev_type;

	/* Offset of peer bar0 in B2B BAR */
	uint64_t			b2b_off;
	/* Memory window used to access peer bar0 */
#define B2B_MW_DISABLED			UINT8_MAX
	uint8_t				b2b_mw_idx;
	uint32_t			msix_xlat;
	uint8_t				msix_mw_idx;

	uint8_t				mw_count;
	uint8_t				spad_count;
	uint8_t				db_count;
	uint8_t				db_vec_count;
	uint8_t				db_vec_shift;

	/* Protects local db_mask. */
#define DB_MASK_LOCK(sc)	mtx_lock_spin(&(sc)->db_mask_lock)
#define DB_MASK_UNLOCK(sc)	mtx_unlock_spin(&(sc)->db_mask_lock)
#define DB_MASK_ASSERT(sc,f)	mtx_assert(&(sc)->db_mask_lock, (f))
	struct mtx			db_mask_lock;

	volatile uint32_t		ntb_ctl;
	volatile uint32_t		lnk_sta;

	uint64_t			db_valid_mask;
	uint64_t			db_link_mask;
	uint64_t			db_mask;
	uint64_t			fake_db;	/* NTB_SB01BASE_LOCKUP*/
	uint64_t			force_db;	/* NTB_SB01BASE_LOCKUP*/

	int				last_ts;	/* ticks @ last irq */

	const struct ntb_reg		*reg;
	const struct ntb_alt_reg	*self_reg;
	const struct ntb_alt_reg	*peer_reg;
	const struct ntb_xlat_reg	*xlat_reg;
};

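/*
 * i386 lacks 8-byte bus_space accessors, so synthesize them from two 4-byte
 * operations.  Note that the combined access is not atomic: the device may
 * update the register between the two halves.
 */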
#ifdef __i386__
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif

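/*
 * Register access macros.  Note that these expect a local variable named
 * "ntb" (struct ntb_softc *) to be in scope at the call site.
 */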
#define intel_ntb_bar_read(SIZE, bar, offset) \
	    bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset))
#define intel_ntb_bar_write(SIZE, bar, offset, val) \
	    bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
#define intel_ntb_reg_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define intel_ntb_reg_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
#define intel_ntb_mw_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset)
#define intel_ntb_mw_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset, val)

static int intel_ntb_probe(device_t device);
static int intel_ntb_attach(device_t device);
static int intel_ntb_detach(device_t device);
static uint64_t intel_ntb_db_valid_mask(device_t dev);
static void intel_ntb_spad_clear(device_t dev);
static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector);
static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
    enum ntb_width *width);
static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed,
    enum ntb_width width);
static int intel_ntb_link_disable(device_t dev);
static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val);

static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
    uint32_t *base, uint32_t *xlat, uint32_t *lmt);
static int intel_ntb_map_pci_bars(struct ntb_softc *ntb);
static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
    vm_memattr_t);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
    const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
    struct ntb_pci_bar_info *bar);
static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
static int intel_ntb_init_isr(struct ntb_softc *ntb);
static int intel_ntb_xeon_gen3_init_isr(struct ntb_softc *ntb);
static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb);
static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
static void intel_ntb_free_msix_vec(struct ntb_softc *ntb);
static void intel_ntb_get_msix_info(struct ntb_softc *ntb);
static void intel_ntb_exchange_msix(void *);
static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id);
static void intel_ntb_detect_max_mw(struct ntb_softc *ntb);
static int intel_ntb_detect_xeon(struct ntb_softc *ntb);
static int intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb);
static int intel_ntb_detect_atom(struct ntb_softc *ntb);
static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb);
static int intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb);
static int intel_ntb_atom_init_dev(struct ntb_softc *ntb);
static void intel_ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
    enum ntb_bar regbar);
static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
    uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
    enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
    const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
static int xeon_gen3_setup_b2b_mw(struct ntb_softc *);
static int intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr,
    size_t size);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool _xeon_link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *);
static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
static void recover_atom_link(void *arg);
static bool intel_ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
static void intel_ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);

static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
#define intel_ntb_printf(lvl, ...) do {				\
	if ((lvl) <= g_ntb_hw_debug_level) {			\
		device_printf(ntb->device, __VA_ARGS__);	\
	}							\
} while (0)

#define	_NTB_PAT_UC	0
#define	_NTB_PAT_WC	1
#define	_NTB_PAT_WT	4
#define	_NTB_PAT_WP	5
#define	_NTB_PAT_WB	6
#define	_NTB_PAT_UCM	7
static unsigned g_ntb_mw_pat = _NTB_PAT_UC;
SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN,
    &g_ntb_mw_pat, 0, "Configure the default memory window cache flags (PAT): "
    "UC: "  __XSTRING(_NTB_PAT_UC) ", "
    "WC: "  __XSTRING(_NTB_PAT_WC) ", "
    "WT: "  __XSTRING(_NTB_PAT_WT) ", "
    "WP: "  __XSTRING(_NTB_PAT_WP) ", "
    "WB: "  __XSTRING(_NTB_PAT_WB) ", "
    "UC-: " __XSTRING(_NTB_PAT_UCM));

static inline vm_memattr_t
intel_ntb_pat_flags(void)
{

	switch (g_ntb_mw_pat) {
	case _NTB_PAT_WC:
		return (VM_MEMATTR_WRITE_COMBINING);
	case _NTB_PAT_WT:
		return (VM_MEMATTR_WRITE_THROUGH);
	case _NTB_PAT_WP:
		return (VM_MEMATTR_WRITE_PROTECTED);
	case _NTB_PAT_WB:
		return (VM_MEMATTR_WRITE_BACK);
	case _NTB_PAT_UCM:
		return (VM_MEMATTR_WEAK_UNCACHEABLE);
	case _NTB_PAT_UC:
		/* FALLTHROUGH */
	default:
		return (VM_MEMATTR_UNCACHEABLE);
	}
}

/*
 * Well, this obviously doesn't belong here, but it doesn't seem to exist
 * anywhere better yet.
 */
static inline const char *
intel_ntb_vm_memattr_to_str(vm_memattr_t pat)
{

	switch (pat) {
	case VM_MEMATTR_WRITE_COMBINING:
		return ("WRITE_COMBINING");
	case VM_MEMATTR_WRITE_THROUGH:
		return ("WRITE_THROUGH");
	case VM_MEMATTR_WRITE_PROTECTED:
		return ("WRITE_PROTECTED");
	case VM_MEMATTR_WRITE_BACK:
		return ("WRITE_BACK");
	case VM_MEMATTR_WEAK_UNCACHEABLE:
		return ("UNCACHED");
	case VM_MEMATTR_UNCACHEABLE:
		return ("UNCACHEABLE");
	default:
		return ("UNKNOWN");
	}
}

static int g_ntb_msix_idx = 1;
SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx,
    0, "Use this memory window to access the peer MSIX message complex on "
    "certain Xeon-based NTB systems, as a workaround for a hardware errata.  "
    "Like b2b_mw_idx, negative values index from the last available memory "
    "window.  (Applies on Xeon platforms with SB01BASE_LOCKUP errata.)");

static int g_ntb_mw_idx = -1;
SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx,
    0, "Use this memory window to access the peer NTB registers.  A "
    "non-negative value starts from the first MW index; a negative value "
    "starts from the last MW index.  The default is -1, i.e., the last "
    "available memory window.  Both sides of the NTB MUST set the same "
    "value here!  (Applies on Xeon platforms with SDOORBELL_LOCKUP errata.)");

/* Hardware owns the low 16 bits of features. */
#define NTB_BAR_SIZE_4K		(1 << 0)
#define NTB_SDOORBELL_LOCKUP	(1 << 1)
#define NTB_SB01BASE_LOCKUP	(1 << 2)
#define NTB_B2BDOORBELL_BIT14	(1 << 3)
/* Software/configuration owns the top 16 bits. */
#define NTB_SPLIT_BAR		(1ull << 16)
#define NTB_ONE_MSIX		(1ull << 17)

#define NTB_FEATURES_STR \
    "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \
    "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K"

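/*
 * Device IDs below are in pci_get_devid(9) format: the device ID in the
 * upper 16 bits and the vendor ID (0x8086, Intel) in the lower 16 bits.
 */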
static struct ntb_hw_info pci_ids[] = {
	/* XXX: PS/SS IDs left out until they are supported. */
	{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
		NTB_ATOM, 0 },

	{ 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
	{ 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
	{ 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B",
		NTB_XEON_GEN1, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },

	{ 0x201C8086, "SKL Xeon E5 V5 Non-Transparent Bridge B2B",
		NTB_XEON_GEN3, 0 },
};

static const struct ntb_reg atom_reg = {
	.ntb_ctl = ATOM_NTBCNTL_OFFSET,
	.lnk_sta = ATOM_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint64_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg atom_pri_reg = {
	.db_bell = ATOM_PDOORBELL_OFFSET,
	.db_mask = ATOM_PDBMSK_OFFSET,
	.spad = ATOM_SPAD_OFFSET,
};

static const struct ntb_alt_reg atom_b2b_reg = {
	.db_bell = ATOM_B2B_DOORBELL_OFFSET,
	.spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg atom_sec_xlat = {
#if 0
	/* "FIXME" says the Linux driver. */
	.bar0_base = ATOM_SBAR0BASE_OFFSET,
	.bar2_base = ATOM_SBAR2BASE_OFFSET,
	.bar4_base = ATOM_SBAR4BASE_OFFSET,

	.bar2_limit = ATOM_SBAR2LMT_OFFSET,
	.bar4_limit = ATOM_SBAR4LMT_OFFSET,
#endif

	.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
	.bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
};

static const struct ntb_reg xeon_reg = {
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.lnk_sta = XEON_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint16_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
};

static const struct ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_base = XEON_SBAR2BASE_OFFSET,
	.bar4_base = XEON_SBAR4BASE_OFFSET,
	.bar5_base = XEON_SBAR5BASE_OFFSET,

	.bar2_limit = XEON_SBAR2LMT_OFFSET,
	.bar4_limit = XEON_SBAR4LMT_OFFSET,
	.bar5_limit = XEON_SBAR5LMT_OFFSET,

	.bar2_xlat = XEON_SBAR2XLAT_OFFSET,
	.bar4_xlat = XEON_SBAR4XLAT_OFFSET,
	.bar5_xlat = XEON_SBAR5XLAT_OFFSET,
};

static struct ntb_b2b_addr xeon_b2b_usd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static const struct ntb_reg xeon_gen3_reg = {
	.ntb_ctl = XEON_GEN3_REG_IMNTB_CTRL,
	.lnk_sta = XEON_GEN3_INT_LNK_STS_OFFSET,
	.db_size = sizeof(uint32_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg xeon_gen3_pri_reg = {
	.db_bell = XEON_GEN3_REG_EMDOORBELL,
	.db_mask = XEON_GEN3_REG_IMINT_DISABLE,
	.spad = XEON_GEN3_REG_IMSPAD,
};

static const struct ntb_alt_reg xeon_gen3_b2b_reg = {
	.db_bell = XEON_GEN3_REG_IMDOORBELL,
	.db_mask = XEON_GEN3_REG_EMINT_DISABLE,
	.spad = XEON_GEN3_REG_IMB2B_SSPAD,
};

static const struct ntb_xlat_reg xeon_gen3_sec_xlat = {
	.bar0_base = XEON_GEN3_EXT_REG_BAR0BASE,
	.bar2_base = XEON_GEN3_EXT_REG_BAR1BASE,
	.bar4_base = XEON_GEN3_EXT_REG_BAR2BASE,

	.bar2_limit = XEON_GEN3_REG_IMBAR1XLIMIT,
	.bar4_limit = XEON_GEN3_REG_IMBAR2XLIMIT,

	.bar2_xlat = XEON_GEN3_REG_IMBAR1XBASE,
	.bar4_xlat = XEON_GEN3_REG_IMBAR2XBASE,
};

SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "B2B MW segment overrides -- MUST be the same on both sides");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the upstream side of the link.  MUST be the same "
    "address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the downstream side of the link.  MUST be the same"
    " address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

/*
 * OS <-> Driver interface structures
 */
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");

/*
 * OS <-> Driver linkage functions
 */
static int
intel_ntb_probe(device_t device)
{
	struct ntb_hw_info *p;

	p = intel_ntb_get_device_info(pci_get_devid(device));
	if (p == NULL)
		return (ENXIO);

	device_set_desc(device, p->desc);
	return (0);
}

static int
intel_ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = device_get_softc(device);
	p = intel_ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;
	ntb->b2b_mw_idx = B2B_MW_DISABLED;
	ntb->msix_mw_idx = B2B_MW_DISABLED;

	/* Heartbeat timer for NTB_ATOM since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);
	callout_init(&ntb->peer_msix_work, 1);
	mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);

	if (ntb->type == NTB_ATOM)
		error = intel_ntb_detect_atom(ntb);
	else if (ntb->type == NTB_XEON_GEN3)
		error = intel_ntb_detect_xeon_gen3(ntb);
	else
		error = intel_ntb_detect_xeon(ntb);
	if (error != 0)
		goto out;

	intel_ntb_detect_max_mw(ntb);

	pci_enable_busmaster(ntb->device);

	error = intel_ntb_map_pci_bars(ntb);
	if (error != 0)
		goto out;
	if (ntb->type == NTB_ATOM)
		error = intel_ntb_atom_init_dev(ntb);
	else if (ntb->type == NTB_XEON_GEN3)
		error = intel_ntb_xeon_gen3_init_dev(ntb);
	else
		error = intel_ntb_xeon_init_dev(ntb);
	if (error != 0)
		goto out;

	intel_ntb_spad_clear(device);

	intel_ntb_poll_link(ntb);

	intel_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	if (error != 0)
		intel_ntb_detach(device);
	return (error);
}

static int
intel_ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = device_get_softc(device);

	/* Detach & delete all children */
	ntb_unregister_device(device);

	if (ntb->self_reg != NULL) {
		DB_MASK_LOCK(ntb);
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_valid_mask);
		DB_MASK_UNLOCK(ntb);
	}
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	callout_drain(&ntb->peer_msix_work);
	pci_disable_busmaster(ntb->device);
	if (ntb->type == NTB_XEON_GEN1)
		intel_ntb_teardown_xeon(ntb);
	intel_ntb_teardown_interrupts(ntb);

	mtx_destroy(&ntb->db_mask_lock);

	intel_ntb_unmap_pci_bar(ntb);

	return (0);
}

/*
 * Driver internal routines
 */
static inline enum ntb_bar
intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{

	KASSERT(mw < ntb->mw_count,
	    ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count));
	KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));

	return (ntb->reg->mw_bar[mw]);
}

static inline bool
bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
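	/*
	 * The config BAR and the first memory window BAR are always 64-bit.
	 * In split-BAR mode the second window is presented as two 32-bit
	 * BARs (4 and 5) instead of one 64-bit BAR.
	 */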
	/* XXX This assertion could be stronger. */
	KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
	return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(ntb, NTB_SPLIT_BAR));
}

static inline void
bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
    uint32_t *xlat, uint32_t *lmt)
{
	uint32_t basev, lmtv, xlatv;

	switch (bar) {
	case NTB_B2B_BAR_1:
		basev = ntb->xlat_reg->bar2_base;
		lmtv = ntb->xlat_reg->bar2_limit;
		xlatv = ntb->xlat_reg->bar2_xlat;
		break;
	case NTB_B2B_BAR_2:
		basev = ntb->xlat_reg->bar4_base;
		lmtv = ntb->xlat_reg->bar4_limit;
		xlatv = ntb->xlat_reg->bar4_xlat;
		break;
	case NTB_B2B_BAR_3:
		basev = ntb->xlat_reg->bar5_base;
		lmtv = ntb->xlat_reg->bar5_limit;
		xlatv = ntb->xlat_reg->bar5_xlat;
		break;
	default:
		KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
		    ("bad bar"));
		basev = lmtv = xlatv = 0;
		break;
	}

	if (base != NULL)
		*base = basev;
	if (xlat != NULL)
		*xlat = xlatv;
	if (lmt != NULL)
		*lmt = lmtv;
}

static int
intel_ntb_map_pci_bars(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *bar;
	int rc;

	bar = &ntb->bar_info[NTB_CONFIG_BAR];
	bar->pci_resource_id = PCIR_BAR(0);
	rc = map_mmr_bar(ntb, bar);
	if (rc != 0)
		goto out;

	/*
	 * At least on Xeon v4, the NTB device leaks to the host some remote
	 * side BAR0 writes that are supposed to update scratchpad registers.
	 * It is not clear why this happens, but it may be related to the
	 * fact that BAR0 is 32KB on the link side while it is 64KB on the
	 * host side.  Without this hack the DMAR blocks those accesses as
	 * not allowed.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(ntb->device), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    bar->size, 1, bar->size, 0, NULL, NULL, &ntb->bar0_dma_tag)) {
		device_printf(ntb->device, "Unable to create BAR0 tag\n");
		return (ENOMEM);
	}
	if (bus_dmamap_create(ntb->bar0_dma_tag, 0, &ntb->bar0_dma_map)) {
		device_printf(ntb->device, "Unable to create BAR0 map\n");
		return (ENOMEM);
	}
	if (bus_dma_iommu_load_ident(ntb->bar0_dma_tag, ntb->bar0_dma_map,
	    bar->pbase, bar->size, 0)) {
		device_printf(ntb->device, "Unable to load BAR0 map\n");
		return (ENOMEM);
	}

	bar = &ntb->bar_info[NTB_B2B_BAR_1];
	bar->pci_resource_id = PCIR_BAR(2);
	rc = map_memory_window_bar(ntb, bar);
	if (rc != 0)
		goto out;
	if (ntb->type == NTB_XEON_GEN3) {
		bar->psz_off = XEON_GEN3_INT_REG_IMBAR1SZ;
		bar->ssz_off = XEON_GEN3_INT_REG_EMBAR1SZ;
		bar->pbarxlat_off = XEON_GEN3_REG_EMBAR1XBASE;
	} else {
		bar->psz_off = XEON_PBAR23SZ_OFFSET;
		bar->ssz_off = XEON_SBAR23SZ_OFFSET;
		bar->pbarxlat_off = XEON_PBAR2XLAT_OFFSET;
	}

	bar = &ntb->bar_info[NTB_B2B_BAR_2];
	bar->pci_resource_id = PCIR_BAR(4);
	rc = map_memory_window_bar(ntb, bar);
	if (rc != 0)
		goto out;
	if (ntb->type == NTB_XEON_GEN3) {
		bar->psz_off = XEON_GEN3_INT_REG_IMBAR2SZ;
		bar->ssz_off = XEON_GEN3_INT_REG_EMBAR2SZ;
		bar->pbarxlat_off = XEON_GEN3_REG_EMBAR2XBASE;
	} else {
		bar->psz_off = XEON_PBAR4SZ_OFFSET;
		bar->ssz_off = XEON_SBAR4SZ_OFFSET;
		bar->pbarxlat_off = XEON_PBAR4XLAT_OFFSET;
	}

	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR))
		goto out;

	if (ntb->type == NTB_XEON_GEN3) {
		device_printf(ntb->device, "no split bar support\n");
		return (ENXIO);
	}

	bar = &ntb->bar_info[NTB_B2B_BAR_3];
	bar->pci_resource_id = PCIR_BAR(5);
	rc = map_memory_window_bar(ntb, bar);
	bar->psz_off = XEON_PBAR5SZ_OFFSET;
	bar->ssz_off = XEON_SBAR5SZ_OFFSET;
	bar->pbarxlat_off = XEON_PBAR5XLAT_OFFSET;

out:
	if (rc != 0)
		device_printf(ntb->device,
		    "unable to allocate pci resource\n");
	return (rc);
}

static void
print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
    const char *kind)
{

	device_printf(ntb->device,
	    "Mapped BAR%d v:[%p-%p] p:[0x%jx-0x%jx] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1,
	    (uintmax_t)bar->pbase, (uintmax_t)(bar->pbase + bar->size - 1),
	    (uintmax_t)bar->size, kind);
}

static int
map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mmr");
	return (0);
}

static int
map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{
	int rc;
	vm_memattr_t mapmode;
	uint8_t bar_size_bits = 0;

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);

	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	/*
	 * Ivytown NTB BAR sizes are misreported by the hardware due to a
	 * hardware issue. To work around this, query the size it should be
	 * configured to by the device and modify the resource to correspond to
	 * this new size. The BIOS on systems with this problem is required to
	 * provide enough address space to allow the driver to make this change
	 * safely.
	 *
	 * Ideally I could have just specified the size when I allocated the
	 * resource like:
	 *  bus_alloc_resource(ntb->device,
	 *	SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
	 *	1ul << bar_size_bits, RF_ACTIVE);
	 * but the PCI driver does not honor the size in this call, so we have
	 * to modify it after the fact.
	 */
	if (HAS_FEATURE(ntb, NTB_BAR_SIZE_4K)) {
		if (bar->pci_resource_id == PCIR_BAR(2))
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR23SZ_OFFSET, 1);
		else
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR45SZ_OFFSET, 1);

		rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
		    bar->pci_resource, bar->pbase,
		    bar->pbase + (1ul << bar_size_bits) - 1);
		if (rc != 0) {
			device_printf(ntb->device,
			    "unable to resize bar\n");
			return (rc);
		}

		save_bar_parameters(bar);
	}

	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mw");

	/*
	 * Optionally, mark MW BARs as anything other than UC to improve
	 * performance.
	 */
	mapmode = intel_ntb_pat_flags();
	if (mapmode == bar->map_mode)
		return (0);

	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mapmode);
	if (rc == 0) {
		bar->map_mode = mapmode;
		device_printf(ntb->device,
		    "Marked BAR%d v:[%p-%p] p:[0x%jx-0x%jx] as "
		    "%s.\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (uintmax_t)bar->pbase,
		    (uintmax_t)(bar->pbase + bar->size - 1),
		    intel_ntb_vm_memattr_to_str(mapmode));
	} else {
		device_printf(ntb->device,
		    "Unable to mark BAR%d v:[%p-%p] p:[0x%jx-0x%jx] as "
		    "%s: %d\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (uintmax_t)bar->pbase,
		    (uintmax_t)(bar->pbase + bar->size - 1),
		    intel_ntb_vm_memattr_to_str(mapmode), rc);
	}
	/* Proceed anyway. */
	return (0);
}

static void
intel_ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *bar;
	int i;

	if (ntb->bar0_dma_map != NULL) {
		bus_dmamap_unload(ntb->bar0_dma_tag, ntb->bar0_dma_map);
		bus_dmamap_destroy(ntb->bar0_dma_tag, ntb->bar0_dma_map);
	}
	if (ntb->bar0_dma_tag != NULL)
		bus_dma_tag_destroy(ntb->bar0_dma_tag);
	for (i = 0; i < NTB_MAX_BARS; i++) {
		bar = &ntb->bar_info[i];
		if (bar->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    bar->pci_resource_id, bar->pci_resource);
	}
}

static int
intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;
	int rc;

	for (i = 0; i < num_vectors; i++) {
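		/* MSI-X SYS_RES_IRQ resource IDs are 1-based, hence i + 1. */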
		ntb->int_info[i].rid = i + 1;
		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE);
		if (ntb->int_info[i].res == NULL) {
			device_printf(ntb->device,
			    "bus_alloc_resource failed\n");
			return (ENOMEM);
		}
		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;
		rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
		    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr,
		    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		if (rc != 0) {
			device_printf(ntb->device, "bus_setup_intr failed\n");
			return (ENXIO);
		}
	}
	return (0);
}

/*
 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector
 * cannot be allocated for each MSI-X message.  JHB seems to think remapping
 * should be okay.  This tunable should enable us to test that hypothesis
 * when someone gets their hands on some Xeon hardware.
 */
static int ntb_force_remap_mode;
SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN,
    &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped"
    " to a smaller number of ithreads, even if the desired number are "
    "available");

/*
 * In case it is NOT ok, give consumers an abort button.
 */
static int ntb_prefer_intx;
SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
    &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather "
    "than remapping MSI-X messages over available slots (match Linux driver "
    "behavior)");

/*
 * Remap the desired number of MSI-X messages to available ithreads in a simple
 * round-robin fashion.
 */
static int
intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
	u_int *vectors;
	uint32_t i;
	int rc;

	if (ntb_prefer_intx != 0)
		return (ENXIO);

	vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK);

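	/* MSI-X vector numbers are 1-based; spread messages round-robin. */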
	for (i = 0; i < desired; i++)
		vectors[i] = (i % avail) + 1;

	rc = pci_remap_msix(dev, desired, vectors);
	free(vectors, M_NTB);
	return (rc);
}

static int
intel_ntb_xeon_gen3_init_isr(struct ntb_softc *ntb)
{
	uint64_t i, reg;
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/* Mask all the interrupts, including hardware interrupt */
	intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, ~0ULL);

	/* Clear Interrupt Status */
	reg = intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS);
	intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS, reg);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    XEON_GEN3_DB_MSIX_VECTOR_COUNT);

	rc = pci_alloc_msix(ntb->device, &num_vectors);
	if (rc != 0) {
		device_printf(ntb->device,
		    "Interrupt allocation failed %d\n", rc);
		return (rc);
	}
	if (desired_vectors != num_vectors) {
		device_printf(ntb->device, "Couldn't get %d vectors\n",
		    desired_vectors);
		return (ENXIO);
	}
	/* 32 db + 1 hardware */
	if (num_vectors == XEON_GEN3_DB_MSIX_VECTOR_COUNT) {
		/* Program INTVECXX source register */
		for (i = 0; i < XEON_GEN3_DB_MSIX_VECTOR_COUNT; i++) {
			/* interrupt source i for vector i */
			intel_ntb_reg_write(1, XEON_GEN3_REG_IMINTVEC00 + i, i);
			if (i == (XEON_GEN3_DB_MSIX_VECTOR_COUNT - 1)) {
				intel_ntb_reg_write(1,
				    XEON_GEN3_REG_IMINTVEC00 + i,
				    XEON_GEN3_LINK_VECTOR_INDEX);
			}
		}

		intel_ntb_create_msix_vec(ntb, num_vectors);
		rc = intel_ntb_setup_msix(ntb, num_vectors);

		/* enable all interrupts */
		intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_DISABLE, 0ULL);
	} else {
		device_printf(ntb->device, "need to remap interrupts, giving up.\n");
		return (ENXIO);
	}

	return (0);
}

static int
intel_ntb_init_isr(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/*
	 * Mask all doorbell interrupts.  (Except link events!)
	 */
	DB_MASK_LOCK(ntb);
	ntb->db_mask = ntb->db_valid_mask;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->db_count);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

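		/*
		 * If forced, pretend one fewer vector was allocated to
		 * exercise the remapping path below.
		 */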
		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			rc = intel_ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	if (ntb->type == NTB_XEON_GEN1 && num_vectors < ntb->db_vec_count) {
		if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
			device_printf(ntb->device,
			    "Errata workaround does not support MSI or INTX\n");
			return (EINVAL);
		}

		ntb->db_vec_count = 1;
		ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
		rc = intel_ntb_setup_legacy_interrupt(ntb);
	} else {
		if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS &&
		    HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
			device_printf(ntb->device,
			    "Errata workaround expects %d doorbell bits\n",
			    XEON_NONLINK_DB_MSIX_BITS);
			return (EINVAL);
		}

		intel_ntb_create_msix_vec(ntb, num_vectors);
		rc = intel_ntb_setup_msix(ntb, num_vectors);
	}
	if (rc != 0) {
		device_printf(ntb->device,
		    "Error allocating interrupts: %d\n", rc);
		intel_ntb_free_msix_vec(ntb);
	}

	return (rc);
}

static int
intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
	int rc;

	ntb->int_info[0].rid = 0;
	ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
	    &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
	if (ntb->int_info[0].res == NULL) {
		device_printf(ntb->device, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ntb->int_info[0].tag = NULL;
	ntb->allocated_interrupts = 1;

	rc = bus_setup_intr(ntb->device, ntb->int_info[0].res,
	    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr,
	    ntb, &ntb->int_info[0].tag);
	if (rc != 0) {
		device_printf(ntb->device, "bus_setup_intr failed\n");
		return (ENXIO);
	}

	return (0);
}

static void
intel_ntb_teardown_interrupts(struct ntb_softc *ntb)
{
	struct ntb_int_info *current_int;
	int i;

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	intel_ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

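	/*
	 * Doorbell registers are 16 bits wide on Gen1 Xeon, but are accessed
	 * as 64-bit registers on Atom and Gen3.
	 */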
	switch (ntb->type) {
	case NTB_ATOM:
	case NTB_XEON_GEN3:
		return (intel_ntb_reg_read(8, regoff));
	case NTB_XEON_GEN1:
		return (intel_ntb_reg_read(2, regoff));
	}
	__assert_unreachable();
}

static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(val & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);
	db_iowrite_raw(ntb, regoff, val);
}

static inline void
db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	switch (ntb->type) {
	case NTB_ATOM:
	case NTB_XEON_GEN3:
		intel_ntb_reg_write(8, regoff, val);
		break;
	case NTB_XEON_GEN1:
		intel_ntb_reg_write(2, regoff, (uint16_t)val);
		break;
	}
}

static void
intel_ntb_db_set_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	if (!HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

static void
intel_ntb_db_clear_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);
	uint64_t ibits;
	int i;

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ibits = ntb->fake_db & ntb->db_mask & bits;
	ntb->db_mask &= ~bits;
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/* Simulate fake interrupts if unmasked DB bits are set. */
		ntb->force_db |= ibits;
		for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
			if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0)
				swi_sched(ntb->int_info[i].tag, 0);
		}
	} else {
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	}
	DB_MASK_UNLOCK(ntb);
}

static uint64_t
intel_ntb_db_read(device_t dev)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		return (ntb->fake_db);
	if (ntb->type == NTB_XEON_GEN3)
		return (intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS));
	else
		return (db_ioread(ntb, ntb->self_reg->db_bell));
}

static void
intel_ntb_db_clear(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		DB_MASK_LOCK(ntb);
		ntb->fake_db &= ~bits;
		DB_MASK_UNLOCK(ntb);
		return;
	}

	if (ntb->type == NTB_XEON_GEN3)
		intel_ntb_reg_write(4, XEON_GEN3_REG_IMINT_STATUS,
		    (uint32_t)bits);
	else
		db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

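/*
 * Compute the mask of doorbell bits served by the given interrupt vector.
 * Normally each vector covers db_vec_shift consecutive doorbell bits; the
 * SB01BASE_LOCKUP workaround uses the custom mapping described below.
 */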
static inline uint64_t
intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/*
		 * Remap vectors in a custom way so that at least the first
		 * three doorbells do not generate stray events.  This breaks
		 * Linux compatibility (if any existed) when more than one DB
		 * is used (which is not the case for if_ntb).
		 */
		if (db_vector < XEON_NONLINK_DB_MSIX_BITS - 1)
			return (1 << db_vector);
		if (db_vector == XEON_NONLINK_DB_MSIX_BITS - 1)
			return (0x7ffc);
	}

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}

static void
intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = intel_ntb_vec_mask(ntb, vec);

	if (ntb->type == NTB_XEON_GEN3 && vec == XEON_GEN3_LINK_VECTOR_INDEX)
		vec_mask |= ntb->db_link_mask;
	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (intel_ntb_poll_link(ntb))
			ntb_link_event(ntb->device);
		if (ntb->type == NTB_XEON_GEN3)
			intel_ntb_reg_write(8, XEON_GEN3_REG_IMINT_STATUS,
			    intel_ntb_reg_read(8, XEON_GEN3_REG_IMINT_STATUS));
	}

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    (vec_mask & ntb->db_link_mask) == 0) {
		DB_MASK_LOCK(ntb);

		/*
		 * Do not report the same DB events again if they have not
		 * been cleared yet, unless the mask was just cleared for
		 * them and this interrupt handler call may be a consequence
		 * of that.
		 */
		vec_mask &= ~ntb->fake_db | ntb->force_db;
		ntb->force_db &= ~vec_mask;

		/* Update our internal doorbell register. */
		ntb->fake_db |= vec_mask;

		/* Do not report masked DB events. */
		vec_mask &= ~ntb->db_mask;

		DB_MASK_UNLOCK(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb->device, vec);
}

static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	intel_ntb_interrupt(nvec->ntb, nvec->num);
}

static void
ndev_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	intel_ntb_interrupt(arg, 0);
}

static int
intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;

	ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < num_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

static void
intel_ntb_free_msix_vec(struct ntb_softc *ntb)
{

	if (ntb->msix_vec == NULL)
		return;

	free(ntb->msix_vec, M_NTB);
	ntb->msix_vec = NULL;
}

static void
intel_ntb_get_msix_info(struct ntb_softc *ntb)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_msix *msix;
	uint32_t laddr, data, i, offset;

	dinfo = device_get_ivars(ntb->device);
	msix = &dinfo->cfg.msix;

	CTASSERT(XEON_NONLINK_DB_MSIX_BITS == nitems(ntb->msix_data));

	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
		offset = msix->msix_table_offset + i * PCI_MSIX_ENTRY_SIZE;

		laddr = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_LOWER_ADDR);
		intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);

		KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE,
		    ("local MSIX addr 0x%x not in MSI base 0x%x", laddr,
		     MSI_INTEL_ADDR_BASE));
		ntb->msix_data[i].nmd_ofs = laddr;

		data = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_DATA);
		intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);

		ntb->msix_data[i].nmd_data = data;
	}
}

static struct ntb_hw_info *
intel_ntb_get_device_info(uint32_t device_id)
{
	struct ntb_hw_info *ep;

	for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
		if (ep->device_id == device_id)
			return (ep);
	}
	return (NULL);
}

static void
intel_ntb_teardown_xeon(struct ntb_softc *ntb)
{

	if (ntb->reg != NULL)
		intel_ntb_link_disable(ntb->device);
}

static void
intel_ntb_detect_max_mw(struct ntb_softc *ntb)
{

	switch (ntb->type) {
	case NTB_ATOM:
		ntb->mw_count = ATOM_MW_COUNT;
		break;
	case NTB_XEON_GEN1:
		if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
			ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
		else
			ntb->mw_count = XEON_SNB_MW_COUNT;
		break;
	case NTB_XEON_GEN3:
		if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
			ntb->mw_count = XEON_GEN3_SPLIT_MW_COUNT;
		else
			ntb->mw_count = XEON_GEN3_MW_COUNT;
		break;
	}
}

static int
intel_ntb_detect_xeon(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
	ntb->ppd = ppd;

	if ((ppd & XEON_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
		ntb->features |= NTB_SPLIT_BAR;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
		device_printf(ntb->device,
		    "Can not apply SB01BASE_LOCKUP workaround "
		    "with split BARs disabled!\n");
		device_printf(ntb->device,
		    "Expect system hangs under heavy NTB traffic!\n");
		ntb->features &= ~NTB_SB01BASE_LOCKUP;
	}

	/*
	 * SDOORBELL errata workaround gets in the way of SB01BASE_LOCKUP
	 * errata workaround; only do one at a time.
	 */
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		ntb->features &= ~NTB_SDOORBELL_LOCKUP;

	conn_type = ppd & XEON_PPD_CONN_TYPE;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	case NTB_CONN_RP:
	case NTB_CONN_TRANSPARENT:
	default:
		device_printf(ntb->device, "Unsupported connection type: %u\n",
		    (unsigned)conn_type);
		return (ENXIO);
	}
	return (0);
}

static int
intel_ntb_detect_atom(struct ntb_softc *ntb)
{
	uint32_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
	ntb->ppd = ppd;

	if ((ppd & ATOM_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	default:
		device_printf(ntb->device, "Unsupported NTB configuration\n");
		return (ENXIO);
	}
	return (0);
}

static int
intel_ntb_detect_xeon_gen3(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, XEON_GEN3_INT_REG_PPD, 1);
	ntb->ppd = ppd;

	/* check port definition */
	conn_type = XEON_GEN3_REG_PPD_PORT_DEF_F(ppd);
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	default:
		device_printf(ntb->device, "Unsupported connection type: %u\n",
		    conn_type);
		return (ENXIO);
	}

	/* check cross link configuration status */
	if (XEON_GEN3_REG_PPD_CONF_STS_F(ppd)) {
		/* NTB Port is configured as DSD/USP */
		ntb->dev_type = NTB_DEV_DSD;
	} else {
		/* NTB Port is configured as USD/DSP */
		ntb->dev_type = NTB_DEV_USD;
	}

	if (XEON_GEN3_REG_PPD_ONE_MSIX_F(ppd)) {
		/*
		 * This bit, when set, causes only a single MSI-X message to
		 * be generated if MSI-X is enabled.
		 */
		ntb->features |= NTB_ONE_MSIX;
	}

	if (XEON_GEN3_REG_PPD_BAR45_SPL_F(ppd)) {
		/* BARs 4 and 5 are presented as two 32b non-prefetchable BARs */
		ntb->features |= NTB_SPLIT_BAR;
	}

	device_printf(ntb->device, "conn type 0x%02x, dev type 0x%02x, "
	    "features 0x%02x\n", ntb->conn_type, ntb->dev_type, ntb->features);

	return (0);
}

static int
intel_ntb_xeon_init_dev(struct ntb_softc *ntb)
{
	int rc;

	ntb->spad_count		= XEON_SPAD_COUNT;
	ntb->db_count		= XEON_DB_COUNT;
	ntb->db_link_mask	= XEON_DB_LINK_BIT;
	ntb->db_vec_count	= XEON_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	= XEON_DB_MSIX_VECTOR_SHIFT;

	if (ntb->conn_type != NTB_CONN_B2B) {
		device_printf(ntb->device, "Connection type %d not supported\n",
		    ntb->conn_type);
		return (ENXIO);
	}

	ntb->reg = &xeon_reg;
	ntb->self_reg = &xeon_pri_reg;
	ntb->peer_reg = &xeon_b2b_reg;
	ntb->xlat_reg = &xeon_sec_xlat;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		ntb->force_db = ntb->fake_db = 0;
		ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) %
		    ntb->mw_count;
		intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
		    g_ntb_msix_idx, ntb->msix_mw_idx);
		rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
		    VM_MEMATTR_UNCACHEABLE);
		KASSERT(rc == 0, ("shouldn't fail"));
	} else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
1755 		/*
1756 		 * There is a Xeon hardware erratum related to writes to SDOORBELL or
1757 		 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
1758 		 * which may hang the system.  To work around this, use a memory
1759 		 * window to access the interrupt and scratch pad registers on the
1760 		 * remote system.
1761 		 */
1762 		ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) %
1763 		    ntb->mw_count;
1764 		intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
1765 		    g_ntb_mw_idx, ntb->b2b_mw_idx);
1766 		rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
1767 		    VM_MEMATTR_UNCACHEABLE);
1768 		KASSERT(rc == 0, ("shouldn't fail"));
1769 	} else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14))
1770 		/*
1771 		 * HW erratum on bit 14 of the b2bdoorbell register.  Writes will
1772 		 * not be mirrored to the remote system.  Shrink the number of bits
1773 		 * by one, since bit 14 is the last bit.
1774 		 *
1775 		 * In REGS_THRU_MW errata mode the b2bdoorbell register is not used
1776 		 * anyway, nor is it for non-B2B connection types.
1777 		 */
1778 		ntb->db_count = XEON_DB_COUNT - 1;
1779 
1780 	ntb->db_valid_mask = (1ull << ntb->db_count) - 1;
1781 
1782 	if (ntb->dev_type == NTB_DEV_USD)
1783 		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr,
1784 		    &xeon_b2b_usd_addr);
1785 	else
1786 		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr,
1787 		    &xeon_b2b_dsd_addr);
1788 	if (rc != 0)
1789 		return (rc);
1790 
1791 	/* Enable Bus Master and Memory Space on the secondary side */
1792 	intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET,
1793 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1794 
1795 	/*
1796 	 * Mask all doorbell interrupts.
1797 	 */
1798 	DB_MASK_LOCK(ntb);
1799 	ntb->db_mask = ntb->db_valid_mask;
1800 	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
1801 	DB_MASK_UNLOCK(ntb);
1802 
1803 	rc = intel_ntb_init_isr(ntb);
1804 	return (rc);
1805 }
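
/*
 * Illustrative note (not driver code): the g_ntb_mw_idx/g_ntb_msix_idx
 * tunables may be negative to count from the last MW, and the modulo
 * arithmetic in intel_ntb_xeon_init_dev() above normalizes that.  For
 * example, with mw_count = 2:
 *
 *	g_ntb_mw_idx = -1  ->  (2 + -1) % 2 = 1	(last MW)
 *	g_ntb_mw_idx =  0  ->  (2 +  0) % 2 = 0	(first MW)
 *
 * Values outside [-mw_count, mw_count) are not normalized, since C's %
 * operator keeps the sign of the dividend.
 */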
1806 
1807 static int
1808 intel_ntb_xeon_gen3_init_dev(struct ntb_softc *ntb)
1809 {
1810 	int rc;
1811 
1812 	ntb->spad_count = XEON_GEN3_SPAD_COUNT;
1813 	ntb->db_count = XEON_GEN3_DB_COUNT;
1814 	ntb->db_link_mask = XEON_GEN3_DB_LINK_BIT;
1815 	ntb->db_vec_count = XEON_GEN3_DB_MSIX_VECTOR_COUNT;
1816 	ntb->db_vec_shift = XEON_GEN3_DB_MSIX_VECTOR_SHIFT;
1817 
1818 	if (ntb->conn_type != NTB_CONN_B2B) {
1819 		device_printf(ntb->device, "Connection type %d not supported\n",
1820 		    ntb->conn_type);
1821 		return (ENXIO);
1822 	}
1823 
1824 	ntb->reg = &xeon_gen3_reg;
1825 	ntb->self_reg = &xeon_gen3_pri_reg;
1826 	ntb->peer_reg = &xeon_gen3_b2b_reg;
1827 	ntb->xlat_reg = &xeon_gen3_sec_xlat;
1828 
1829 	ntb->db_valid_mask = (1ULL << ntb->db_count) - 1;
1830 
1831 	xeon_gen3_setup_b2b_mw(ntb);
1832 
1833 	/* Enable Bus Master and Memory Space on the External Side */
1834 	intel_ntb_reg_write(2, XEON_GEN3_EXT_REG_PCI_CMD,
1835 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1836 
1837 	/* Setup Interrupt */
1838 	rc = intel_ntb_xeon_gen3_init_isr(ntb);
1839 
1840 	return (rc);
1841 }
1842 
1843 static int
1844 intel_ntb_atom_init_dev(struct ntb_softc *ntb)
1845 {
1846 	int error;
1847 
1848 	KASSERT(ntb->conn_type == NTB_CONN_B2B,
1849 	    ("Unsupported NTB configuration (%d)\n", ntb->conn_type));
1850 
1851 	ntb->spad_count		 = ATOM_SPAD_COUNT;
1852 	ntb->db_count		 = ATOM_DB_COUNT;
1853 	ntb->db_vec_count	 = ATOM_DB_MSIX_VECTOR_COUNT;
1854 	ntb->db_vec_shift	 = ATOM_DB_MSIX_VECTOR_SHIFT;
1855 	ntb->db_valid_mask	 = (1ull << ntb->db_count) - 1;
1856 
1857 	ntb->reg = &atom_reg;
1858 	ntb->self_reg = &atom_pri_reg;
1859 	ntb->peer_reg = &atom_b2b_reg;
1860 	ntb->xlat_reg = &atom_sec_xlat;
1861 
1862 	/*
1863 	 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is
1864 	 * resolved.  Mask transaction layer internal parity errors.
1865 	 */
1866 	pci_write_config(ntb->device, 0xFC, 0x4, 4);
1867 
1868 	configure_atom_secondary_side_bars(ntb);
1869 
1870 	/* Enable Bus Master and Memory Space on the secondary side */
1871 	intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
1872 	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
1873 
1874 	error = intel_ntb_init_isr(ntb);
1875 	if (error != 0)
1876 		return (error);
1877 
1878 	/* Initiate PCI-E link training */
1879 	intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
1880 
1881 	callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);
1882 
1883 	return (0);
1884 }
1885 
1886 /* XXX: Linux driver doesn't seem to do any of this for Atom. */
1887 static void
1888 configure_atom_secondary_side_bars(struct ntb_softc *ntb)
1889 {
1890 
1891 	/* The same values are programmed for both USD and DSD. */
1892 	intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64);
1893 	intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64);
1894 	intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
1895 	intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
1906 }
1907 
1908 /*
1909  * When working around the Xeon SDOORBELL erratum by remapping remote
1910  * registers in an MW, limit the B2B register region to half of that MW.
1911  * By sharing the MW, the other half remains available to a higher layer.
1912  *
1913  * Only used if working around the SDOORBELL erratum and the BIOS-configured
1914  * MW size is sufficiently large.
1915  */
1916 static unsigned int ntb_b2b_mw_share;
1917 SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share,
1918     0, "If enabled (non-zero), prefer to share half of the B2B peer register "
1919     "MW with higher level consumers.  Both sides of the NTB MUST set the same "
1920     "value here.");
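
/*
 * Usage example (assumed typical setup, not part of this file): since the
 * knob is CTLFLAG_RDTUN, it must be set as a loader tunable, identically
 * on both hosts, e.g. in /boot/loader.conf:
 *
 *	hw.ntb.b2b_mw_share=1
 */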
1921 
1922 static void
1923 xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
1924     enum ntb_bar regbar)
1925 {
1926 	struct ntb_pci_bar_info *bar;
1927 	uint8_t bar_sz;
1928 
1929 	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
1930 		return;
1931 
1932 	bar = &ntb->bar_info[idx];
1933 	bar_sz = pci_read_config(ntb->device, bar->psz_off, 1);
1934 	if (idx == regbar) {
1935 		if (ntb->b2b_off != 0)
1936 			bar_sz--;
1937 		else
1938 			bar_sz = 0;
1939 	}
1940 	pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1);
1941 	bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1);
1942 	(void)bar_sz;
1943 }
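
/*
 * Illustrative sketch (not driver code): the *SZ registers above hold
 * log2 of the BAR size, so the decrement halves the secondary window:
 *
 *	bar_sz = 17;	// 2^17 = 128 KiB secondary BAR
 *	bar_sz--;	// 2^16 = 64 KiB; the other half backs B2B registers
 *
 * while bar_sz = 0 takes the whole BAR for B2B registers when the MW is
 * not shared (b2b_off == 0).
 */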
1944 
1945 static void
1946 xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
1947     enum ntb_bar idx, enum ntb_bar regbar)
1948 {
1949 	uint64_t reg_val;
1950 	uint32_t base_reg, lmt_reg;
1951 
1952 	bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
1953 	if (idx == regbar) {
1954 		if (ntb->b2b_off)
1955 			bar_addr += ntb->b2b_off;
1956 		else
1957 			bar_addr = 0;
1958 	}
1959 
1960 	if (!bar_is_64bit(ntb, idx)) {
1961 		intel_ntb_reg_write(4, base_reg, bar_addr);
1962 		reg_val = intel_ntb_reg_read(4, base_reg);
1963 		(void)reg_val;
1964 
1965 		intel_ntb_reg_write(4, lmt_reg, bar_addr);
1966 		reg_val = intel_ntb_reg_read(4, lmt_reg);
1967 		(void)reg_val;
1968 	} else {
1969 		intel_ntb_reg_write(8, base_reg, bar_addr);
1970 		reg_val = intel_ntb_reg_read(8, base_reg);
1971 		(void)reg_val;
1972 
1973 		intel_ntb_reg_write(8, lmt_reg, bar_addr);
1974 		reg_val = intel_ntb_reg_read(8, lmt_reg);
1975 		(void)reg_val;
1976 	}
1977 }
1978 
1979 static void
1980 xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
1981 {
1982 	struct ntb_pci_bar_info *bar;
1983 
1984 	bar = &ntb->bar_info[idx];
1985 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
1986 		intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr);
1987 		base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off);
1988 	} else {
1989 		intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr);
1990 		base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off);
1991 	}
1992 	(void)base_addr;
1993 }
1994 
1995 static int
1996 xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
1997     const struct ntb_b2b_addr *peer_addr)
1998 {
1999 	struct ntb_pci_bar_info *b2b_bar;
2000 	vm_size_t bar_size;
2001 	uint64_t bar_addr;
2002 	enum ntb_bar b2b_bar_num, i;
2003 
2004 	if (ntb->b2b_mw_idx == B2B_MW_DISABLED) {
2005 		b2b_bar = NULL;
2006 		b2b_bar_num = NTB_CONFIG_BAR;
2007 		ntb->b2b_off = 0;
2008 	} else {
2009 		b2b_bar_num = intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
2010 		KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
2011 		    ("invalid b2b mw bar"));
2012 
2013 		b2b_bar = &ntb->bar_info[b2b_bar_num];
2014 		bar_size = b2b_bar->size;
2015 
2016 		if (ntb_b2b_mw_share != 0 &&
2017 		    (bar_size >> 1) >= XEON_B2B_MIN_SIZE) {
2018 			ntb->b2b_off = bar_size >> 1;
2019 		} else if (bar_size >= XEON_B2B_MIN_SIZE) {
2020 			ntb->b2b_off = 0;
2021 		} else {
2022 			device_printf(ntb->device,
2023 			    "B2B bar size is too small!\n");
2024 			return (EIO);
2025 		}
2026 	}
2027 
2028 	/*
2029 	 * Reset the secondary BAR sizes to match the primary BAR sizes.
2030 	 * (Except disable or halve the size of the B2B secondary BAR.)
2031 	 */
2032 	for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++)
2033 		xeon_reset_sbar_size(ntb, i, b2b_bar_num);
2034 
2035 	bar_addr = 0;
2036 	if (b2b_bar_num == NTB_CONFIG_BAR)
2037 		bar_addr = addr->bar0_addr;
2038 	else if (b2b_bar_num == NTB_B2B_BAR_1)
2039 		bar_addr = addr->bar2_addr64;
2040 	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2041 		bar_addr = addr->bar4_addr64;
2042 	else if (b2b_bar_num == NTB_B2B_BAR_2)
2043 		bar_addr = addr->bar4_addr32;
2044 	else if (b2b_bar_num == NTB_B2B_BAR_3)
2045 		bar_addr = addr->bar5_addr32;
2046 	else
2047 		KASSERT(false, ("invalid bar"));
2048 
2049 	intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
2050 
2051 	/*
2052 	 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
2053 	 * register BAR.  The B2B BAR is either disabled above or configured
2054 	 * half-size.  It starts at PBAR xlat + offset.
2055 	 *
2056 	 * Also set up incoming BAR limits == base (zero length window).
2057 	 */
2058 	xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
2059 	    b2b_bar_num);
2060 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2061 		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
2062 		    NTB_B2B_BAR_2, b2b_bar_num);
2063 		xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
2064 		    NTB_B2B_BAR_3, b2b_bar_num);
2065 	} else
2066 		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64,
2067 		    NTB_B2B_BAR_2, b2b_bar_num);
2068 
2069 	/* Zero incoming translation addrs */
2070 	intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
2071 	intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
2072 
2073 	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2074 		uint32_t xlat_reg, lmt_reg;
2075 		enum ntb_bar bar_num;
2076 
2077 		/*
2078 		 * Point the chosen MSI-X MW BAR xlat at the remote LAPIC for
2079 		 * the workaround.
2080 		 */
2081 		bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
2082 		bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg);
2083 		if (bar_is_64bit(ntb, bar_num)) {
2084 			intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE);
2085 			ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg);
2086 			intel_ntb_reg_write(8, lmt_reg, 0);
2087 		} else {
2088 			intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE);
2089 			ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg);
2090 			intel_ntb_reg_write(4, lmt_reg, 0);
2091 		}
2092 
2093 		ntb->peer_lapic_bar = &ntb->bar_info[bar_num];
2094 	}
2095 	(void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
2096 	(void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
2097 
2098 	/* Zero outgoing translation limits (whole bar size windows) */
2099 	intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
2100 	intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
2101 
2102 	/* Set outgoing translation offsets */
2103 	xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
2104 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2105 		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
2106 		xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
2107 	} else
2108 		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2);
2109 
2110 	/* Set the translation offset for B2B registers */
2111 	bar_addr = 0;
2112 	if (b2b_bar_num == NTB_CONFIG_BAR)
2113 		bar_addr = peer_addr->bar0_addr;
2114 	else if (b2b_bar_num == NTB_B2B_BAR_1)
2115 		bar_addr = peer_addr->bar2_addr64;
2116 	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2117 		bar_addr = peer_addr->bar4_addr64;
2118 	else if (b2b_bar_num == NTB_B2B_BAR_2)
2119 		bar_addr = peer_addr->bar4_addr32;
2120 	else if (b2b_bar_num == NTB_B2B_BAR_3)
2121 		bar_addr = peer_addr->bar5_addr32;
2122 	else
2123 		KASSERT(false, ("invalid bar"));
2124 
2125 	/*
2126 	 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
2127 	 * at a time.
2128 	 */
2129 	intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
2130 	intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
2131 	return (0);
2132 }
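
/*
 * Illustrative sketch (not driver code) of the split write above: a
 * 64-bit translation offset is programmed as two 32-bit halves, which is
 * presumably tolerable because it happens during setup, before B2B
 * traffic flows:
 *
 *	uint64_t bar_addr = 0x8000000000ull;	// example value
 *	uint32_t lo = bar_addr & 0xffffffff;	// -> XEON_B2B_XLAT_OFFSETL
 *	uint32_t hi = bar_addr >> 32;		// -> XEON_B2B_XLAT_OFFSETU
 */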
2133 
2134 static int
2135 xeon_gen3_setup_b2b_mw(struct ntb_softc *ntb)
2136 {
2137 	uint64_t reg;
2138 	uint32_t embarsz, imbarsz;
2139 
2140 	/* IMBAR1SZ should be equal to EMBAR1SZ */
2141 	embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR1SZ, 1);
2142 	imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR1SZ, 1);
2143 	if (embarsz != imbarsz) {
2144 		device_printf(ntb->device,
2145 		    "IMBAR1SZ (%u) should be equal to EMBAR1SZ (%u)\n",
2146 		    imbarsz, embarsz);
2147 		return (EIO);
2148 	}
2149 
2150 	/* IMBAR2SZ should be equal to EMBAR2SZ */
2151 	embarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_EMBAR2SZ, 1);
2152 	imbarsz = pci_read_config(ntb->device, XEON_GEN3_INT_REG_IMBAR2SZ, 1);
2153 	if (embarsz != imbarsz) {
2154 		device_printf(ntb->device,
2155 		    "IMBAR2SZ (%u) should be equal to EMBAR2SZ (%u)\n",
2156 		    imbarsz, embarsz);
2157 		return (EIO);
2158 	}
2159 
2160 	/* Clients provide the incoming IMBAR1/2XBASE; zero them for now. */
2161 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XBASE, 0);
2162 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XBASE, 0);
2163 
2164 	/*
2165 	 * If the value in IMBAR1XLIMIT is set equal to the value in IMBAR1XBASE,
2166 	 * the local memory window exposure from EMBAR1 is disabled.
2167 	 * Note: this is needed to prevent malicious access.
2168 	 */
2169 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR1XLIMIT, 0);
2170 	intel_ntb_reg_write(8, XEON_GEN3_REG_IMBAR2XLIMIT, 0);
2171 
2172 	/* Config outgoing translation limits (whole bar size windows) */
2173 	reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR1XBASE);
2174 	reg += ntb->bar_info[NTB_B2B_BAR_1].size;
2175 	intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR1XLIMIT, reg);
2176 
2177 	reg = intel_ntb_reg_read(8, XEON_GEN3_REG_EMBAR2XBASE);
2178 	reg += ntb->bar_info[NTB_B2B_BAR_2].size;
2179 	intel_ntb_reg_write(8, XEON_GEN3_REG_EMBAR2XLIMIT, reg);
2180 
2181 	return (0);
2182 }
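
/*
 * Worked example (illustrative values): with EMBAR1XBASE = 0x1000000000
 * and a 64 MiB BAR1, the code above programs
 *
 *	EMBAR1XLIMIT = 0x1000000000 + 0x4000000 = 0x1004000000
 *
 * i.e. limit = base + size opens the whole BAR as the outgoing window,
 * while IMBARxXLIMIT == IMBARxXBASE (both zero) keeps the incoming side
 * closed until a client programs a translation.
 */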
2183 
2184 static inline bool
2185 _xeon_link_is_up(struct ntb_softc *ntb)
2186 {
2187 
2188 	if (ntb->conn_type == NTB_CONN_TRANSPARENT)
2189 		return (true);
2190 	return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
2191 }
2192 
2193 static inline bool
2194 link_is_up(struct ntb_softc *ntb)
2195 {
2196 
2197 	if (ntb->type == NTB_XEON_GEN1 || ntb->type == NTB_XEON_GEN3)
2198 		return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good ||
2199 		    !HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)));
2200 
2201 	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
2202 	return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
2203 }
2204 
2205 static inline bool
2206 atom_link_is_err(struct ntb_softc *ntb)
2207 {
2208 	uint32_t status;
2209 
2210 	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
2211 
2212 	status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
2213 	if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
2214 		return (true);
2215 
2216 	status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
2217 	return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
2218 }
2219 
2220 /* Atom does not have a link status interrupt; poll on that platform. */
2221 static void
2222 atom_link_hb(void *arg)
2223 {
2224 	struct ntb_softc *ntb = arg;
2225 	sbintime_t timo, poll_ts;
2226 
2227 	timo = NTB_HB_TIMEOUT * hz;
2228 	poll_ts = ntb->last_ts + timo;
2229 
2230 	/*
2231 	 * Delay polling the link status if an interrupt was received, unless
2232 	 * the cached link status says the link is down.
2233 	 */
2234 	if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
2235 		timo = poll_ts - ticks;
2236 		goto out;
2237 	}
2238 
2239 	if (intel_ntb_poll_link(ntb))
2240 		ntb_link_event(ntb->device);
2241 
2242 	if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
2243 		/* Link is down with error, proceed with recovery */
2244 		callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
2245 		return;
2246 	}
2247 
2248 out:
2249 	callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
2250 }
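
/*
 * Illustrative note: the signed subtraction in atom_link_hb() is the
 * usual wraparound-safe way to compare tick counts (two's complement
 * wrap assumed).  A sketch with 32-bit ticks:
 *
 *	int32_t now = INT32_MAX;	// about to wrap
 *	int32_t deadline = now + 10;	// wraps to a negative value
 *	// now - deadline == -10 < 0: correctly "before the deadline",
 *	// whereas a naive "now < deadline" test would misfire here.
 */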
2251 
2252 static void
2253 atom_perform_link_restart(struct ntb_softc *ntb)
2254 {
2255 	uint32_t status;
2256 
2257 	/* Driver resets the NTB ModPhy lanes - magic! */
2258 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
2259 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
2260 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
2261 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
2262 
2263 	/* Driver waits 100ms to allow the NTB ModPhy to settle */
2264 	pause("ModPhy", hz / 10);
2265 
2266 	/* Clear AER Errors, write to clear */
2267 	status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
2268 	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
2269 	intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
2270 
2271 	/* Clear unexpected electrical idle event in LTSSM, write to clear */
2272 	status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
2273 	status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
2274 	intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
2275 
2276 	/* Clear DeSkew Buffer error, write to clear */
2277 	status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
2278 	status |= ATOM_DESKEWSTS_DBERR;
2279 	intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
2280 
2281 	status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
2282 	status &= ATOM_IBIST_ERR_OFLOW;
2283 	intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
2284 
2285 	/* Releases the NTB state machine to allow the link to retrain */
2286 	status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
2287 	status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
2288 	intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
2289 }
2290 
2291 static int
2292 intel_ntb_port_number(device_t dev)
2293 {
2294 	struct ntb_softc *ntb = device_get_softc(dev);
2295 
2296 	return (ntb->dev_type == NTB_DEV_USD ? 0 : 1);
2297 }
2298 
2299 static int
2300 intel_ntb_peer_port_count(device_t dev)
2301 {
2302 
2303 	return (1);
2304 }
2305 
2306 static int
2307 intel_ntb_peer_port_number(device_t dev, int pidx)
2308 {
2309 	struct ntb_softc *ntb = device_get_softc(dev);
2310 
2311 	if (pidx != 0)
2312 		return (-EINVAL);
2313 
2314 	return (ntb->dev_type == NTB_DEV_USD ? 1 : 0);
2315 }
2316 
2317 static int
2318 intel_ntb_peer_port_idx(device_t dev, int port)
2319 {
2320 	int peer_port;
2321 
2322 	peer_port = intel_ntb_peer_port_number(dev, 0);
2323 	if (peer_port == -EINVAL || port != peer_port)
2324 		return (-EINVAL);
2325 
2326 	return (0);
2327 }
2328 
2329 static int
2330 intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
2331     enum ntb_width width __unused)
2332 {
2333 	struct ntb_softc *ntb = device_get_softc(dev);
2334 	uint32_t cntl;
2335 
2336 	intel_ntb_printf(2, "%s\n", __func__);
2337 
2338 	if (ntb->type == NTB_ATOM) {
2339 		pci_write_config(ntb->device, NTB_PPD_OFFSET,
2340 		    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
2341 		return (0);
2342 	}
2343 
2344 	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
2345 		ntb_link_event(dev);
2346 		return (0);
2347 	}
2348 
2349 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2350 	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
2351 	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
2352 	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
2353 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2354 		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
2355 	intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2356 	return (0);
2357 }
2358 
2359 static int
2360 intel_ntb_link_disable(device_t dev)
2361 {
2362 	struct ntb_softc *ntb = device_get_softc(dev);
2363 	uint32_t cntl;
2364 
2365 	intel_ntb_printf(2, "%s\n", __func__);
2366 
2367 	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
2368 		ntb_link_event(dev);
2369 		return (0);
2370 	}
2371 
2372 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2373 	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
2374 	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
2375 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2376 		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
2377 	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
2378 	intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2379 	return (0);
2380 }
2381 
2382 static bool
2383 intel_ntb_link_enabled(device_t dev)
2384 {
2385 	struct ntb_softc *ntb = device_get_softc(dev);
2386 	uint32_t cntl;
2387 
2388 	if (ntb->type == NTB_ATOM) {
2389 		cntl = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
2390 		return ((cntl & ATOM_PPD_INIT_LINK) != 0);
2391 	}
2392 
2393 	if (ntb->conn_type == NTB_CONN_TRANSPARENT)
2394 		return (true);
2395 
2396 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2397 	return ((cntl & NTB_CNTL_LINK_DISABLE) == 0);
2398 }
2399 
2400 static void
2401 recover_atom_link(void *arg)
2402 {
2403 	struct ntb_softc *ntb = arg;
2404 	unsigned speed, width, oldspeed, oldwidth;
2405 	uint32_t status32;
2406 
2407 	atom_perform_link_restart(ntb);
2408 
2409 	/*
2410 	 * There is a potential race between the two NTB devices recovering at
2411 	 * the same time.  If the times are the same, the link will not recover
2412 	 * and the driver will be stuck in this loop forever.  Add a random
2413 	 * interval to the recovery time to prevent this race.
2414 	 */
2415 	status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
2416 	pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);
2417 
2418 	if (atom_link_is_err(ntb))
2419 		goto retry;
2420 
2421 	status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2422 	if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
2423 		goto out;
2424 
2425 	status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2426 	width = NTB_LNK_STA_WIDTH(status32);
2427 	speed = status32 & NTB_LINK_SPEED_MASK;
2428 
2429 	oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
2430 	oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
2431 	if (oldwidth != width || oldspeed != speed)
2432 		goto retry;
2433 
2434 out:
2435 	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
2436 	    ntb);
2437 	return;
2438 
2439 retry:
2440 	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
2441 	    ntb);
2442 }
2443 
2444 /*
2445  * Polls the HW link status register(s); returns true if something has changed.
2446  */
2447 static bool
2448 intel_ntb_poll_link(struct ntb_softc *ntb)
2449 {
2450 	uint32_t ntb_cntl;
2451 	uint16_t reg_val;
2452 
2453 	if (ntb->type == NTB_ATOM) {
2454 		ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2455 		if (ntb_cntl == ntb->ntb_ctl)
2456 			return (false);
2457 
2458 		ntb->ntb_ctl = ntb_cntl;
2459 		ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2460 	} else {
2461 		if (ntb->type == NTB_XEON_GEN1)
2462 			db_iowrite_raw(ntb, ntb->self_reg->db_bell,
2463 			    ntb->db_link_mask);
2464 
2465 		reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
2466 		if (reg_val == ntb->lnk_sta)
2467 			return (false);
2468 
2469 		ntb->lnk_sta = reg_val;
2470 
2471 		if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2472 			if (_xeon_link_is_up(ntb)) {
2473 				if (!ntb->peer_msix_good) {
2474 					callout_reset(&ntb->peer_msix_work, 0,
2475 					    intel_ntb_exchange_msix, ntb);
2476 					return (false);
2477 				}
2478 			} else {
2479 				ntb->peer_msix_good = false;
2480 				ntb->peer_msix_done = false;
2481 			}
2482 		}
2483 	}
2484 	return (true);
2485 }
2486 
2487 static inline enum ntb_speed
2488 intel_ntb_link_sta_speed(struct ntb_softc *ntb)
2489 {
2490 
2491 	if (!link_is_up(ntb))
2492 		return (NTB_SPEED_NONE);
2493 	return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
2494 }
2495 
2496 static inline enum ntb_width
2497 intel_ntb_link_sta_width(struct ntb_softc *ntb)
2498 {
2499 
2500 	if (!link_is_up(ntb))
2501 		return (NTB_WIDTH_NONE);
2502 	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
2503 }
2504 
2505 SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
2506     "Driver state, statistics, and HW registers");
2507 
2508 #define NTB_REGSZ_MASK	(3ul << 30)
2509 #define NTB_REG_64	(1ul << 30)
2510 #define NTB_REG_32	(2ul << 30)
2511 #define NTB_REG_16	(3ul << 30)
2512 #define NTB_REG_8	(0ul << 30)
2513 
2514 #define NTB_DB_READ	(1ul << 29)
2515 #define NTB_PCI_REG	(1ul << 28)
2516 #define NTB_REGFLAGS_MASK	(NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG)
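
/*
 * Illustrative example: each register sysctl below packs an access width,
 * flags, and a register offset into its arg2, e.g.:
 *
 *	arg2 = NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET;
 *
 * which sysctl_handle_register() unpacks as:
 *
 *	reg = arg2 & ~NTB_REGFLAGS_MASK;	// register offset
 *	sz  = arg2 & NTB_REGSZ_MASK;		// access width
 *	pci = (arg2 & NTB_PCI_REG) != 0;	// config space, not MMIO
 *
 * so offsets must fit in the low 28 bits.
 */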
2517 
2518 static void
2519 intel_ntb_sysctl_init(struct ntb_softc *ntb)
2520 {
2521 	struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar;
2522 	struct sysctl_ctx_list *ctx;
2523 	struct sysctl_oid *tree, *tmptree;
2524 
2525 	ctx = device_get_sysctl_ctx(ntb->device);
2526 	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
2527 
2528 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "link_status",
2529 	    CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, ntb, 0,
2530 	    sysctl_handle_link_status_human, "A",
2531 	    "Link status (human readable)");
2532 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "active",
2533 	    CTLFLAG_RD | CTLTYPE_UINT | CTLFLAG_MPSAFE, ntb, 0,
2534 	    sysctl_handle_link_status, "IU",
2535 	    "Link status (1=active, 0=inactive)");
2536 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "admin_up",
2537 	    CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, ntb, 0,
2538 	    sysctl_handle_link_admin, "IU",
2539 	    "Set/get interface status (1=UP, 0=DOWN)");
2540 
2541 	tree = SYSCTL_ADD_NODE(ctx, globals, OID_AUTO, "debug_info",
2542 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2543 	    "Driver state, statistics, and HW registers");
2544 	tree_par = SYSCTL_CHILDREN(tree);
2545 
2546 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD,
2547 	    &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port");
2548 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD,
2549 	    &ntb->dev_type, 0, "0 - USD; 1 - DSD");
2550 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD,
2551 	    &ntb->ppd, 0, "Raw PPD register (cached)");
2552 
2553 	if (ntb->b2b_mw_idx != B2B_MW_DISABLED) {
2554 		SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD,
2555 		    &ntb->b2b_mw_idx, 0,
2556 		    "Index of the MW used for B2B remote register access");
2557 		SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off",
2558 		    CTLFLAG_RD, &ntb->b2b_off,
2559 		    "If non-zero, offset of B2B register region in shared MW");
2560 	}
2561 
2562 	SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features",
2563 	    CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE, ntb, 0,
2564 	    sysctl_handle_features, "A", "Features/errata of this NTB device");
2565 
2566 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD,
2567 	    __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0,
2568 	    "NTB CTL register (cached)");
2569 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD,
2570 	    __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0,
2571 	    "LNK STA register (cached)");
2572 
2573 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD,
2574 	    &ntb->mw_count, 0, "MW count");
2575 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD,
2576 	    &ntb->spad_count, 0, "Scratchpad count");
2577 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD,
2578 	    &ntb->db_count, 0, "Doorbell count");
2579 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD,
2580 	    &ntb->db_vec_count, 0, "Doorbell vector count");
2581 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD,
2582 	    &ntb->db_vec_shift, 0, "Doorbell vector shift");
2583 
2584 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD,
2585 	    &ntb->db_valid_mask, "Doorbell valid mask");
2586 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD,
2587 	    &ntb->db_link_mask, "Doorbell link mask");
2588 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD,
2589 	    &ntb->db_mask, "Doorbell mask (cached)");
2590 
2591 	tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers",
2592 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2593 	    "Raw HW registers (big-endian)");
2594 	regpar = SYSCTL_CHILDREN(tmptree);
2595 
2596 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl",
2597 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2598 	    NTB_REG_32 | ntb->reg->ntb_ctl, sysctl_handle_register, "IU",
2599 	    "NTB Control register");
2600 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap",
2601 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2602 	    NTB_REG_32 | 0x19c, sysctl_handle_register, "IU",
2603 	    "NTB Link Capabilities");
2604 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon",
2605 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2606 	    NTB_REG_32 | 0x1a0, sysctl_handle_register, "IU",
2607 	    "NTB Link Control register");
2608 
2609 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask",
2610 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2611 	    NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask,
2612 	    sysctl_handle_register, "QU", "Doorbell mask register");
2613 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell",
2614 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2615 	    NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell,
2616 	    sysctl_handle_register, "QU", "Doorbell register");
2617 
2618 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23",
2619 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2620 	    NTB_REG_64 | ntb->xlat_reg->bar2_xlat,
2621 	    sysctl_handle_register, "QU", "Incoming XLAT23 register");
2622 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2623 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4",
2624 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2625 		    NTB_REG_32 | ntb->xlat_reg->bar4_xlat,
2626 		    sysctl_handle_register, "IU", "Incoming XLAT4 register");
2627 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5",
2628 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2629 		    NTB_REG_32 | ntb->xlat_reg->bar5_xlat,
2630 		    sysctl_handle_register, "IU", "Incoming XLAT5 register");
2631 	} else {
2632 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45",
2633 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2634 		    NTB_REG_64 | ntb->xlat_reg->bar4_xlat,
2635 		    sysctl_handle_register, "QU", "Incoming XLAT45 register");
2636 	}
2637 
2638 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23",
2639 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2640 	    NTB_REG_64 | ntb->xlat_reg->bar2_limit,
2641 	    sysctl_handle_register, "QU", "Incoming LMT23 register");
2642 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2643 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4",
2644 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2645 		    NTB_REG_32 | ntb->xlat_reg->bar4_limit,
2646 		    sysctl_handle_register, "IU", "Incoming LMT4 register");
2647 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5",
2648 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2649 		    NTB_REG_32 | ntb->xlat_reg->bar5_limit,
2650 		    sysctl_handle_register, "IU", "Incoming LMT5 register");
2651 	} else {
2652 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45",
2653 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2654 		    NTB_REG_64 | ntb->xlat_reg->bar4_limit,
2655 		    sysctl_handle_register, "QU", "Incoming LMT45 register");
2656 	}
2657 
2658 	if (ntb->type == NTB_ATOM)
2659 		return;
2660 
2661 	tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats",
2662 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW statistics");
2663 	statpar = SYSCTL_CHILDREN(tmptree);
2664 	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss",
2665 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2666 	    NTB_REG_16 | XEON_USMEMMISS_OFFSET,
2667 	    sysctl_handle_register, "SU", "Upstream Memory Miss");
2668 
2669 	tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err",
2670 	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Xeon HW errors");
2671 	errpar = SYSCTL_CHILDREN(tmptree);
2672 
2673 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd",
2674 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2675 	    NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET,
2676 	    sysctl_handle_register, "CU", "PPD");
2677 
2678 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz",
2679 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2680 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET,
2681 	    sysctl_handle_register, "CU", "PBAR23 SZ (log2)");
2682 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz",
2683 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2684 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET,
2685 	    sysctl_handle_register, "CU", "PBAR4 SZ (log2)");
2686 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz",
2687 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2688 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET,
2689 	    sysctl_handle_register, "CU", "PBAR5 SZ (log2)");
2690 
2691 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz",
2692 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2693 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET,
2694 	    sysctl_handle_register, "CU", "SBAR23 SZ (log2)");
2695 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz",
2696 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2697 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET,
2698 	    sysctl_handle_register, "CU", "SBAR4 SZ (log2)");
2699 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz",
2700 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2701 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET,
2702 	    sysctl_handle_register, "CU", "SBAR5 SZ (log2)");
2703 
2704 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts",
2705 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2706 	    NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET,
2707 	    sysctl_handle_register, "SU", "DEVSTS");
2708 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts",
2709 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2710 	    NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET,
2711 	    sysctl_handle_register, "SU", "LNKSTS");
2712 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts",
2713 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2714 	    NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET,
2715 	    sysctl_handle_register, "SU", "SLNKSTS");
2716 
2717 	SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts",
2718 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2719 	    NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET,
2720 	    sysctl_handle_register, "IU", "UNCERRSTS");
2721 	SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts",
2722 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2723 	    NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET,
2724 	    sysctl_handle_register, "IU", "CORERRSTS");
2725 
2726 	if (ntb->conn_type != NTB_CONN_B2B)
2727 		return;
2728 
2729 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01l",
2730 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2731 	    NTB_REG_32 | XEON_B2B_XLAT_OFFSETL,
2732 	    sysctl_handle_register, "IU", "Outgoing XLAT0L register");
2733 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat01u",
2734 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2735 	    NTB_REG_32 | XEON_B2B_XLAT_OFFSETU,
2736 	    sysctl_handle_register, "IU", "Outgoing XLAT0U register");
2737 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23",
2738 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2739 	    NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off,
2740 	    sysctl_handle_register, "QU", "Outgoing XLAT23 register");
2741 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2742 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4",
2743 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2744 		    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2745 		    sysctl_handle_register, "IU", "Outgoing XLAT4 register");
2746 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5",
2747 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2748 		    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off,
2749 		    sysctl_handle_register, "IU", "Outgoing XLAT5 register");
2750 	} else {
2751 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45",
2752 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2753 		    NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2754 		    sysctl_handle_register, "QU", "Outgoing XLAT45 register");
2755 	}
2756 
2757 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23",
2758 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2759 	    NTB_REG_64 | XEON_PBAR2LMT_OFFSET,
2760 	    sysctl_handle_register, "QU", "Outgoing LMT23 register");
2761 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2762 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4",
2763 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2764 		    NTB_REG_32 | XEON_PBAR4LMT_OFFSET,
2765 		    sysctl_handle_register, "IU", "Outgoing LMT4 register");
2766 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5",
2767 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2768 		    NTB_REG_32 | XEON_PBAR5LMT_OFFSET,
2769 		    sysctl_handle_register, "IU", "Outgoing LMT5 register");
2770 	} else {
2771 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45",
2772 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2773 		    NTB_REG_64 | XEON_PBAR4LMT_OFFSET,
2774 		    sysctl_handle_register, "QU", "Outgoing LMT45 register");
2775 	}
2776 
2777 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base",
2778 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2779 	    NTB_REG_64 | ntb->xlat_reg->bar0_base,
2780 	    sysctl_handle_register, "QU", "Secondary BAR01 base register");
2781 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base",
2782 	    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2783 	    NTB_REG_64 | ntb->xlat_reg->bar2_base,
2784 	    sysctl_handle_register, "QU", "Secondary BAR23 base register");
2785 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2786 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base",
2787 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2788 		    NTB_REG_32 | ntb->xlat_reg->bar4_base,
2789 		    sysctl_handle_register, "IU",
2790 		    "Secondary BAR4 base register");
2791 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base",
2792 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2793 		    NTB_REG_32 | ntb->xlat_reg->bar5_base,
2794 		    sysctl_handle_register, "IU",
2795 		    "Secondary BAR5 base register");
2796 	} else {
2797 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
2798 		    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, ntb,
2799 		    NTB_REG_64 | ntb->xlat_reg->bar4_base,
2800 		    sysctl_handle_register, "QU",
2801 		    "Secondary BAR45 base register");
2802 	}
2803 }
2804 
2805 static int
2806 sysctl_handle_features(SYSCTL_HANDLER_ARGS)
2807 {
2808 	struct ntb_softc *ntb = arg1;
2809 	struct sbuf sb;
2810 	int error;
2811 
2812 	sbuf_new_for_sysctl(&sb, NULL, 256, req);
2813 
2814 	sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
2815 	error = sbuf_finish(&sb);
2816 	sbuf_delete(&sb);
2817 
2818 	if (error || !req->newptr)
2819 		return (error);
2820 	return (EINVAL);
2821 }
2822 
2823 static int
2824 sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
2825 {
2826 	struct ntb_softc *ntb = arg1;
2827 	unsigned old, new;
2828 	int error;
2829 
2830 	old = intel_ntb_link_enabled(ntb->device);
2831 
2832 	error = SYSCTL_OUT(req, &old, sizeof(old));
2833 	if (error != 0 || req->newptr == NULL)
2834 		return (error);
2835 
2836 	error = SYSCTL_IN(req, &new, sizeof(new));
2837 	if (error != 0)
2838 		return (error);
2839 
2840 	intel_ntb_printf(0, "Admin set interface state to '%sabled'\n",
2841 	    (new != 0) ? "en" : "dis");
2842 
2843 	if (new != 0)
2844 		error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
2845 	else
2846 		error = intel_ntb_link_disable(ntb->device);
2847 	return (error);
2848 }
2849 
2850 static int
2851 sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)
2852 {
2853 	struct ntb_softc *ntb = arg1;
2854 	struct sbuf sb;
2855 	enum ntb_speed speed;
2856 	enum ntb_width width;
2857 	int error;
2858 
2859 	sbuf_new_for_sysctl(&sb, NULL, 32, req);
2860 
2861 	if (intel_ntb_link_is_up(ntb->device, &speed, &width))
2862 		sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
2863 		    (unsigned)speed, (unsigned)width);
2864 	else
2865 		sbuf_printf(&sb, "down");
2866 
2867 	error = sbuf_finish(&sb);
2868 	sbuf_delete(&sb);
2869 
2870 	if (error || !req->newptr)
2871 		return (error);
2872 	return (EINVAL);
2873 }
2874 
2875 static int
2876 sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
2877 {
2878 	struct ntb_softc *ntb = arg1;
2879 	unsigned res;
2880 	int error;
2881 
2882 	res = intel_ntb_link_is_up(ntb->device, NULL, NULL);
2883 
2884 	error = SYSCTL_OUT(req, &res, sizeof(res));
2885 	if (error || !req->newptr)
2886 		return (error);
2887 	return (EINVAL);
2888 }
2889 
2890 static int
2891 sysctl_handle_register(SYSCTL_HANDLER_ARGS)
2892 {
2893 	struct ntb_softc *ntb;
2894 	const void *outp;
2895 	uintptr_t sz;
2896 	uint64_t umv;
2897 	char be[sizeof(umv)];
2898 	size_t outsz;
2899 	uint32_t reg;
2900 	bool db, pci;
2901 	int error;
2902 
2903 	ntb = arg1;
2904 	reg = arg2 & ~NTB_REGFLAGS_MASK;
2905 	sz = arg2 & NTB_REGSZ_MASK;
2906 	db = (arg2 & NTB_DB_READ) != 0;
2907 	pci = (arg2 & NTB_PCI_REG) != 0;
2908 
2909 	KASSERT(!(db && pci), ("bogus"));
2910 
2911 	if (db) {
2912 		KASSERT(sz == NTB_REG_64, ("bogus"));
2913 		umv = db_ioread(ntb, reg);
2914 		outsz = sizeof(uint64_t);
2915 	} else {
2916 		switch (sz) {
2917 		case NTB_REG_64:
2918 			if (pci)
2919 				umv = pci_read_config(ntb->device, reg, 8);
2920 			else
2921 				umv = intel_ntb_reg_read(8, reg);
2922 			outsz = sizeof(uint64_t);
2923 			break;
2924 		case NTB_REG_32:
2925 			if (pci)
2926 				umv = pci_read_config(ntb->device, reg, 4);
2927 			else
2928 				umv = intel_ntb_reg_read(4, reg);
2929 			outsz = sizeof(uint32_t);
2930 			break;
2931 		case NTB_REG_16:
2932 			if (pci)
2933 				umv = pci_read_config(ntb->device, reg, 2);
2934 			else
2935 				umv = intel_ntb_reg_read(2, reg);
2936 			outsz = sizeof(uint16_t);
2937 			break;
2938 		case NTB_REG_8:
2939 			if (pci)
2940 				umv = pci_read_config(ntb->device, reg, 1);
2941 			else
2942 				umv = intel_ntb_reg_read(1, reg);
2943 			outsz = sizeof(uint8_t);
2944 			break;
2945 		default:
2946 			panic("bogus");
2947 			break;
2948 		}
2949 	}
2950 
2951 	/* Encode big-endian so that the sysctl -x output is legible. */
2952 	be64enc(be, umv);
2953 	outp = ((char *)be) + sizeof(umv) - outsz;
2954 
2955 	error = SYSCTL_OUT(req, outp, outsz);
2956 	if (error || !req->newptr)
2957 		return (error);
2958 	return (EINVAL);
2959 }
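
/*
 * Illustrative note: the big-endian encode plus pointer offset above
 * slices out just the meaningful low-order bytes for any access width.
 * For a 16-bit register value 0x1234:
 *
 *	be64enc(be, 0x1234);	// be[] = 00 00 00 00 00 00 12 34
 *	outp = be + 8 - 2;	// points at the "12 34" tail
 *
 * so "sysctl -x" prints 0x1234 regardless of register size.
 */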
2960 
2961 static unsigned
2962 intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
2963 {
2964 
2965 	if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2966 	    uidx >= ntb->b2b_mw_idx) ||
2967 	    (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2968 		uidx++;
2969 	if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2970 	    uidx >= ntb->b2b_mw_idx) &&
2971 	    (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2972 		uidx++;
2973 	return (uidx);
2974 }
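
/*
 * Worked example (illustrative): reserved MWs are hidden from consumers
 * by shifting user indices past them.  With mw_count = 3, the B2B MW at
 * index 0 fully consumed (b2b_off == 0), and MSI-X unused:
 *
 *	uidx 0 -> 1
 *	uidx 1 -> 2
 *
 * which matches intel_ntb_mw_count() reporting 2 user-visible MWs.
 */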
2975 
2976 #ifndef EARLY_AP_STARTUP
2977 static int msix_ready;
2978 
2979 static void
2980 intel_ntb_msix_ready(void *arg __unused)
2981 {
2982 
2983 	msix_ready = 1;
2984 }
2985 SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
2986     intel_ntb_msix_ready, NULL);
2987 #endif
2988 
2989 static void
2990 intel_ntb_exchange_msix(void *ctx)
2991 {
2992 	struct ntb_softc *ntb;
2993 	uint32_t val;
2994 	unsigned i;
2995 
2996 	ntb = ctx;
2997 
2998 	if (ntb->peer_msix_good)
2999 		goto msix_good;
3000 	if (ntb->peer_msix_done)
3001 		goto msix_done;
3002 
3003 #ifndef EARLY_AP_STARTUP
3004 	/* Block MSI-X negotiation until SMP starts and IRQs reshuffle. */
3005 	if (!msix_ready)
3006 		goto reschedule;
3007 #endif
3008 
3009 	intel_ntb_get_msix_info(ntb);
3010 	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3011 		intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
3012 		    ntb->msix_data[i].nmd_data);
3013 		intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
3014 		    ntb->msix_data[i].nmd_ofs - ntb->msix_xlat);
3015 	}
3016 	intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD);
3017 
3018 	intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
3019 	if (val != NTB_MSIX_VER_GUARD)
3020 		goto reschedule;
3021 
3022 	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
3023 		intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
3024 		intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
3025 		ntb->peer_msix_data[i].nmd_data = val;
3026 		intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
3027 		intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
3028 		ntb->peer_msix_data[i].nmd_ofs = val;
3029 	}
3030 
3031 	ntb->peer_msix_done = true;
3032 
3033 msix_done:
3034 	intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE, NTB_MSIX_RECEIVED);
3035 	intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
3036 	if (val != NTB_MSIX_RECEIVED)
3037 		goto reschedule;
3038 
3039 	intel_ntb_spad_clear(ntb->device);
3040 	ntb->peer_msix_good = true;
3041 	/* Give peer time to see our NTB_MSIX_RECEIVED. */
3042 	goto reschedule;
3043 
3044 msix_good:
3045 	intel_ntb_poll_link(ntb);
3046 	ntb_link_event(ntb->device);
3047 	return;
3048 
3049 reschedule:
3050 	ntb->lnk_sta = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
3051 	if (_xeon_link_is_up(ntb)) {
3052 		callout_reset(&ntb->peer_msix_work,
3053 		    hz * (ntb->peer_msix_good ? 2 : 1) / 10,
3054 		    intel_ntb_exchange_msix, ntb);
3055 	} else
3056 		intel_ntb_spad_clear(ntb->device);
3057 }
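
/*
 * Summary of the scratchpad handshake above (descriptive only): each side
 * publishes its MSI-X data/offset pairs, then the NTB_MSIX_GUARD value;
 * once the peer's guard is observed, the peer's data is latched and
 * NTB_MSIX_DONE is acknowledged with NTB_MSIX_RECEIVED; only after both
 * sides have seen the acknowledgement is the link reported up.  A link
 * bounce clears peer_msix_good/peer_msix_done and restarts the exchange.
 */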
3058 
3059 /*
3060  * Public API to the rest of the OS
3061  */
3062 
3063 static uint8_t
3064 intel_ntb_spad_count(device_t dev)
3065 {
3066 	struct ntb_softc *ntb = device_get_softc(dev);
3067 
3068 	return (ntb->spad_count);
3069 }
3070 
3071 static uint8_t
3072 intel_ntb_mw_count(device_t dev)
3073 {
3074 	struct ntb_softc *ntb = device_get_softc(dev);
3075 	uint8_t res;
3076 
3077 	res = ntb->mw_count;
3078 	if (ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0)
3079 		res--;
3080 	if (ntb->msix_mw_idx != B2B_MW_DISABLED)
3081 		res--;
3082 	return (res);
3083 }
3084 
3085 static int
3086 intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
3087 {
3088 	struct ntb_softc *ntb = device_get_softc(dev);
3089 
3090 	if (idx >= ntb->spad_count)
3091 		return (EINVAL);
3092 
3093 	intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
3094 
3095 	return (0);
3096 }
3097 
3098 /*
3099  * Zeros all local scratchpad registers.
3100  */
3101 static void
3102 intel_ntb_spad_clear(device_t dev)
3103 {
3104 	struct ntb_softc *ntb = device_get_softc(dev);
3105 	unsigned i;
3106 
3107 	for (i = 0; i < ntb->spad_count; i++)
3108 		intel_ntb_spad_write(dev, i, 0);
3109 }
3110 
3111 static int
3112 intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
3113 {
3114 	struct ntb_softc *ntb = device_get_softc(dev);
3115 
3116 	if (idx >= ntb->spad_count)
3117 		return (EINVAL);
3118 
3119 	*val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
3120 
3121 	return (0);
3122 }
3123 
3124 static int
3125 intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
3126 {
3127 	struct ntb_softc *ntb = device_get_softc(dev);
3128 
3129 	if (idx >= ntb->spad_count)
3130 		return (EINVAL);
3131 
3132 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
3133 		intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
3134 	else
3135 		intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
3136 
3137 	return (0);
3138 }
3139 
3140 static int
3141 intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
3142 {
3143 	struct ntb_softc *ntb = device_get_softc(dev);
3144 
3145 	if (idx >= ntb->spad_count)
3146 		return (EINVAL);
3147 
3148 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
3149 		*val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
3150 	else
3151 		*val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
3152 
3153 	return (0);
3154 }
3155 
3156 static int
3157 intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
3158     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
3159     bus_addr_t *plimit)
3160 {
3161 	struct ntb_softc *ntb = device_get_softc(dev);
3162 	struct ntb_pci_bar_info *bar;
3163 	bus_addr_t limit;
3164 	size_t bar_b2b_off;
3165 	enum ntb_bar bar_num;
3166 
3167 	if (mw_idx >= intel_ntb_mw_count(dev))
3168 		return (EINVAL);
3169 	mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx);
3170 
3171 	bar_num = intel_ntb_mw_to_bar(ntb, mw_idx);
3172 	bar = &ntb->bar_info[bar_num];
3173 	bar_b2b_off = 0;
3174 	if (mw_idx == ntb->b2b_mw_idx) {
3175 		KASSERT(ntb->b2b_off != 0,
3176 		    ("user shouldn't get non-shared b2b mw"));
3177 		bar_b2b_off = ntb->b2b_off;
3178 	}
3179 
3180 	if (bar_is_64bit(ntb, bar_num))
3181 		limit = BUS_SPACE_MAXADDR;
3182 	else
3183 		limit = BUS_SPACE_MAXADDR_32BIT;
3184 
3185 	if (base != NULL)
3186 		*base = bar->pbase + bar_b2b_off;
3187 	if (vbase != NULL)
3188 		*vbase = bar->vbase + bar_b2b_off;
3189 	if (size != NULL)
3190 		*size = bar->size - bar_b2b_off;
3191 	if (align != NULL)
3192 		*align = bar->size;
3193 	if (align_size != NULL)
3194 		*align_size = 1;
3195 	if (plimit != NULL)
3196 		*plimit = limit;
3197 	return (0);
3198 }
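
/*
 * Hypothetical consumer sketch (illustrative, using the generic NTB KPI
 * that wraps this method):
 *
 *	vm_paddr_t base;
 *	size_t size, align, align_size;
 *	bus_addr_t plimit;
 *
 *	error = ntb_mw_get_range(dev, 0, &base, NULL, &size, &align,
 *	    &align_size, &plimit);
 *
 * The buffer later passed to ntb_mw_set_trans() must observe the
 * reported alignment and address limit.
 */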
3199 
3200 static int
3201 intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
3202 {
3203 	struct ntb_softc *ntb = device_get_softc(dev);
3204 	struct ntb_pci_bar_info *bar;
3205 	uint64_t base, limit, reg_val;
3206 	size_t bar_size, mw_size;
3207 	uint32_t base_reg, xlat_reg, limit_reg;
3208 	enum ntb_bar bar_num;
3209 
3210 	if (idx >= intel_ntb_mw_count(dev))
3211 		return (EINVAL);
3212 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
3213 
3214 	bar_num = intel_ntb_mw_to_bar(ntb, idx);
3215 	bar = &ntb->bar_info[bar_num];
3216 
3217 	bar_size = bar->size;
3218 	if (idx == ntb->b2b_mw_idx)
3219 		mw_size = bar_size - ntb->b2b_off;
3220 	else
3221 		mw_size = bar_size;
3222 
3223 	/* The hardware requires that addr be aligned to the BAR size. */
3224 	if ((addr & (bar_size - 1)) != 0)
3225 		return (EINVAL);
3226 
3227 	if (size > mw_size)
3228 		return (EINVAL);
3229 
3230 	bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);
3231 
3232 	limit = 0;
3233 	if (bar_is_64bit(ntb, bar_num)) {
3234 		if (ntb->type == NTB_XEON_GEN3)
3235 			base = addr;
3236 		else
3237 			base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
3238 
3239 		if (limit_reg != 0 && size != mw_size)
3240 			limit = base + size;
3241 		else
3242 			limit = base + mw_size;
3243 
3244 		/* Set and verify translation address */
3245 		intel_ntb_reg_write(8, xlat_reg, addr);
3246 		reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
3247 		if (reg_val != addr) {
3248 			intel_ntb_reg_write(8, xlat_reg, 0);
3249 			return (EIO);
3250 		}
3251 
3252 		/* Set and verify the limit */
3253 		intel_ntb_reg_write(8, limit_reg, limit);
3254 		reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
3255 		if (reg_val != limit) {
3256 			intel_ntb_reg_write(8, limit_reg, base);
3257 			intel_ntb_reg_write(8, xlat_reg, 0);
3258 			return (EIO);
3259 		}
3260 	} else {
3261 		/* Configure 32-bit (split) BAR MW */
3262 		if (ntb->type == NTB_XEON_GEN3)
3263 			return (EIO);
3264 
3265 		if ((addr & UINT32_MAX) != addr)
3266 			return (ERANGE);
3267 		if (((addr + size) & UINT32_MAX) != (addr + size))
3268 			return (ERANGE);
3269 
3270 		base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
3271 
3272 		if (limit_reg != 0 && size != mw_size)
3273 			limit = base + size;
3274 
3275 		/* Set and verify translation address */
3276 		intel_ntb_reg_write(4, xlat_reg, addr);
3277 		reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
3278 		if (reg_val != addr) {
3279 			intel_ntb_reg_write(4, xlat_reg, 0);
3280 			return (EIO);
3281 		}
3282 
3283 		/* Set and verify the limit */
3284 		intel_ntb_reg_write(4, limit_reg, limit);
3285 		reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
3286 		if (reg_val != limit) {
3287 			intel_ntb_reg_write(4, limit_reg, base);
3288 			intel_ntb_reg_write(4, xlat_reg, 0);
3289 			return (EIO);
3290 		}
3291 	}
3292 	return (0);
3293 }
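
/*
 * Hypothetical consumer sketch: the BAR-size alignment checked above can
 * be satisfied with contigmalloc(9) using the geometry reported by
 * ntb_mw_get_range() (va/size/align/plimit as in the previous sketch;
 * M_NTB_EX is an example malloc type, not defined here):
 *
 *	va = contigmalloc(size, M_NTB_EX, M_WAITOK | M_ZERO,
 *	    0, plimit, align, 0);
 *	error = ntb_mw_set_trans(dev, 0, vtophys(va), size);
 */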
3294 
3295 static int
3296 intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
3297 {
3298 
3299 	return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0));
3300 }
3301 
3302 static int
3303 intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
3304 {
3305 	struct ntb_softc *ntb = device_get_softc(dev);
3306 	struct ntb_pci_bar_info *bar;
3307 
3308 	if (idx >= intel_ntb_mw_count(dev))
3309 		return (EINVAL);
3310 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
3311 
3312 	bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
3313 	*mode = bar->map_mode;
3314 	return (0);
3315 }
3316 
3317 static int
3318 intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
3319 {
3320 	struct ntb_softc *ntb = device_get_softc(dev);
3321 
3322 	if (idx >= intel_ntb_mw_count(dev))
3323 		return (EINVAL);
3324 
3325 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
3326 	return (intel_ntb_mw_set_wc_internal(ntb, idx, mode));
3327 }
3328 
3329 static int
3330 intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
3331 {
3332 	struct ntb_pci_bar_info *bar;
3333 	int rc;
3334 
3335 	bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
3336 	if (bar->map_mode == mode)
3337 		return (0);
3338 
3339 	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mode);
3340 	if (rc == 0)
3341 		bar->map_mode = mode;
3342 
3343 	return (rc);
3344 }
3345 
3346 static void
3347 intel_ntb_peer_db_set(device_t dev, uint64_t bits)
3348 {
3349 	struct ntb_softc *ntb = device_get_softc(dev);
3350 	uint64_t db;
3351 
3352 	if ((bits & ~ntb->db_valid_mask) != 0) {
3353 		device_printf(ntb->device, "Invalid doorbell bits %#jx\n",
3354 		    (uintmax_t)bits);
3355 		return;
3356 	}
3357 
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		struct ntb_pci_bar_info *lapic;
		unsigned i;

		lapic = ntb->peer_lapic_bar;

		for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
			if ((bits & intel_ntb_db_vector_mask(dev, i)) != 0)
				bus_space_write_4(lapic->pci_bus_tag,
				    lapic->pci_bus_handle,
				    ntb->peer_msix_data[i].nmd_ofs,
				    ntb->peer_msix_data[i].nmd_data);
		}
		return;
	}

	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
		intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bits);
		return;
	}

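	/*
	 * Xeon Gen3 has an individual byte-wide doorbell register per
	 * bit, spaced 4 bytes apart: ffsll() yields the lowest set bit
	 * (1-based) and "bits & (bits - 1)" clears it, so each
	 * requested doorbell is rung with its own 1-byte write.
	 */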
	if (ntb->type == NTB_XEON_GEN3) {
		while (bits != 0) {
			db = ffsll(bits);

			intel_ntb_reg_write(1,
			    ntb->peer_reg->db_bell + (db - 1) * 4, 0x1);

			bits = bits & (bits - 1);
		}
	} else {
		db_iowrite(ntb, ntb->peer_reg->db_bell, bits);
	}
}

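/*
 * Report the bus address and size of the peer doorbell register so
 * that, e.g., a DMA engine can ring the peer's doorbell with a plain
 * memory write instead of calling back into this driver.
 */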
static int
intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
{
	struct ntb_softc *ntb = device_get_softc(dev);
	struct ntb_pci_bar_info *bar;
	uint64_t regoff;

	KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL"));

	if (!HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
		bar = &ntb->bar_info[NTB_CONFIG_BAR];
		regoff = ntb->peer_reg->db_bell;
	} else {
		KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
		    ("invalid b2b idx"));

		bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
		regoff = XEON_PDOORBELL_OFFSET;
	}
	KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO,
	    ("peer doorbell must be memory-mapped, not in I/O port space"));

	/* HACK: Specific to current x86 bus implementation. */
	*db_addr = ((uint64_t)bar->pci_bus_handle + regoff);
	*db_size = ntb->reg->db_size;
	return (0);
}

static uint64_t
intel_ntb_db_valid_mask(device_t dev)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	return (ntb->db_valid_mask);
}

static int
intel_ntb_db_vector_count(device_t dev)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	return (ntb->db_vec_count);
}

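/*
 * Return the subset of valid doorbell bits serviced by the given
 * interrupt vector; vectors beyond the configured count yield an
 * empty mask.
 */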
static uint64_t
intel_ntb_db_vector_mask(device_t dev, uint32_t vector)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	if (vector > ntb->db_vec_count)
		return (0);
	return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector));
}

static bool
intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	if (speed != NULL)
		*speed = intel_ntb_link_sta_speed(ntb);
	if (width != NULL)
		*width = intel_ntb_link_sta_width(ntb);
	return (link_is_up(ntb));
}

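/* Cache the bus tag/handle and address/size/KVA of a mapped BAR. */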
static void
save_bar_parameters(struct ntb_pci_bar_info *bar)
{

	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
	bar->pbase = rman_get_start(bar->pci_resource);
	bar->size = rman_get_size(bar->pci_resource);
	bar->vbase = rman_get_virtual(bar->pci_resource);
}

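/*
 * newbus glue: besides the usual device and bus methods, the table
 * exports the ntb(4) KPI consumed by children such as ntb_transport(4).
 */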
static device_method_t ntb_intel_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		intel_ntb_probe),
	DEVMETHOD(device_attach,	intel_ntb_attach),
	DEVMETHOD(device_detach,	intel_ntb_detach),
	/* Bus interface */
	DEVMETHOD(bus_child_location,	ntb_child_location),
	DEVMETHOD(bus_print_child,	ntb_print_child),
	DEVMETHOD(bus_get_dma_tag,	ntb_get_dma_tag),
	/* NTB interface */
	DEVMETHOD(ntb_port_number,	intel_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count,	intel_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number,	intel_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx,	intel_ntb_peer_port_idx),
	DEVMETHOD(ntb_link_is_up,	intel_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable,	intel_ntb_link_enable),
	DEVMETHOD(ntb_link_disable,	intel_ntb_link_disable),
	DEVMETHOD(ntb_link_enabled,	intel_ntb_link_enabled),
	DEVMETHOD(ntb_mw_count,		intel_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range,	intel_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans,	intel_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans,	intel_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_get_wc,	intel_ntb_mw_get_wc),
	DEVMETHOD(ntb_mw_set_wc,	intel_ntb_mw_set_wc),
	DEVMETHOD(ntb_spad_count,	intel_ntb_spad_count),
	DEVMETHOD(ntb_spad_clear,	intel_ntb_spad_clear),
	DEVMETHOD(ntb_spad_write,	intel_ntb_spad_write),
	DEVMETHOD(ntb_spad_read,	intel_ntb_spad_read),
	DEVMETHOD(ntb_peer_spad_write,	intel_ntb_peer_spad_write),
	DEVMETHOD(ntb_peer_spad_read,	intel_ntb_peer_spad_read),
	DEVMETHOD(ntb_db_valid_mask,	intel_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count,	intel_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask,	intel_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_clear,		intel_ntb_db_clear),
	DEVMETHOD(ntb_db_clear_mask,	intel_ntb_db_clear_mask),
	DEVMETHOD(ntb_db_read,		intel_ntb_db_read),
	DEVMETHOD(ntb_db_set_mask,	intel_ntb_db_set_mask),
	DEVMETHOD(ntb_peer_db_addr,	intel_ntb_peer_db_addr),
	DEVMETHOD(ntb_peer_db_set,	intel_ntb_peer_db_set),
	DEVMETHOD_END
};

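/*
 * Register the driver on the PCI bus; the PNP info below lets
 * devmatch(8) auto-load ntb_hw_intel for the listed vendor/device
 * pairs.
 */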
static DEFINE_CLASS_0(ntb_hw, ntb_intel_driver, ntb_intel_methods,
    sizeof(struct ntb_softc));
DRIVER_MODULE(ntb_hw_intel, pci, ntb_intel_driver, NULL, NULL);
MODULE_DEPEND(ntb_hw_intel, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_intel, 1);
MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ntb_hw_intel, pci_ids,
    nitems(pci_ids));