xref: /freebsd/sys/dev/ntb/ntb_hw/ntb_hw_intel.c (revision a25896ca1270e25b657ceaa8d47d5699515f5c25)
/*-
 * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
 *
 * This module contains a driver for NTB hardware in Intel Xeon/Atom CPUs.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ntb_hw_intel.h"
#include "../ntb.h"

#define MAX_MSIX_INTERRUPTS MAX(XEON_DB_COUNT, ATOM_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */
#define BAR_HIGH_MASK		(~((1ull << 12) - 1))
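/*
 * BAR_HIGH_MASK clears the low 12 bits of an address, rounding it down to
 * its 4 KiB page: e.g., 0x12345678ull & BAR_HIGH_MASK == 0x12345000ull.
 */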

#define	NTB_MSIX_VER_GUARD	0xaabbccdd
#define	NTB_MSIX_RECEIVED	0xe0f0e0f0

/*
 * PCI constants could be somewhere more generic, but aren't defined/used in
 * pci.c.
 */
#define	PCI_MSIX_ENTRY_SIZE		16
#define	PCI_MSIX_ENTRY_LOWER_ADDR	0
#define	PCI_MSIX_ENTRY_UPPER_ADDR	4
#define	PCI_MSIX_ENTRY_DATA		8
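/*
 * The offsets above follow the PCI MSI-X table entry layout: each 16-byte
 * entry holds the 64-bit message address (low and high words), the 32-bit
 * message data, and a vector control word at offset 12 (unused here).
 */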

enum ntb_device_type {
	NTB_XEON,
	NTB_ATOM
};

/* ntb_conn_type values are hardware numbers; they cannot change. */
enum ntb_conn_type {
	NTB_CONN_TRANSPARENT = 0,
	NTB_CONN_B2B = 1,
	NTB_CONN_RP = 2,
};

enum ntb_b2b_direction {
	NTB_DEV_USD = 0,
	NTB_DEV_DSD = 1,
};

enum ntb_bar {
	NTB_CONFIG_BAR = 0,
	NTB_B2B_BAR_1,
	NTB_B2B_BAR_2,
	NTB_B2B_BAR_3,
	NTB_MAX_BARS
};

enum {
	NTB_MSIX_GUARD = 0,
	NTB_MSIX_DATA0,
	NTB_MSIX_DATA1,
	NTB_MSIX_DATA2,
	NTB_MSIX_OFS0,
	NTB_MSIX_OFS1,
	NTB_MSIX_OFS2,
	NTB_MSIX_DONE,
	NTB_MAX_MSIX_SPAD
};
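/*
 * These scratchpad indices implement the MSI-X exchange handshake (see
 * intel_ntb_exchange_msix()): a guard value, the three MSI-X data words
 * and three table offsets for the non-link doorbell vectors, and a
 * "done" flag.
 */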

/* Device features and workarounds */
#define HAS_FEATURE(ntb, feature)	\
	(((ntb)->features & (feature)) != 0)

struct ntb_hw_info {
	uint32_t		device_id;
	const char		*desc;
	enum ntb_device_type	type;
	uint32_t		features;
};

struct ntb_pci_bar_info {
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	vm_paddr_t		pbase;
	caddr_t			vbase;
	vm_size_t		size;
	vm_memattr_t		map_mode;

	/* Configuration register offsets */
	uint32_t		psz_off;
	uint32_t		ssz_off;
	uint32_t		pbarxlat_off;
};

struct ntb_int_info {
	struct resource	*res;
	int		rid;
	void		*tag;
};

struct ntb_vec {
	struct ntb_softc	*ntb;
	uint32_t		num;
	unsigned		masked;
};

struct ntb_reg {
	uint32_t	ntb_ctl;
	uint32_t	lnk_sta;
	uint8_t		db_size;
	unsigned	mw_bar[NTB_MAX_BARS];
};

struct ntb_alt_reg {
	uint32_t	db_bell;
	uint32_t	db_mask;
	uint32_t	spad;
};

struct ntb_xlat_reg {
	uint32_t	bar0_base;
	uint32_t	bar2_base;
	uint32_t	bar4_base;
	uint32_t	bar5_base;

	uint32_t	bar2_xlat;
	uint32_t	bar4_xlat;
	uint32_t	bar5_xlat;

	uint32_t	bar2_limit;
	uint32_t	bar4_limit;
	uint32_t	bar5_limit;
};

struct ntb_b2b_addr {
	uint64_t	bar0_addr;
	uint64_t	bar2_addr64;
	uint64_t	bar4_addr64;
	uint64_t	bar4_addr32;
	uint64_t	bar5_addr32;
};

struct ntb_msix_data {
	uint32_t	nmd_ofs;
	uint32_t	nmd_data;
};

struct ntb_softc {
	/* ntb.c context. Do not move! Must go first! */
	void			*ntb_store;

	device_t		device;
	enum ntb_device_type	type;
	uint32_t		features;

	struct ntb_pci_bar_info	bar_info[NTB_MAX_BARS];
	struct ntb_int_info	int_info[MAX_MSIX_INTERRUPTS];
	uint32_t		allocated_interrupts;

	struct ntb_msix_data	peer_msix_data[XEON_NONLINK_DB_MSIX_BITS];
	struct ntb_msix_data	msix_data[XEON_NONLINK_DB_MSIX_BITS];
	bool			peer_msix_good;
	bool			peer_msix_done;
	struct ntb_pci_bar_info	*peer_lapic_bar;
	struct callout		peer_msix_work;

	struct callout		heartbeat_timer;
	struct callout		lr_timer;

	struct ntb_vec		*msix_vec;

	uint32_t		ppd;
	enum ntb_conn_type	conn_type;
	enum ntb_b2b_direction	dev_type;

	/* Offset of peer bar0 in B2B BAR */
	uint64_t			b2b_off;
	/* Memory window used to access peer bar0 */
#define B2B_MW_DISABLED			UINT8_MAX
	uint8_t				b2b_mw_idx;
	uint32_t			msix_xlat;
	uint8_t				msix_mw_idx;

	uint8_t				mw_count;
	uint8_t				spad_count;
	uint8_t				db_count;
	uint8_t				db_vec_count;
	uint8_t				db_vec_shift;

	/* Protects local db_mask. */
#define DB_MASK_LOCK(sc)	mtx_lock_spin(&(sc)->db_mask_lock)
#define DB_MASK_UNLOCK(sc)	mtx_unlock_spin(&(sc)->db_mask_lock)
#define DB_MASK_ASSERT(sc,f)	mtx_assert(&(sc)->db_mask_lock, (f))
	struct mtx			db_mask_lock;

	volatile uint32_t		ntb_ctl;
	volatile uint32_t		lnk_sta;

	uint64_t			db_valid_mask;
	uint64_t			db_link_mask;
	uint64_t			db_mask;
	uint64_t			fake_db;	/* NTB_SB01BASE_LOCKUP */
	uint64_t			force_db;	/* NTB_SB01BASE_LOCKUP */

	int				last_ts;	/* ticks @ last irq */

	const struct ntb_reg		*reg;
	const struct ntb_alt_reg	*self_reg;
	const struct ntb_alt_reg	*peer_reg;
	const struct ntb_xlat_reg	*xlat_reg;
};

#ifdef __i386__
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif
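/*
 * Note that these i386 helpers synthesize a 64-bit access from two 32-bit
 * ones, so the access is not atomic; that is assumed to be acceptable for
 * the registers they are used on here.
 */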

#define intel_ntb_bar_read(SIZE, bar, offset) \
	    bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset))
#define intel_ntb_bar_write(SIZE, bar, offset, val) \
	    bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
#define intel_ntb_reg_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define intel_ntb_reg_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
#define intel_ntb_mw_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset)
#define intel_ntb_mw_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset, val)
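/*
 * For example, intel_ntb_reg_read(4, ntb->reg->ntb_ctl) fetches the 32-bit
 * NTB control register through the config BAR, and intel_ntb_mw_write(8,
 * off, val) writes through the BAR backing the B2B memory window.  All of
 * these macros expect a local "ntb" softc pointer to be in scope.
 */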

static int intel_ntb_probe(device_t device);
static int intel_ntb_attach(device_t device);
static int intel_ntb_detach(device_t device);
static uint64_t intel_ntb_db_valid_mask(device_t dev);
static void intel_ntb_spad_clear(device_t dev);
static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector);
static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
    enum ntb_width *width);
static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed,
    enum ntb_width width);
static int intel_ntb_link_disable(device_t dev);
static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val);

static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
    uint32_t *base, uint32_t *xlat, uint32_t *lmt);
static int intel_ntb_map_pci_bars(struct ntb_softc *ntb);
static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
    vm_memattr_t);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
    const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
    struct ntb_pci_bar_info *bar);
static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
static int intel_ntb_init_isr(struct ntb_softc *ntb);
static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb);
static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
static void intel_ntb_free_msix_vec(struct ntb_softc *ntb);
static void intel_ntb_get_msix_info(struct ntb_softc *ntb);
static void intel_ntb_exchange_msix(void *);
static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id);
static void intel_ntb_detect_max_mw(struct ntb_softc *ntb);
static int intel_ntb_detect_xeon(struct ntb_softc *ntb);
static int intel_ntb_detect_atom(struct ntb_softc *ntb);
static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb);
static int intel_ntb_atom_init_dev(struct ntb_softc *ntb);
static void intel_ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
    enum ntb_bar regbar);
static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
    uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
    enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
    const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool _xeon_link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *);
static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
static void recover_atom_link(void *arg);
static bool intel_ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
static void intel_ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);

static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
#define intel_ntb_printf(lvl, ...) do {				\
	if ((lvl) <= g_ntb_hw_debug_level) {			\
		device_printf(ntb->device, __VA_ARGS__);	\
	}							\
} while (0)

#define	_NTB_PAT_UC	0
#define	_NTB_PAT_WC	1
#define	_NTB_PAT_WT	4
#define	_NTB_PAT_WP	5
#define	_NTB_PAT_WB	6
#define	_NTB_PAT_UCM	7
static unsigned g_ntb_mw_pat = _NTB_PAT_UC;
SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN,
    &g_ntb_mw_pat, 0, "Configure the default memory window cache flags (PAT): "
    "UC: "  __XSTRING(_NTB_PAT_UC) ", "
    "WC: "  __XSTRING(_NTB_PAT_WC) ", "
    "WT: "  __XSTRING(_NTB_PAT_WT) ", "
    "WP: "  __XSTRING(_NTB_PAT_WP) ", "
    "WB: "  __XSTRING(_NTB_PAT_WB) ", "
    "UC-: " __XSTRING(_NTB_PAT_UCM));
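/*
 * Being CTLFLAG_RDTUN, this is set as a boot-time tunable; for example,
 * hw.ntb.default_mw_pat=1 in loader.conf would map memory windows
 * write-combining (an illustrative setting, not a recommendation).
 */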

static inline vm_memattr_t
intel_ntb_pat_flags(void)
{

	switch (g_ntb_mw_pat) {
	case _NTB_PAT_WC:
		return (VM_MEMATTR_WRITE_COMBINING);
	case _NTB_PAT_WT:
		return (VM_MEMATTR_WRITE_THROUGH);
	case _NTB_PAT_WP:
		return (VM_MEMATTR_WRITE_PROTECTED);
	case _NTB_PAT_WB:
		return (VM_MEMATTR_WRITE_BACK);
	case _NTB_PAT_UCM:
		return (VM_MEMATTR_WEAK_UNCACHEABLE);
	case _NTB_PAT_UC:
		/* FALLTHROUGH */
	default:
		return (VM_MEMATTR_UNCACHEABLE);
	}
}

/*
 * Well, this obviously doesn't belong here, but it doesn't seem to exist
 * anywhere better yet.
 */
static inline const char *
intel_ntb_vm_memattr_to_str(vm_memattr_t pat)
{

	switch (pat) {
	case VM_MEMATTR_WRITE_COMBINING:
		return ("WRITE_COMBINING");
	case VM_MEMATTR_WRITE_THROUGH:
		return ("WRITE_THROUGH");
	case VM_MEMATTR_WRITE_PROTECTED:
		return ("WRITE_PROTECTED");
	case VM_MEMATTR_WRITE_BACK:
		return ("WRITE_BACK");
	case VM_MEMATTR_WEAK_UNCACHEABLE:
		return ("UNCACHED");
	case VM_MEMATTR_UNCACHEABLE:
		return ("UNCACHEABLE");
	default:
		return ("UNKNOWN");
	}
}

static int g_ntb_msix_idx = 1;
SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx,
    0, "Use this memory window to access the peer MSI-X message complex on "
    "certain Xeon-based NTB systems, as a workaround for a hardware erratum.  "
    "Like b2b_mw_idx, negative values index from the last available memory "
    "window.  (Applies on Xeon platforms with the SB01BASE_LOCKUP erratum.)");

static int g_ntb_mw_idx = -1;
SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx,
    0, "Use this memory window to access the peer NTB registers.  A "
    "non-negative value starts from the first MW index; a negative value "
    "starts from the last MW index.  The default is -1, i.e., the last "
    "available memory window.  Both sides of the NTB MUST set the same "
    "value here!  (Applies on Xeon platforms with the SDOORBELL_LOCKUP "
    "erratum.)");

/* Hardware owns the low 16 bits of features. */
#define NTB_BAR_SIZE_4K		(1 << 0)
#define NTB_SDOORBELL_LOCKUP	(1 << 1)
#define NTB_SB01BASE_LOCKUP	(1 << 2)
#define NTB_B2BDOORBELL_BIT14	(1 << 3)
/* Software/configuration owns the top 16 bits. */
#define NTB_SPLIT_BAR		(1ull << 16)

#define NTB_FEATURES_STR \
    "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \
    "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K"
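/*
 * NTB_FEATURES_STR is a printf(9) "%b" format string: the leading "\20"
 * selects hexadecimal output and each "\NN<name>" labels bit NN (1-based).
 * For example, a features value of 0x3 renders as
 * "0x3<BAR_SIZE_4K,SDOORBELL_LOCKUP>".
 */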

static struct ntb_hw_info pci_ids[] = {
	/* XXX: PS/SS IDs left out until they are supported. */
	{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
		NTB_ATOM, 0 },

	{ 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
	{ 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
	{ 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
};

static const struct ntb_reg atom_reg = {
	.ntb_ctl = ATOM_NTBCNTL_OFFSET,
	.lnk_sta = ATOM_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint64_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg atom_pri_reg = {
	.db_bell = ATOM_PDOORBELL_OFFSET,
	.db_mask = ATOM_PDBMSK_OFFSET,
	.spad = ATOM_SPAD_OFFSET,
};

static const struct ntb_alt_reg atom_b2b_reg = {
	.db_bell = ATOM_B2B_DOORBELL_OFFSET,
	.spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg atom_sec_xlat = {
#if 0
	/* "FIXME" says the Linux driver. */
	.bar0_base = ATOM_SBAR0BASE_OFFSET,
	.bar2_base = ATOM_SBAR2BASE_OFFSET,
	.bar4_base = ATOM_SBAR4BASE_OFFSET,

	.bar2_limit = ATOM_SBAR2LMT_OFFSET,
	.bar4_limit = ATOM_SBAR4LMT_OFFSET,
#endif

	.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
	.bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
};

static const struct ntb_reg xeon_reg = {
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.lnk_sta = XEON_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint16_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
};

static const struct ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_base = XEON_SBAR2BASE_OFFSET,
	.bar4_base = XEON_SBAR4BASE_OFFSET,
	.bar5_base = XEON_SBAR5BASE_OFFSET,

	.bar2_limit = XEON_SBAR2LMT_OFFSET,
	.bar4_limit = XEON_SBAR4LMT_OFFSET,
	.bar5_limit = XEON_SBAR5LMT_OFFSET,

	.bar2_xlat = XEON_SBAR2XLAT_OFFSET,
	.bar4_xlat = XEON_SBAR4XLAT_OFFSET,
	.bar5_xlat = XEON_SBAR5XLAT_OFFSET,
};

static struct ntb_b2b_addr xeon_b2b_usd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0,
    "B2B MW segment overrides -- MUST be the same on both sides");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the upstream side of the link.  MUST be the same "
    "address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the downstream side of the link.  MUST be the same"
    " address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

/*
 * OS <-> Driver interface structures
 */
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");

/*
 * OS <-> Driver linkage functions
 */
static int
intel_ntb_probe(device_t device)
{
	struct ntb_hw_info *p;

	p = intel_ntb_get_device_info(pci_get_devid(device));
	if (p == NULL)
		return (ENXIO);

	device_set_desc(device, p->desc);
	return (0);
}

static int
intel_ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = device_get_softc(device);
	p = intel_ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;
	ntb->b2b_mw_idx = B2B_MW_DISABLED;
	ntb->msix_mw_idx = B2B_MW_DISABLED;

	/* Heartbeat timer for NTB_ATOM since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);
	callout_init(&ntb->peer_msix_work, 1);
	mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);

	if (ntb->type == NTB_ATOM)
		error = intel_ntb_detect_atom(ntb);
	else
		error = intel_ntb_detect_xeon(ntb);
	if (error != 0)
		goto out;

	intel_ntb_detect_max_mw(ntb);

	pci_enable_busmaster(ntb->device);

	error = intel_ntb_map_pci_bars(ntb);
	if (error != 0)
		goto out;
	if (ntb->type == NTB_ATOM)
		error = intel_ntb_atom_init_dev(ntb);
	else
		error = intel_ntb_xeon_init_dev(ntb);
	if (error != 0)
		goto out;

	intel_ntb_spad_clear(device);

	intel_ntb_poll_link(ntb);

	intel_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	if (error != 0)
		intel_ntb_detach(device);
	return (error);
}

static int
intel_ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = device_get_softc(device);

	/* Detach & delete all children */
	ntb_unregister_device(device);

	if (ntb->self_reg != NULL) {
		DB_MASK_LOCK(ntb);
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_valid_mask);
		DB_MASK_UNLOCK(ntb);
	}
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	callout_drain(&ntb->peer_msix_work);
	pci_disable_busmaster(ntb->device);
	if (ntb->type == NTB_XEON)
		intel_ntb_teardown_xeon(ntb);
	intel_ntb_teardown_interrupts(ntb);

	mtx_destroy(&ntb->db_mask_lock);

	intel_ntb_unmap_pci_bar(ntb);

	return (0);
}

/*
 * Driver internal routines
 */
static inline enum ntb_bar
intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{

	KASSERT(mw < ntb->mw_count,
	    ("%s: mw:%u >= count:%u", __func__, mw, (unsigned)ntb->mw_count));
	KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));

	return (ntb->reg->mw_bar[mw]);
}

static inline bool
bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
	/* XXX This assertion could be stronger. */
	KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
	return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(ntb, NTB_SPLIT_BAR));
}

static inline void
bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
    uint32_t *xlat, uint32_t *lmt)
{
	uint32_t basev, lmtv, xlatv;

	switch (bar) {
	case NTB_B2B_BAR_1:
		basev = ntb->xlat_reg->bar2_base;
		lmtv = ntb->xlat_reg->bar2_limit;
		xlatv = ntb->xlat_reg->bar2_xlat;
		break;
	case NTB_B2B_BAR_2:
		basev = ntb->xlat_reg->bar4_base;
		lmtv = ntb->xlat_reg->bar4_limit;
		xlatv = ntb->xlat_reg->bar4_xlat;
		break;
	case NTB_B2B_BAR_3:
		basev = ntb->xlat_reg->bar5_base;
		lmtv = ntb->xlat_reg->bar5_limit;
		xlatv = ntb->xlat_reg->bar5_xlat;
		break;
	default:
		KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
		    ("bad bar"));
		basev = lmtv = xlatv = 0;
		break;
	}

	if (base != NULL)
		*base = basev;
	if (xlat != NULL)
		*xlat = xlatv;
	if (lmt != NULL)
		*lmt = lmtv;
}

static int
intel_ntb_map_pci_bars(struct ntb_softc *ntb)
{
	int rc;

	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET;

	ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET;

	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR))
		goto out;

	ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET;

out:
	if (rc != 0)
		device_printf(ntb->device,
		    "unable to allocate pci resource\n");
	return (rc);
}

static void
print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
    const char *kind)
{

	device_printf(ntb->device,
	    "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1,
	    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
	    (uintmax_t)bar->size, kind);
}

static int
map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mmr");
	return (0);
}

static int
map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{
	int rc;
	vm_memattr_t mapmode;
	uint8_t bar_size_bits = 0;

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);

	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	/*
	 * Ivytown NTB BAR sizes are misreported due to a hardware erratum.
	 * To work around this, query the size the BAR should be configured
	 * to and modify the resource to correspond to this new size.  The
	 * BIOS on systems with this problem is required to provide enough
	 * address space to allow the driver to make this change safely.
	 *
	 * Ideally I could have just specified the size when I allocated the
	 * resource like:
	 *  bus_alloc_resource(ntb->device,
	 *	SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
	 *	1ul << bar_size_bits, RF_ACTIVE);
	 * but the PCI driver does not honor the size in this call, so we have
	 * to modify it after the fact.
	 */
	if (HAS_FEATURE(ntb, NTB_BAR_SIZE_4K)) {
		if (bar->pci_resource_id == PCIR_BAR(2))
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR23SZ_OFFSET, 1);
		else
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR45SZ_OFFSET, 1);

		rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
		    bar->pci_resource, bar->pbase,
		    bar->pbase + (1ul << bar_size_bits) - 1);
		if (rc != 0) {
			device_printf(ntb->device,
			    "unable to resize bar\n");
			return (rc);
		}

		save_bar_parameters(bar);
	}

	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mw");

	/*
	 * Optionally, mark MW BARs as anything other than UC to improve
	 * performance.
	 */
	mapmode = intel_ntb_pat_flags();
	if (mapmode == bar->map_mode)
		return (0);

	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mapmode);
	if (rc == 0) {
		bar->map_mode = mapmode;
		device_printf(ntb->device,
		    "Marked BAR%d v:[%p-%p] p:[%p-%p] as "
		    "%s.\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
		    intel_ntb_vm_memattr_to_str(mapmode));
	} else {
		device_printf(ntb->device,
		    "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as "
		    "%s: %d\n",
		    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
		    (char *)bar->vbase + bar->size - 1,
		    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
		    intel_ntb_vm_memattr_to_str(mapmode), rc);
		/* Proceed anyway */
	}
	return (0);
}

static void
intel_ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
	struct ntb_pci_bar_info *current_bar;
	int i;

	for (i = 0; i < NTB_MAX_BARS; i++) {
		current_bar = &ntb->bar_info[i];
		if (current_bar->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    current_bar->pci_resource_id,
			    current_bar->pci_resource);
	}
}

static int
intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;
	int rc;

	for (i = 0; i < num_vectors; i++) {
		ntb->int_info[i].rid = i + 1;
		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE);
		if (ntb->int_info[i].res == NULL) {
			device_printf(ntb->device,
			    "bus_alloc_resource failed\n");
			return (ENOMEM);
		}
		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;
		rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
		    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr,
		    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		if (rc != 0) {
			device_printf(ntb->device, "bus_setup_intr failed\n");
			return (ENXIO);
		}
	}
	return (0);
}

/*
 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector
 * cannot be allocated for each MSI-X message.  JHB seems to think remapping
 * should be okay.  This tunable should enable us to test that hypothesis
 * when someone gets their hands on some Xeon hardware.
 */
static int ntb_force_remap_mode;
SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN,
    &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped"
    " to a smaller number of ithreads, even if the desired number is "
    "available");

/*
 * In case it is NOT ok, give consumers an abort button.
 */
static int ntb_prefer_intx;
SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
    &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather "
    "than remapping MSI-X messages over available slots (matching the Linux "
    "driver's behavior)");

/*
 * Remap the desired number of MSI-X messages to available ithreads in a simple
 * round-robin fashion.
 */
static int
intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
	u_int *vectors;
	uint32_t i;
	int rc;

	if (ntb_prefer_intx != 0)
		return (ENXIO);

	vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK);

	for (i = 0; i < desired; i++)
		vectors[i] = (i % avail) + 1;

	rc = pci_remap_msix(dev, desired, vectors);
	free(vectors, M_NTB);
	return (rc);
}
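/*
 * For example, remapping desired = 4 messages onto avail = 2 vectors
 * yields the table {1, 2, 1, 2}: messages 0 and 2 share IRQ 1, and
 * messages 1 and 3 share IRQ 2.
 */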

static int
intel_ntb_init_isr(struct ntb_softc *ntb)
{
	uint32_t desired_vectors, num_vectors;
	int rc;

	ntb->allocated_interrupts = 0;
	ntb->last_ts = ticks;

	/*
	 * Mask all doorbell interrupts.  (Except link events!)
	 */
	DB_MASK_LOCK(ntb);
	ntb->db_mask = ntb->db_valid_mask;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);

	num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device),
	    ntb->db_count);
	if (desired_vectors >= 1) {
		rc = pci_alloc_msix(ntb->device, &num_vectors);

		if (ntb_force_remap_mode != 0 && rc == 0 &&
		    num_vectors == desired_vectors)
			num_vectors--;

		if (rc == 0 && num_vectors < desired_vectors) {
			rc = intel_ntb_remap_msix(ntb->device, desired_vectors,
			    num_vectors);
			if (rc == 0)
				num_vectors = desired_vectors;
			else
				pci_release_msi(ntb->device);
		}
		if (rc != 0)
			num_vectors = 1;
	} else
		num_vectors = 1;

	if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) {
		if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
			device_printf(ntb->device,
			    "Errata workaround does not support MSI or INTx\n");
			return (EINVAL);
		}

		ntb->db_vec_count = 1;
		ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
		rc = intel_ntb_setup_legacy_interrupt(ntb);
	} else {
		if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS &&
		    HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
			device_printf(ntb->device,
			    "Errata workaround expects %d doorbell bits\n",
			    XEON_NONLINK_DB_MSIX_BITS);
			return (EINVAL);
		}

		intel_ntb_create_msix_vec(ntb, num_vectors);
		rc = intel_ntb_setup_msix(ntb, num_vectors);
	}
	if (rc != 0) {
		device_printf(ntb->device,
		    "Error allocating interrupts: %d\n", rc);
		intel_ntb_free_msix_vec(ntb);
	}

	return (rc);
}

static int
intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
	int rc;

	ntb->int_info[0].rid = 0;
	ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ,
	    &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE);
	if (ntb->int_info[0].res == NULL) {
		device_printf(ntb->device, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ntb->int_info[0].tag = NULL;
	ntb->allocated_interrupts = 1;

	rc = bus_setup_intr(ntb->device, ntb->int_info[0].res,
	    INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr,
	    ntb, &ntb->int_info[0].tag);
	if (rc != 0) {
		device_printf(ntb->device, "bus_setup_intr failed\n");
		return (ENXIO);
	}

	return (0);
}

static void
intel_ntb_teardown_interrupts(struct ntb_softc *ntb)
{
	struct ntb_int_info *current_int;
	int i;

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	intel_ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

/*
 * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon.  Abstract it
 * out to make code clearer.
 */
static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

	if (ntb->type == NTB_ATOM)
		return (intel_ntb_reg_read(8, regoff));

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));

	return (intel_ntb_reg_read(2, regoff));
}

static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(val & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);
	db_iowrite_raw(ntb, regoff, val);
}

static inline void
db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	if (ntb->type == NTB_ATOM) {
		intel_ntb_reg_write(8, regoff, val);
		return;
	}

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
	intel_ntb_reg_write(2, regoff, (uint16_t)val);
}

static void
intel_ntb_db_set_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	if (!HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

static void
intel_ntb_db_clear_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);
	uint64_t ibits;
	int i;

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ibits = ntb->fake_db & ntb->db_mask & bits;
	ntb->db_mask &= ~bits;
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/* Simulate fake interrupts if unmasked DB bits are set. */
		ntb->force_db |= ibits;
		for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
			if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0)
				swi_sched(ntb->int_info[i].tag, 0);
		}
	} else {
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	}
	DB_MASK_UNLOCK(ntb);
}

static uint64_t
intel_ntb_db_read(device_t dev)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		return (ntb->fake_db);

	return (db_ioread(ntb, ntb->self_reg->db_bell));
}

static void
intel_ntb_db_clear(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		DB_MASK_LOCK(ntb);
		ntb->fake_db &= ~bits;
		DB_MASK_UNLOCK(ntb);
		return;
	}

	db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

static inline uint64_t
intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/*
		 * Remap vectors in a custom way so that at least the first
		 * three doorbells do not generate stray events.  This breaks
		 * Linux compatibility (if one existed) when more than one DB
		 * is used (not the case for if_ntb).
		 */
		if (db_vector < XEON_NONLINK_DB_MSIX_BITS - 1)
			return (1 << db_vector);
		if (db_vector == XEON_NONLINK_DB_MSIX_BITS - 1)
			return (0x7ffc);
	}

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}
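/*
 * Illustration of the generic path: with db_vec_shift == 2 each vector
 * owns two doorbell bits, so db_vector == 3 yields the mask
 * ((1ull << 2) - 1) << (2 * 3) == 0xc0.
 */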

static void
intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = intel_ntb_vec_mask(ntb, vec);

	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (intel_ntb_poll_link(ntb))
			ntb_link_event(ntb->device);
	}

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    (vec_mask & ntb->db_link_mask) == 0) {
		DB_MASK_LOCK(ntb);

		/*
		 * Do not report the same DB events again if they have not
		 * been cleared yet, unless their mask was just cleared and
		 * this interrupt handler call may be a consequence of that.
		 */
		vec_mask &= ~ntb->fake_db | ntb->force_db;
		ntb->force_db &= ~vec_mask;

		/* Update our internal doorbell register. */
		ntb->fake_db |= vec_mask;

		/* Do not report masked DB events. */
		vec_mask &= ~ntb->db_mask;

		DB_MASK_UNLOCK(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb->device, vec);
}

static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	intel_ntb_interrupt(nvec->ntb, nvec->num);
}

static void
ndev_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	intel_ntb_interrupt(arg, 0);
}

static int
intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;

	ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < num_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

static void
intel_ntb_free_msix_vec(struct ntb_softc *ntb)
{

	if (ntb->msix_vec == NULL)
		return;

	free(ntb->msix_vec, M_NTB);
	ntb->msix_vec = NULL;
}

static void
intel_ntb_get_msix_info(struct ntb_softc *ntb)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_msix *msix;
	uint32_t laddr, data, i, offset;

	dinfo = device_get_ivars(ntb->device);
	msix = &dinfo->cfg.msix;

	CTASSERT(XEON_NONLINK_DB_MSIX_BITS == nitems(ntb->msix_data));

	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
		offset = msix->msix_table_offset + i * PCI_MSIX_ENTRY_SIZE;

		laddr = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_LOWER_ADDR);
		intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);

		KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE,
		    ("local MSIX addr 0x%x not in MSI base 0x%x", laddr,
		     MSI_INTEL_ADDR_BASE));
		ntb->msix_data[i].nmd_ofs = laddr;

		data = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_DATA);
		intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);

		ntb->msix_data[i].nmd_data = data;
	}
}

static struct ntb_hw_info *
intel_ntb_get_device_info(uint32_t device_id)
{
	struct ntb_hw_info *ep;

	for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
		if (ep->device_id == device_id)
			return (ep);
	}
	return (NULL);
}

static void
intel_ntb_teardown_xeon(struct ntb_softc *ntb)
{

	if (ntb->reg != NULL)
		intel_ntb_link_disable(ntb->device);
}

static void
intel_ntb_detect_max_mw(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_ATOM) {
		ntb->mw_count = ATOM_MW_COUNT;
		return;
	}

	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
		ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
	else
		ntb->mw_count = XEON_SNB_MW_COUNT;
}

static int
intel_ntb_detect_xeon(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
	ntb->ppd = ppd;

	if ((ppd & XEON_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
		ntb->features |= NTB_SPLIT_BAR;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
		device_printf(ntb->device,
		    "Cannot apply SB01BASE_LOCKUP workaround "
		    "with split BARs disabled!\n");
		device_printf(ntb->device,
		    "Expect system hangs under heavy NTB traffic!\n");
		ntb->features &= ~NTB_SB01BASE_LOCKUP;
	}

	/*
	 * The SDOORBELL erratum workaround gets in the way of the
	 * SB01BASE_LOCKUP workaround; only apply one at a time.
	 */
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		ntb->features &= ~NTB_SDOORBELL_LOCKUP;

	conn_type = ppd & XEON_PPD_CONN_TYPE;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	case NTB_CONN_RP:
	case NTB_CONN_TRANSPARENT:
	default:
		device_printf(ntb->device, "Unsupported connection type: %u\n",
		    (unsigned)conn_type);
		return (ENXIO);
	}
	return (0);
}

static int
intel_ntb_detect_atom(struct ntb_softc *ntb)
{
	uint32_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
	ntb->ppd = ppd;

	if ((ppd & ATOM_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	default:
		device_printf(ntb->device, "Unsupported NTB configuration\n");
		return (ENXIO);
	}
	return (0);
}

static int
intel_ntb_xeon_init_dev(struct ntb_softc *ntb)
{
	int rc;

	ntb->spad_count		= XEON_SPAD_COUNT;
	ntb->db_count		= XEON_DB_COUNT;
	ntb->db_link_mask	= XEON_DB_LINK_BIT;
	ntb->db_vec_count	= XEON_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	= XEON_DB_MSIX_VECTOR_SHIFT;

	if (ntb->conn_type != NTB_CONN_B2B) {
		device_printf(ntb->device, "Connection type %d not supported\n",
		    ntb->conn_type);
		return (ENXIO);
	}

	ntb->reg = &xeon_reg;
	ntb->self_reg = &xeon_pri_reg;
	ntb->peer_reg = &xeon_b2b_reg;
	ntb->xlat_reg = &xeon_sec_xlat;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		ntb->force_db = ntb->fake_db = 0;
		ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) %
		    ntb->mw_count;
		intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
		    g_ntb_msix_idx, ntb->msix_mw_idx);
		rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
		    VM_MEMATTR_UNCACHEABLE);
		KASSERT(rc == 0, ("shouldn't fail"));
	} else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
		/*
		 * There is a Xeon hardware erratum related to writes to
		 * SDOORBELL or B2BDOORBELL in conjunction with inbound access
		 * to NTB MMIO space, which may hang the system.  To work
		 * around this, use a memory window to access the interrupt
		 * and scratch pad registers on the remote system.
		 */
		ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) %
		    ntb->mw_count;
		intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
		    g_ntb_mw_idx, ntb->b2b_mw_idx);
		rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
		    VM_MEMATTR_UNCACHEABLE);
		KASSERT(rc == 0, ("shouldn't fail"));
	} else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14))
		/*
		 * HW erratum on bit 14 of the b2bdoorbell register: writes
		 * will not be mirrored to the remote system.  Shrink the
		 * number of bits by one, since bit 14 is the last bit.
		 *
		 * In REGS_THRU_MW errata mode we don't use the b2bdoorbell
		 * register anyway, nor do we for non-B2B connection types.
		 */
		ntb->db_count = XEON_DB_COUNT - 1;

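	/*
	 * For example, a 15-bit doorbell register yields a valid mask of
	 * (1ull << 15) - 1 == 0x7fff.
	 */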
	ntb->db_valid_mask = (1ull << ntb->db_count) - 1;

	if (ntb->dev_type == NTB_DEV_USD)
		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr,
		    &xeon_b2b_usd_addr);
	else
		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr,
		    &xeon_b2b_dsd_addr);
	if (rc != 0)
		return (rc);

	/* Enable Bus Master and Memory Space on the secondary side */
	intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	/*
	 * Mask all doorbell interrupts.
	 */
	DB_MASK_LOCK(ntb);
	ntb->db_mask = ntb->db_valid_mask;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);

	rc = intel_ntb_init_isr(ntb);
	return (rc);
}

static int
intel_ntb_atom_init_dev(struct ntb_softc *ntb)
{
	int error;

	KASSERT(ntb->conn_type == NTB_CONN_B2B,
	    ("Unsupported NTB configuration (%d)\n", ntb->conn_type));

	ntb->spad_count		 = ATOM_SPAD_COUNT;
	ntb->db_count		 = ATOM_DB_COUNT;
	ntb->db_vec_count	 = ATOM_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	 = ATOM_DB_MSIX_VECTOR_SHIFT;
	ntb->db_valid_mask	 = (1ull << ntb->db_count) - 1;

	ntb->reg = &atom_reg;
	ntb->self_reg = &atom_pri_reg;
	ntb->peer_reg = &atom_b2b_reg;
	ntb->xlat_reg = &atom_sec_xlat;

	/*
	 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is
	 * resolved.  Mask transaction layer internal parity errors.
	 */
	pci_write_config(ntb->device, 0xFC, 0x4, 4);

	configure_atom_secondary_side_bars(ntb);

	/* Enable Bus Master and Memory Space on the secondary side */
	intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	error = intel_ntb_init_isr(ntb);
	if (error != 0)
		return (error);

	/* Initiate PCI-E link training */
	intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

	callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);

	return (0);
}

/* XXX: Linux driver doesn't seem to do any of this for Atom. */
static void
configure_atom_secondary_side_bars(struct ntb_softc *ntb)
{

	/*
	 * The USD and DSD directions currently program identical addresses,
	 * so a single code path suffices until they ever need to diverge.
	 */
	intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64);
	intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64);
	intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
	intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
}

/*
 * When working around the Xeon SDOORBELL erratum by remapping remote
 * registers in an MW, limit the B2B MW to half an MW.  By sharing an MW,
 * half the shared MW remains for use by a higher layer.
 *
 * Will only be used if working around the SDOORBELL erratum and the
 * BIOS-configured MW size is sufficiently large.
 */
static unsigned int ntb_b2b_mw_share;
SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share,
    0, "If enabled (non-zero), prefer to share half of the B2B peer register "
    "MW with higher level consumers.  Both sides of the NTB MUST set the same "
    "value here.");
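/*
 * For example, with sharing enabled a 1 MiB B2B MW would be split into a
 * 512 KiB half for consumers and a 512 KiB half for reaching the peer's
 * registers (assuming XEON_B2B_MIN_SIZE is at most 512 KiB).
 */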

static void
xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
    enum ntb_bar regbar)
{
	struct ntb_pci_bar_info *bar;
	uint8_t bar_sz;

	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
		return;

	bar = &ntb->bar_info[idx];
	bar_sz = pci_read_config(ntb->device, bar->psz_off, 1);
	if (idx == regbar) {
		if (ntb->b2b_off != 0)
			bar_sz--;
		else
			bar_sz = 0;
	}
	pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1);
	bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1);
	(void)bar_sz;
}

static void
xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
    enum ntb_bar idx, enum ntb_bar regbar)
{
	uint64_t reg_val;
	uint32_t base_reg, lmt_reg;

	bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
	if (idx == regbar) {
		if (ntb->b2b_off)
			bar_addr += ntb->b2b_off;
		else
			bar_addr = 0;
	}

	if (!bar_is_64bit(ntb, idx)) {
		intel_ntb_reg_write(4, base_reg, bar_addr);
		reg_val = intel_ntb_reg_read(4, base_reg);
		(void)reg_val;

		intel_ntb_reg_write(4, lmt_reg, bar_addr);
		reg_val = intel_ntb_reg_read(4, lmt_reg);
		(void)reg_val;
	} else {
		intel_ntb_reg_write(8, base_reg, bar_addr);
		reg_val = intel_ntb_reg_read(8, base_reg);
		(void)reg_val;

		intel_ntb_reg_write(8, lmt_reg, bar_addr);
		reg_val = intel_ntb_reg_read(8, lmt_reg);
		(void)reg_val;
	}
}

static void
xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
{
	struct ntb_pci_bar_info *bar;

	bar = &ntb->bar_info[idx];
	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
		intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr);
		base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off);
	} else {
		intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr);
		base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off);
	}
	(void)base_addr;
}

1733 static int
1734 xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
1735     const struct ntb_b2b_addr *peer_addr)
1736 {
1737 	struct ntb_pci_bar_info *b2b_bar;
1738 	vm_size_t bar_size;
1739 	uint64_t bar_addr;
1740 	enum ntb_bar b2b_bar_num, i;
1741 
1742 	if (ntb->b2b_mw_idx == B2B_MW_DISABLED) {
1743 		b2b_bar = NULL;
1744 		b2b_bar_num = NTB_CONFIG_BAR;
1745 		ntb->b2b_off = 0;
1746 	} else {
1747 		b2b_bar_num = intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
1748 		KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
1749 		    ("invalid b2b mw bar"));
1750 
1751 		b2b_bar = &ntb->bar_info[b2b_bar_num];
1752 		bar_size = b2b_bar->size;
1753 
		if (ntb_b2b_mw_share != 0 &&
		    (bar_size >> 1) >= XEON_B2B_MIN_SIZE) {
			ntb->b2b_off = bar_size >> 1;
		} else if (bar_size >= XEON_B2B_MIN_SIZE) {
			ntb->b2b_off = 0;
		} else {
			device_printf(ntb->device,
			    "B2B bar size is too small!\n");
			return (EIO);
		}
1764 	}
1765 
1766 	/*
1767 	 * Reset the secondary bar sizes to match the primary bar sizes.
1768 	 * (Except, disable or halve the size of the B2B secondary bar.)
1769 	 */
1770 	for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++)
1771 		xeon_reset_sbar_size(ntb, i, b2b_bar_num);
1772 
1773 	bar_addr = 0;
1774 	if (b2b_bar_num == NTB_CONFIG_BAR)
1775 		bar_addr = addr->bar0_addr;
1776 	else if (b2b_bar_num == NTB_B2B_BAR_1)
1777 		bar_addr = addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
1779 		bar_addr = addr->bar4_addr64;
1780 	else if (b2b_bar_num == NTB_B2B_BAR_2)
1781 		bar_addr = addr->bar4_addr32;
1782 	else if (b2b_bar_num == NTB_B2B_BAR_3)
1783 		bar_addr = addr->bar5_addr32;
1784 	else
1785 		KASSERT(false, ("invalid bar"));
1786 
1787 	intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
1788 
1789 	/*
1790 	 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
1791 	 * register BAR.  The B2B BAR is either disabled above or configured
1792 	 * half-size.  It starts at PBAR xlat + offset.
1793 	 *
1794 	 * Also set up incoming BAR limits == base (zero length window).
1795 	 */
1796 	xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
1797 	    b2b_bar_num);
1798 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
1799 		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
1800 		    NTB_B2B_BAR_2, b2b_bar_num);
1801 		xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
1802 		    NTB_B2B_BAR_3, b2b_bar_num);
1803 	} else
1804 		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64,
1805 		    NTB_B2B_BAR_2, b2b_bar_num);
1806 
1807 	/* Zero incoming translation addrs */
1808 	intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
1809 	intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
1810 
1811 	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
1812 		uint32_t xlat_reg, lmt_reg;
1813 		enum ntb_bar bar_num;
1814 
		/*
		 * Point the chosen MSI-X MW BAR translation at the peer's
		 * LAPIC MSI address range for the SB01BASE_LOCKUP workaround.
		 */
1819 		bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
1820 		bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg);
1821 		if (bar_is_64bit(ntb, bar_num)) {
1822 			intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE);
1823 			ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg);
1824 			intel_ntb_reg_write(8, lmt_reg, 0);
1825 		} else {
1826 			intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE);
1827 			ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg);
1828 			intel_ntb_reg_write(4, lmt_reg, 0);
1829 		}
1830 
		ntb->peer_lapic_bar = &ntb->bar_info[bar_num];
1832 	}
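	/* Read back to flush the posted translation writes. */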
1833 	(void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
1834 	(void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
1835 
1836 	/* Zero outgoing translation limits (whole bar size windows) */
1837 	intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
1838 	intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
1839 
1840 	/* Set outgoing translation offsets */
1841 	xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
1842 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
1843 		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
1844 		xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
1845 	} else
1846 		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2);
1847 
1848 	/* Set the translation offset for B2B registers */
1849 	bar_addr = 0;
1850 	if (b2b_bar_num == NTB_CONFIG_BAR)
1851 		bar_addr = peer_addr->bar0_addr;
1852 	else if (b2b_bar_num == NTB_B2B_BAR_1)
1853 		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR))
1855 		bar_addr = peer_addr->bar4_addr64;
1856 	else if (b2b_bar_num == NTB_B2B_BAR_2)
1857 		bar_addr = peer_addr->bar4_addr32;
1858 	else if (b2b_bar_num == NTB_B2B_BAR_3)
1859 		bar_addr = peer_addr->bar5_addr32;
1860 	else
1861 		KASSERT(false, ("invalid bar"));
1862 
1863 	/*
1864 	 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
1865 	 * at a time.
1866 	 */
1867 	intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
1868 	intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
1869 	return (0);
1870 }
1871 
1872 static inline bool
1873 _xeon_link_is_up(struct ntb_softc *ntb)
1874 {
1875 
1876 	if (ntb->conn_type == NTB_CONN_TRANSPARENT)
1877 		return (true);
1878 	return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
1879 }
1880 
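/*
 * With the SB01BASE_LOCKUP workaround active, the link is only reported
 * up once the MSI-X exchange with the peer has completed.
 */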
1881 static inline bool
1882 link_is_up(struct ntb_softc *ntb)
1883 {
1884 
1885 	if (ntb->type == NTB_XEON)
1886 		return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good ||
1887 		    !HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)));
1888 
1889 	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
1890 	return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
1891 }
1892 
1893 static inline bool
1894 atom_link_is_err(struct ntb_softc *ntb)
1895 {
1896 	uint32_t status;
1897 
1898 	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
1899 
1900 	status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
1901 	if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
1902 		return (true);
1903 
1904 	status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
1905 	return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
1906 }
1907 
/* Atom has no link status interrupt; poll the link on that platform. */
1909 static void
1910 atom_link_hb(void *arg)
1911 {
1912 	struct ntb_softc *ntb = arg;
1913 	sbintime_t timo, poll_ts;
1914 
1915 	timo = NTB_HB_TIMEOUT * hz;
1916 	poll_ts = ntb->last_ts + timo;
1917 
1918 	/*
1919 	 * Delay polling the link status if an interrupt was received, unless
1920 	 * the cached link status says the link is down.
1921 	 */
1922 	if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
1923 		timo = poll_ts - ticks;
1924 		goto out;
1925 	}
1926 
1927 	if (intel_ntb_poll_link(ntb))
1928 		ntb_link_event(ntb->device);
1929 
1930 	if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
1931 		/* Link is down with error, proceed with recovery */
1932 		callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
1933 		return;
1934 	}
1935 
1936 out:
1937 	callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
1938 }
1939 
1940 static void
1941 atom_perform_link_restart(struct ntb_softc *ntb)
1942 {
1943 	uint32_t status;
1944 
	/* Reset the NTB ModPhy lanes - magic! */
1946 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
1947 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
1948 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
1949 	intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
1950 
	/* Wait 100ms to allow the NTB ModPhy to settle */
1952 	pause("ModPhy", hz / 10);
1953 
1954 	/* Clear AER Errors, write to clear */
1955 	status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
1956 	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
1957 	intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
1958 
1959 	/* Clear unexpected electrical idle event in LTSSM, write to clear */
1960 	status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
1961 	status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
1962 	intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
1963 
1964 	/* Clear DeSkew Buffer error, write to clear */
1965 	status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
1966 	status |= ATOM_DESKEWSTS_DBERR;
1967 	intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
1968 
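	/* Clear IBIST error overflow, write to clear */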
1969 	status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
1970 	status &= ATOM_IBIST_ERR_OFLOW;
1971 	intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
1972 
1973 	/* Releases the NTB state machine to allow the link to retrain */
1974 	status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
1975 	status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
1976 	intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
1977 }
1978 
1979 static int
1980 intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
1981     enum ntb_width width __unused)
1982 {
1983 	struct ntb_softc *ntb = device_get_softc(dev);
1984 	uint32_t cntl;
1985 
1986 	intel_ntb_printf(2, "%s\n", __func__);
1987 
1988 	if (ntb->type == NTB_ATOM) {
1989 		pci_write_config(ntb->device, NTB_PPD_OFFSET,
1990 		    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
1991 		return (0);
1992 	}
1993 
1994 	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
1995 		ntb_link_event(dev);
1996 		return (0);
1997 	}
1998 
1999 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2000 	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
2001 	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
2002 	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
2003 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2004 		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
2005 	intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2006 	return (0);
2007 }
2008 
2009 static int
2010 intel_ntb_link_disable(device_t dev)
2011 {
2012 	struct ntb_softc *ntb = device_get_softc(dev);
2013 	uint32_t cntl;
2014 
2015 	intel_ntb_printf(2, "%s\n", __func__);
2016 
2017 	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
2018 		ntb_link_event(dev);
2019 		return (0);
2020 	}
2021 
2022 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2023 	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
2024 	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
2025 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
2026 		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
2027 	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
2028 	intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
2029 	return (0);
2030 }
2031 
2032 static bool
2033 intel_ntb_link_enabled(device_t dev)
2034 {
2035 	struct ntb_softc *ntb = device_get_softc(dev);
2036 	uint32_t cntl;
2037 
2038 	if (ntb->type == NTB_ATOM) {
2039 		cntl = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
2040 		return ((cntl & ATOM_PPD_INIT_LINK) != 0);
2041 	}
2042 
2043 	if (ntb->conn_type == NTB_CONN_TRANSPARENT)
2044 		return (true);
2045 
2046 	cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2047 	return ((cntl & NTB_CNTL_LINK_DISABLE) == 0);
2048 }
2049 
2050 static void
2051 recover_atom_link(void *arg)
2052 {
2053 	struct ntb_softc *ntb = arg;
2054 	unsigned speed, width, oldspeed, oldwidth;
2055 	uint32_t status32;
2056 
2057 	atom_perform_link_restart(ntb);
2058 
	/*
	 * There is a potential race if both NTB endpoints attempt recovery
	 * at the same time.  If their retry intervals are identical, the
	 * link never recovers and the driver is stuck in this loop forever.
	 * Add a random interval to the recovery time to break the symmetry.
	 */
2065 	status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
2066 	pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);
2067 
2068 	if (atom_link_is_err(ntb))
2069 		goto retry;
2070 
2071 	status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2072 	if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
2073 		goto out;
2074 
2075 	status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2076 	width = NTB_LNK_STA_WIDTH(status32);
2077 	speed = status32 & NTB_LINK_SPEED_MASK;
2078 
2079 	oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
2080 	oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
2081 	if (oldwidth != width || oldspeed != speed)
2082 		goto retry;
2083 
2084 out:
2085 	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
2086 	    ntb);
2087 	return;
2088 
2089 retry:
2090 	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
2091 	    ntb);
2092 }
2093 
2094 /*
2095  * Polls the HW link status register(s); returns true if something has changed.
2096  */
2097 static bool
2098 intel_ntb_poll_link(struct ntb_softc *ntb)
2099 {
2100 	uint32_t ntb_cntl;
2101 	uint16_t reg_val;
2102 
2103 	if (ntb->type == NTB_ATOM) {
2104 		ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
2105 		if (ntb_cntl == ntb->ntb_ctl)
2106 			return (false);
2107 
2108 		ntb->ntb_ctl = ntb_cntl;
2109 		ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
2110 	} else {
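		/* Clear pending link-event doorbell bits (write-1-to-clear). */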
2111 		db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask);
2112 
2113 		reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
2114 		if (reg_val == ntb->lnk_sta)
2115 			return (false);
2116 
2117 		ntb->lnk_sta = reg_val;
2118 
2119 		if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2120 			if (_xeon_link_is_up(ntb)) {
2121 				if (!ntb->peer_msix_good) {
2122 					callout_reset(&ntb->peer_msix_work, 0,
2123 					    intel_ntb_exchange_msix, ntb);
2124 					return (false);
2125 				}
2126 			} else {
2127 				ntb->peer_msix_good = false;
2128 				ntb->peer_msix_done = false;
2129 			}
2130 		}
2131 	}
2132 	return (true);
2133 }
2134 
2135 static inline enum ntb_speed
2136 intel_ntb_link_sta_speed(struct ntb_softc *ntb)
2137 {
2138 
2139 	if (!link_is_up(ntb))
2140 		return (NTB_SPEED_NONE);
2141 	return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
2142 }
2143 
2144 static inline enum ntb_width
2145 intel_ntb_link_sta_width(struct ntb_softc *ntb)
2146 {
2147 
2148 	if (!link_is_up(ntb))
2149 		return (NTB_WIDTH_NONE);
2150 	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
2151 }
2152 
2153 SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0,
2154     "Driver state, statistics, and HW registers");
2155 
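/*
 * Register-access sysctls encode everything in arg2: the low 28 bits
 * are the register offset, bit 28 selects PCI config space, bit 29
 * requests a doorbell-safe read, and bits 30-31 select the access width.
 */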
2156 #define NTB_REGSZ_MASK	(3ul << 30)
2157 #define NTB_REG_64	(1ul << 30)
2158 #define NTB_REG_32	(2ul << 30)
2159 #define NTB_REG_16	(3ul << 30)
2160 #define NTB_REG_8	(0ul << 30)
2161 
2162 #define NTB_DB_READ	(1ul << 29)
2163 #define NTB_PCI_REG	(1ul << 28)
2164 #define NTB_REGFLAGS_MASK	(NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG)
2165 
2166 static void
2167 intel_ntb_sysctl_init(struct ntb_softc *ntb)
2168 {
2169 	struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar;
2170 	struct sysctl_ctx_list *ctx;
2171 	struct sysctl_oid *tree, *tmptree;
2172 
2173 	ctx = device_get_sysctl_ctx(ntb->device);
2174 	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
2175 
2176 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "link_status",
2177 	    CTLFLAG_RD | CTLTYPE_STRING, ntb, 0,
2178 	    sysctl_handle_link_status_human, "A",
2179 	    "Link status (human readable)");
2180 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "active",
2181 	    CTLFLAG_RD | CTLTYPE_UINT, ntb, 0, sysctl_handle_link_status,
2182 	    "IU", "Link status (1=active, 0=inactive)");
2183 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "admin_up",
2184 	    CTLFLAG_RW | CTLTYPE_UINT, ntb, 0, sysctl_handle_link_admin,
2185 	    "IU", "Set/get interface status (1=UP, 0=DOWN)");
2186 
2187 	tree = SYSCTL_ADD_NODE(ctx, globals, OID_AUTO, "debug_info",
2188 	    CTLFLAG_RD, NULL, "Driver state, statistics, and HW registers");
2189 	tree_par = SYSCTL_CHILDREN(tree);
2190 
2191 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD,
2192 	    &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port");
2193 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD,
2194 	    &ntb->dev_type, 0, "0 - USD; 1 - DSD");
2195 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD,
2196 	    &ntb->ppd, 0, "Raw PPD register (cached)");
2197 
2198 	if (ntb->b2b_mw_idx != B2B_MW_DISABLED) {
2199 		SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD,
2200 		    &ntb->b2b_mw_idx, 0,
2201 		    "Index of the MW used for B2B remote register access");
2202 		SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off",
2203 		    CTLFLAG_RD, &ntb->b2b_off,
2204 		    "If non-zero, offset of B2B register region in shared MW");
2205 	}
2206 
2207 	SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features",
2208 	    CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_features, "A",
2209 	    "Features/errata of this NTB device");
2210 
2211 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD,
2212 	    __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0,
2213 	    "NTB CTL register (cached)");
2214 	SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD,
2215 	    __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0,
2216 	    "LNK STA register (cached)");
2217 
2218 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD,
2219 	    &ntb->mw_count, 0, "MW count");
2220 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD,
2221 	    &ntb->spad_count, 0, "Scratchpad count");
2222 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD,
2223 	    &ntb->db_count, 0, "Doorbell count");
2224 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD,
2225 	    &ntb->db_vec_count, 0, "Doorbell vector count");
2226 	SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD,
2227 	    &ntb->db_vec_shift, 0, "Doorbell vector shift");
2228 
2229 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD,
2230 	    &ntb->db_valid_mask, "Doorbell valid mask");
2231 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD,
2232 	    &ntb->db_link_mask, "Doorbell link mask");
2233 	SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD,
2234 	    &ntb->db_mask, "Doorbell mask (cached)");
2235 
2236 	tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers",
2237 	    CTLFLAG_RD, NULL, "Raw HW registers (big-endian)");
2238 	regpar = SYSCTL_CHILDREN(tmptree);
2239 
2240 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl",
2241 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 |
2242 	    ntb->reg->ntb_ctl, sysctl_handle_register, "IU",
2243 	    "NTB Control register");
2244 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap",
2245 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 |
2246 	    0x19c, sysctl_handle_register, "IU",
2247 	    "NTB Link Capabilities");
2248 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon",
2249 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 |
2250 	    0x1a0, sysctl_handle_register, "IU",
2251 	    "NTB Link Control register");
2252 
2253 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask",
2254 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2255 	    NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask,
2256 	    sysctl_handle_register, "QU", "Doorbell mask register");
2257 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell",
2258 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2259 	    NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell,
2260 	    sysctl_handle_register, "QU", "Doorbell register");
2261 
2262 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23",
2263 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2264 	    NTB_REG_64 | ntb->xlat_reg->bar2_xlat,
2265 	    sysctl_handle_register, "QU", "Incoming XLAT23 register");
2266 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2267 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4",
2268 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2269 		    NTB_REG_32 | ntb->xlat_reg->bar4_xlat,
2270 		    sysctl_handle_register, "IU", "Incoming XLAT4 register");
2271 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5",
2272 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2273 		    NTB_REG_32 | ntb->xlat_reg->bar5_xlat,
2274 		    sysctl_handle_register, "IU", "Incoming XLAT5 register");
2275 	} else {
2276 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45",
2277 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2278 		    NTB_REG_64 | ntb->xlat_reg->bar4_xlat,
2279 		    sysctl_handle_register, "QU", "Incoming XLAT45 register");
2280 	}
2281 
2282 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23",
2283 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2284 	    NTB_REG_64 | ntb->xlat_reg->bar2_limit,
2285 	    sysctl_handle_register, "QU", "Incoming LMT23 register");
2286 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2287 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4",
2288 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2289 		    NTB_REG_32 | ntb->xlat_reg->bar4_limit,
2290 		    sysctl_handle_register, "IU", "Incoming LMT4 register");
2291 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5",
2292 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2293 		    NTB_REG_32 | ntb->xlat_reg->bar5_limit,
2294 		    sysctl_handle_register, "IU", "Incoming LMT5 register");
2295 	} else {
2296 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45",
2297 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2298 		    NTB_REG_64 | ntb->xlat_reg->bar4_limit,
2299 		    sysctl_handle_register, "QU", "Incoming LMT45 register");
2300 	}
2301 
2302 	if (ntb->type == NTB_ATOM)
2303 		return;
2304 
2305 	tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats",
2306 	    CTLFLAG_RD, NULL, "Xeon HW statistics");
2307 	statpar = SYSCTL_CHILDREN(tmptree);
2308 	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss",
2309 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2310 	    NTB_REG_16 | XEON_USMEMMISS_OFFSET,
2311 	    sysctl_handle_register, "SU", "Upstream Memory Miss");
2312 
2313 	tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err",
2314 	    CTLFLAG_RD, NULL, "Xeon HW errors");
2315 	errpar = SYSCTL_CHILDREN(tmptree);
2316 
2317 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd",
2318 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2319 	    NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET,
2320 	    sysctl_handle_register, "CU", "PPD");
2321 
2322 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz",
2323 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2324 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET,
2325 	    sysctl_handle_register, "CU", "PBAR23 SZ (log2)");
2326 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz",
2327 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2328 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET,
2329 	    sysctl_handle_register, "CU", "PBAR4 SZ (log2)");
2330 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz",
2331 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2332 	    NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET,
2333 	    sysctl_handle_register, "CU", "PBAR5 SZ (log2)");
2334 
2335 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz",
2336 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2337 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET,
2338 	    sysctl_handle_register, "CU", "SBAR23 SZ (log2)");
2339 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz",
2340 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2341 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET,
2342 	    sysctl_handle_register, "CU", "SBAR4 SZ (log2)");
2343 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz",
2344 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2345 	    NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET,
2346 	    sysctl_handle_register, "CU", "SBAR5 SZ (log2)");
2347 
2348 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts",
2349 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2350 	    NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET,
2351 	    sysctl_handle_register, "SU", "DEVSTS");
2352 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts",
2353 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2354 	    NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET,
2355 	    sysctl_handle_register, "SU", "LNKSTS");
2356 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts",
2357 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2358 	    NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET,
2359 	    sysctl_handle_register, "SU", "SLNKSTS");
2360 
2361 	SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts",
2362 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2363 	    NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET,
2364 	    sysctl_handle_register, "IU", "UNCERRSTS");
2365 	SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts",
2366 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2367 	    NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET,
2368 	    sysctl_handle_register, "IU", "CORERRSTS");
2369 
2370 	if (ntb->conn_type != NTB_CONN_B2B)
2371 		return;
2372 
2373 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23",
2374 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2375 	    NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off,
2376 	    sysctl_handle_register, "QU", "Outgoing XLAT23 register");
2377 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2378 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4",
2379 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2380 		    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2381 		    sysctl_handle_register, "IU", "Outgoing XLAT4 register");
2382 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5",
2383 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2384 		    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off,
2385 		    sysctl_handle_register, "IU", "Outgoing XLAT5 register");
2386 	} else {
2387 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45",
2388 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2389 		    NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
2390 		    sysctl_handle_register, "QU", "Outgoing XLAT45 register");
2391 	}
2392 
2393 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23",
2394 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2395 	    NTB_REG_64 | XEON_PBAR2LMT_OFFSET,
2396 	    sysctl_handle_register, "QU", "Outgoing LMT23 register");
2397 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2398 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4",
2399 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2400 		    NTB_REG_32 | XEON_PBAR4LMT_OFFSET,
2401 		    sysctl_handle_register, "IU", "Outgoing LMT4 register");
2402 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5",
2403 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2404 		    NTB_REG_32 | XEON_PBAR5LMT_OFFSET,
2405 		    sysctl_handle_register, "IU", "Outgoing LMT5 register");
2406 	} else {
2407 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45",
2408 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2409 		    NTB_REG_64 | XEON_PBAR4LMT_OFFSET,
2410 		    sysctl_handle_register, "QU", "Outgoing LMT45 register");
2411 	}
2412 
2413 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base",
2414 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2415 	    NTB_REG_64 | ntb->xlat_reg->bar0_base,
2416 	    sysctl_handle_register, "QU", "Secondary BAR01 base register");
2417 	SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base",
2418 	    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2419 	    NTB_REG_64 | ntb->xlat_reg->bar2_base,
2420 	    sysctl_handle_register, "QU", "Secondary BAR23 base register");
2421 	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
2422 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base",
2423 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2424 		    NTB_REG_32 | ntb->xlat_reg->bar4_base,
2425 		    sysctl_handle_register, "IU",
2426 		    "Secondary BAR4 base register");
2427 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base",
2428 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2429 		    NTB_REG_32 | ntb->xlat_reg->bar5_base,
2430 		    sysctl_handle_register, "IU",
2431 		    "Secondary BAR5 base register");
2432 	} else {
2433 		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
2434 		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
2435 		    NTB_REG_64 | ntb->xlat_reg->bar4_base,
2436 		    sysctl_handle_register, "QU",
2437 		    "Secondary BAR45 base register");
2438 	}
2439 }
2440 
2441 static int
2442 sysctl_handle_features(SYSCTL_HANDLER_ARGS)
2443 {
2444 	struct ntb_softc *ntb = arg1;
2445 	struct sbuf sb;
2446 	int error;
2447 
2448 	sbuf_new_for_sysctl(&sb, NULL, 256, req);
2449 
2450 	sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
2451 	error = sbuf_finish(&sb);
2452 	sbuf_delete(&sb);
2453 
2454 	if (error || !req->newptr)
2455 		return (error);
2456 	return (EINVAL);
2457 }
2458 
2459 static int
2460 sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
2461 {
2462 	struct ntb_softc *ntb = arg1;
2463 	unsigned old, new;
2464 	int error;
2465 
2466 	old = intel_ntb_link_enabled(ntb->device);
2467 
2468 	error = SYSCTL_OUT(req, &old, sizeof(old));
2469 	if (error != 0 || req->newptr == NULL)
2470 		return (error);
2471 
2472 	error = SYSCTL_IN(req, &new, sizeof(new));
2473 	if (error != 0)
2474 		return (error);
2475 
	intel_ntb_printf(0, "Admin set interface state to '%sabled'\n",
	    (new != 0) ? "en" : "dis");

	if (new != 0)
		error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO,
		    NTB_WIDTH_AUTO);
2481 	else
2482 		error = intel_ntb_link_disable(ntb->device);
2483 	return (error);
2484 }
2485 
2486 static int
2487 sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)
2488 {
2489 	struct ntb_softc *ntb = arg1;
2490 	struct sbuf sb;
2491 	enum ntb_speed speed;
2492 	enum ntb_width width;
2493 	int error;
2494 
2495 	sbuf_new_for_sysctl(&sb, NULL, 32, req);
2496 
2497 	if (intel_ntb_link_is_up(ntb->device, &speed, &width))
2498 		sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
2499 		    (unsigned)speed, (unsigned)width);
2500 	else
2501 		sbuf_printf(&sb, "down");
2502 
2503 	error = sbuf_finish(&sb);
2504 	sbuf_delete(&sb);
2505 
2506 	if (error || !req->newptr)
2507 		return (error);
2508 	return (EINVAL);
2509 }
2510 
2511 static int
2512 sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
2513 {
2514 	struct ntb_softc *ntb = arg1;
2515 	unsigned res;
2516 	int error;
2517 
2518 	res = intel_ntb_link_is_up(ntb->device, NULL, NULL);
2519 
2520 	error = SYSCTL_OUT(req, &res, sizeof(res));
2521 	if (error || !req->newptr)
2522 		return (error);
2523 	return (EINVAL);
2524 }
2525 
2526 static int
2527 sysctl_handle_register(SYSCTL_HANDLER_ARGS)
2528 {
2529 	struct ntb_softc *ntb;
2530 	const void *outp;
2531 	uintptr_t sz;
2532 	uint64_t umv;
2533 	char be[sizeof(umv)];
2534 	size_t outsz;
2535 	uint32_t reg;
2536 	bool db, pci;
2537 	int error;
2538 
2539 	ntb = arg1;
2540 	reg = arg2 & ~NTB_REGFLAGS_MASK;
2541 	sz = arg2 & NTB_REGSZ_MASK;
2542 	db = (arg2 & NTB_DB_READ) != 0;
2543 	pci = (arg2 & NTB_PCI_REG) != 0;
2544 
2545 	KASSERT(!(db && pci), ("bogus"));
2546 
2547 	if (db) {
2548 		KASSERT(sz == NTB_REG_64, ("bogus"));
2549 		umv = db_ioread(ntb, reg);
2550 		outsz = sizeof(uint64_t);
2551 	} else {
2552 		switch (sz) {
2553 		case NTB_REG_64:
2554 			if (pci)
2555 				umv = pci_read_config(ntb->device, reg, 8);
2556 			else
2557 				umv = intel_ntb_reg_read(8, reg);
2558 			outsz = sizeof(uint64_t);
2559 			break;
2560 		case NTB_REG_32:
2561 			if (pci)
2562 				umv = pci_read_config(ntb->device, reg, 4);
2563 			else
2564 				umv = intel_ntb_reg_read(4, reg);
2565 			outsz = sizeof(uint32_t);
2566 			break;
2567 		case NTB_REG_16:
2568 			if (pci)
2569 				umv = pci_read_config(ntb->device, reg, 2);
2570 			else
2571 				umv = intel_ntb_reg_read(2, reg);
2572 			outsz = sizeof(uint16_t);
2573 			break;
2574 		case NTB_REG_8:
2575 			if (pci)
2576 				umv = pci_read_config(ntb->device, reg, 1);
2577 			else
2578 				umv = intel_ntb_reg_read(1, reg);
2579 			outsz = sizeof(uint8_t);
2580 			break;
2581 		default:
2582 			panic("bogus");
2583 			break;
2584 		}
2585 	}
2586 
	/* Encode big-endian so that sysctl -x output is legible. */
2588 	be64enc(be, umv);
2589 	outp = ((char *)be) + sizeof(umv) - outsz;
2590 
2591 	error = SYSCTL_OUT(req, outp, outsz);
2592 	if (error || !req->newptr)
2593 		return (error);
2594 	return (EINVAL);
2595 }
2596 
2597 static unsigned
2598 intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
2599 {
2600 
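	/*
	 * Translate a user-visible MW index into the hardware index by
	 * skipping MWs consumed by the B2B registers (when not shared)
	 * and by the MSI-X workaround; at most two indices are hidden.
	 */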
2601 	if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2602 	    uidx >= ntb->b2b_mw_idx) ||
2603 	    (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2604 		uidx++;
2605 	if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
2606 	    uidx >= ntb->b2b_mw_idx) &&
2607 	    (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
2608 		uidx++;
2609 	return (uidx);
2610 }
2611 
2612 #ifndef EARLY_AP_STARTUP
2613 static int msix_ready;
2614 
2615 static void
2616 intel_ntb_msix_ready(void *arg __unused)
2617 {
2618 
2619 	msix_ready = 1;
2620 }
2621 SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
2622     intel_ntb_msix_ready, NULL);
2623 #endif
2624 
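/*
 * Negotiate MSI-X data with the peer over scratchpad registers: publish
 * the local vectors, wait for the peer's set (guarded by a version
 * magic), then complete a DONE/RECEIVED handshake before reporting the
 * link up.
 */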
2625 static void
2626 intel_ntb_exchange_msix(void *ctx)
2627 {
2628 	struct ntb_softc *ntb;
2629 	uint32_t val;
2630 	unsigned i;
2631 
2632 	ntb = ctx;
2633 
2634 	if (ntb->peer_msix_good)
2635 		goto msix_good;
2636 	if (ntb->peer_msix_done)
2637 		goto msix_done;
2638 
2639 #ifndef EARLY_AP_STARTUP
	/* Defer MSI-X negotiation until SMP is up and IRQs are reshuffled. */
2641 	if (!msix_ready)
2642 		goto reschedule;
2643 #endif
2644 
2645 	intel_ntb_get_msix_info(ntb);
2646 	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
2647 		intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
2648 		    ntb->msix_data[i].nmd_data);
2649 		intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
2650 		    ntb->msix_data[i].nmd_ofs - ntb->msix_xlat);
2651 	}
	intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD,
	    NTB_MSIX_VER_GUARD);
2653 
2654 	intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
2655 	if (val != NTB_MSIX_VER_GUARD)
2656 		goto reschedule;
2657 
2658 	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
2659 		intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
2660 		intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
2661 		ntb->peer_msix_data[i].nmd_data = val;
2662 		intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
2663 		intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
2664 		ntb->peer_msix_data[i].nmd_ofs = val;
2665 	}
2666 
2667 	ntb->peer_msix_done = true;
2668 
2669 msix_done:
	intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE,
	    NTB_MSIX_RECEIVED);
2671 	intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
2672 	if (val != NTB_MSIX_RECEIVED)
2673 		goto reschedule;
2674 
2675 	intel_ntb_spad_clear(ntb->device);
2676 	ntb->peer_msix_good = true;
2677 	/* Give peer time to see our NTB_MSIX_RECEIVED. */
2678 	goto reschedule;
2679 
2680 msix_good:
2681 	intel_ntb_poll_link(ntb);
2682 	ntb_link_event(ntb->device);
2683 	return;
2684 
2685 reschedule:
2686 	ntb->lnk_sta = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
2687 	if (_xeon_link_is_up(ntb)) {
2688 		callout_reset(&ntb->peer_msix_work,
2689 		    hz * (ntb->peer_msix_good ? 2 : 1) / 10,
2690 		    intel_ntb_exchange_msix, ntb);
2691 	} else
2692 		intel_ntb_spad_clear(ntb->device);
2693 }
2694 
2695 /*
2696  * Public API to the rest of the OS
2697  */
2698 
2699 static uint8_t
2700 intel_ntb_spad_count(device_t dev)
2701 {
2702 	struct ntb_softc *ntb = device_get_softc(dev);
2703 
2704 	return (ntb->spad_count);
2705 }
2706 
2707 static uint8_t
2708 intel_ntb_mw_count(device_t dev)
2709 {
2710 	struct ntb_softc *ntb = device_get_softc(dev);
2711 	uint8_t res;
2712 
2713 	res = ntb->mw_count;
2714 	if (ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0)
2715 		res--;
2716 	if (ntb->msix_mw_idx != B2B_MW_DISABLED)
2717 		res--;
2718 	return (res);
2719 }
2720 
2721 static int
2722 intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
2723 {
2724 	struct ntb_softc *ntb = device_get_softc(dev);
2725 
2726 	if (idx >= ntb->spad_count)
2727 		return (EINVAL);
2728 
2729 	intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
2730 
2731 	return (0);
2732 }
2733 
2734 /*
2735  * Zeros the local scratchpad.
2736  */
2737 static void
2738 intel_ntb_spad_clear(device_t dev)
2739 {
2740 	struct ntb_softc *ntb = device_get_softc(dev);
2741 	unsigned i;
2742 
2743 	for (i = 0; i < ntb->spad_count; i++)
2744 		intel_ntb_spad_write(dev, i, 0);
2745 }
2746 
2747 static int
2748 intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
2749 {
2750 	struct ntb_softc *ntb = device_get_softc(dev);
2751 
2752 	if (idx >= ntb->spad_count)
2753 		return (EINVAL);
2754 
2755 	*val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
2756 
2757 	return (0);
2758 }
2759 
2760 static int
2761 intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
2762 {
2763 	struct ntb_softc *ntb = device_get_softc(dev);
2764 
2765 	if (idx >= ntb->spad_count)
2766 		return (EINVAL);
2767 
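	/*
	 * With the SDOORBELL_LOCKUP errata, the peer's registers are
	 * reached through the B2B memory window instead of directly.
	 */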
2768 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
2769 		intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
2770 	else
2771 		intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
2772 
2773 	return (0);
2774 }
2775 
2776 static int
2777 intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
2778 {
2779 	struct ntb_softc *ntb = device_get_softc(dev);
2780 
2781 	if (idx >= ntb->spad_count)
2782 		return (EINVAL);
2783 
2784 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
2785 		*val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
2786 	else
2787 		*val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
2788 
2789 	return (0);
2790 }
2791 
2792 static int
2793 intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
2794     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
2795     bus_addr_t *plimit)
2796 {
2797 	struct ntb_softc *ntb = device_get_softc(dev);
2798 	struct ntb_pci_bar_info *bar;
2799 	bus_addr_t limit;
2800 	size_t bar_b2b_off;
2801 	enum ntb_bar bar_num;
2802 
2803 	if (mw_idx >= intel_ntb_mw_count(dev))
2804 		return (EINVAL);
2805 	mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx);
2806 
2807 	bar_num = intel_ntb_mw_to_bar(ntb, mw_idx);
2808 	bar = &ntb->bar_info[bar_num];
2809 	bar_b2b_off = 0;
2810 	if (mw_idx == ntb->b2b_mw_idx) {
2811 		KASSERT(ntb->b2b_off != 0,
2812 		    ("user shouldn't get non-shared b2b mw"));
2813 		bar_b2b_off = ntb->b2b_off;
2814 	}
2815 
2816 	if (bar_is_64bit(ntb, bar_num))
2817 		limit = BUS_SPACE_MAXADDR;
2818 	else
2819 		limit = BUS_SPACE_MAXADDR_32BIT;
2820 
2821 	if (base != NULL)
2822 		*base = bar->pbase + bar_b2b_off;
2823 	if (vbase != NULL)
2824 		*vbase = bar->vbase + bar_b2b_off;
2825 	if (size != NULL)
2826 		*size = bar->size - bar_b2b_off;
2827 	if (align != NULL)
2828 		*align = bar->size;
2829 	if (align_size != NULL)
2830 		*align_size = 1;
2831 	if (plimit != NULL)
2832 		*plimit = limit;
2833 	return (0);
2834 }
2835 
2836 static int
2837 intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
2838 {
2839 	struct ntb_softc *ntb = device_get_softc(dev);
2840 	struct ntb_pci_bar_info *bar;
2841 	uint64_t base, limit, reg_val;
2842 	size_t bar_size, mw_size;
2843 	uint32_t base_reg, xlat_reg, limit_reg;
2844 	enum ntb_bar bar_num;
2845 
2846 	if (idx >= intel_ntb_mw_count(dev))
2847 		return (EINVAL);
2848 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
2849 
2850 	bar_num = intel_ntb_mw_to_bar(ntb, idx);
2851 	bar = &ntb->bar_info[bar_num];
2852 
2853 	bar_size = bar->size;
2854 	if (idx == ntb->b2b_mw_idx)
2855 		mw_size = bar_size - ntb->b2b_off;
2856 	else
2857 		mw_size = bar_size;
2858 
2859 	/* Hardware requires that addr is aligned to bar size */
2860 	if ((addr & (bar_size - 1)) != 0)
2861 		return (EINVAL);
2862 
2863 	if (size > mw_size)
2864 		return (EINVAL);
2865 
2866 	bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);
2867 
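	/*
	 * A limit of zero opens the whole BAR-sized window; a nonzero
	 * limit ends the incoming window at base + size.
	 */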
2868 	limit = 0;
2869 	if (bar_is_64bit(ntb, bar_num)) {
2870 		base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
2871 
2872 		if (limit_reg != 0 && size != mw_size)
2873 			limit = base + size;
2874 
2875 		/* Set and verify translation address */
2876 		intel_ntb_reg_write(8, xlat_reg, addr);
2877 		reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
2878 		if (reg_val != addr) {
2879 			intel_ntb_reg_write(8, xlat_reg, 0);
2880 			return (EIO);
2881 		}
2882 
2883 		/* Set and verify the limit */
2884 		intel_ntb_reg_write(8, limit_reg, limit);
2885 		reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
2886 		if (reg_val != limit) {
2887 			intel_ntb_reg_write(8, limit_reg, base);
2888 			intel_ntb_reg_write(8, xlat_reg, 0);
2889 			return (EIO);
2890 		}
2891 	} else {
2892 		/* Configure 32-bit (split) BAR MW */
2893 
2894 		if ((addr & UINT32_MAX) != addr)
2895 			return (ERANGE);
2896 		if (((addr + size) & UINT32_MAX) != (addr + size))
2897 			return (ERANGE);
2898 
2899 		base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
2900 
2901 		if (limit_reg != 0 && size != mw_size)
2902 			limit = base + size;
2903 
2904 		/* Set and verify translation address */
2905 		intel_ntb_reg_write(4, xlat_reg, addr);
2906 		reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
2907 		if (reg_val != addr) {
2908 			intel_ntb_reg_write(4, xlat_reg, 0);
2909 			return (EIO);
2910 		}
2911 
2912 		/* Set and verify the limit */
2913 		intel_ntb_reg_write(4, limit_reg, limit);
2914 		reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
2915 		if (reg_val != limit) {
2916 			intel_ntb_reg_write(4, limit_reg, base);
2917 			intel_ntb_reg_write(4, xlat_reg, 0);
2918 			return (EIO);
2919 		}
2920 	}
2921 	return (0);
2922 }
2923 
2924 static int
2925 intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
2926 {
2927 
2928 	return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0));
2929 }
2930 
2931 static int
2932 intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
2933 {
2934 	struct ntb_softc *ntb = device_get_softc(dev);
2935 	struct ntb_pci_bar_info *bar;
2936 
2937 	if (idx >= intel_ntb_mw_count(dev))
2938 		return (EINVAL);
2939 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
2940 
2941 	bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
2942 	*mode = bar->map_mode;
2943 	return (0);
2944 }
2945 
2946 static int
2947 intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
2948 {
2949 	struct ntb_softc *ntb = device_get_softc(dev);
2950 
2951 	if (idx >= intel_ntb_mw_count(dev))
2952 		return (EINVAL);
2953 
2954 	idx = intel_ntb_user_mw_to_idx(ntb, idx);
2955 	return (intel_ntb_mw_set_wc_internal(ntb, idx, mode));
2956 }
2957 
2958 static int
intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx,
    vm_memattr_t mode)
2960 {
2961 	struct ntb_pci_bar_info *bar;
2962 	int rc;
2963 
2964 	bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
2965 	if (bar->map_mode == mode)
2966 		return (0);
2967 
2968 	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mode);
2969 	if (rc == 0)
2970 		bar->map_mode = mode;
2971 
2972 	return (rc);
2973 }
2974 
2975 static void
2976 intel_ntb_peer_db_set(device_t dev, uint64_t bit)
2977 {
2978 	struct ntb_softc *ntb = device_get_softc(dev);
2979 
2980 	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
2981 		struct ntb_pci_bar_info *lapic;
2982 		unsigned i;
2983 
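		/*
		 * Ringing the doorbell directly would trip the errata;
		 * instead, write each MSI-X message straight into the
		 * window that maps the peer's LAPIC.
		 */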
2984 		lapic = ntb->peer_lapic_bar;
2985 
2986 		for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
2987 			if ((bit & intel_ntb_db_vector_mask(dev, i)) != 0)
2988 				bus_space_write_4(lapic->pci_bus_tag,
2989 				    lapic->pci_bus_handle,
2990 				    ntb->peer_msix_data[i].nmd_ofs,
2991 				    ntb->peer_msix_data[i].nmd_data);
2992 		}
2993 		return;
2994 	}
2995 
2996 	if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
2997 		intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
2998 		return;
2999 	}
3000 
3001 	db_iowrite(ntb, ntb->peer_reg->db_bell, bit);
3002 }
3003 
3004 static int
3005 intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
3006 {
3007 	struct ntb_softc *ntb = device_get_softc(dev);
3008 	struct ntb_pci_bar_info *bar;
3009 	uint64_t regoff;
3010 
3011 	KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL"));
3012 
3013 	if (!HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
3014 		bar = &ntb->bar_info[NTB_CONFIG_BAR];
3015 		regoff = ntb->peer_reg->db_bell;
3016 	} else {
3017 		KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
3018 		    ("invalid b2b idx"));
3019 
3020 		bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
3021 		regoff = XEON_PDOORBELL_OFFSET;
3022 	}
3023 	KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh"));
3024 
3025 	/* HACK: Specific to current x86 bus implementation. */
3026 	*db_addr = ((uint64_t)bar->pci_bus_handle + regoff);
3027 	*db_size = ntb->reg->db_size;
3028 	return (0);
3029 }
3030 
3031 static uint64_t
3032 intel_ntb_db_valid_mask(device_t dev)
3033 {
3034 	struct ntb_softc *ntb = device_get_softc(dev);
3035 
3036 	return (ntb->db_valid_mask);
3037 }
3038 
3039 static int
3040 intel_ntb_db_vector_count(device_t dev)
3041 {
3042 	struct ntb_softc *ntb = device_get_softc(dev);
3043 
3044 	return (ntb->db_vec_count);
3045 }
3046 
3047 static uint64_t
3048 intel_ntb_db_vector_mask(device_t dev, uint32_t vector)
3049 {
3050 	struct ntb_softc *ntb = device_get_softc(dev);
3051 
	if (vector >= ntb->db_vec_count)
3053 		return (0);
3054 	return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector));
3055 }
3056 
3057 static bool
3058 intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
3059 {
3060 	struct ntb_softc *ntb = device_get_softc(dev);
3061 
3062 	if (speed != NULL)
3063 		*speed = intel_ntb_link_sta_speed(ntb);
3064 	if (width != NULL)
3065 		*width = intel_ntb_link_sta_width(ntb);
3066 	return (link_is_up(ntb));
3067 }
3068 
3069 static void
3070 save_bar_parameters(struct ntb_pci_bar_info *bar)
3071 {
3072 
3073 	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
3074 	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
3075 	bar->pbase = rman_get_start(bar->pci_resource);
3076 	bar->size = rman_get_size(bar->pci_resource);
3077 	bar->vbase = rman_get_virtual(bar->pci_resource);
3078 }
3079 
3080 static device_method_t ntb_intel_methods[] = {
3081 	/* Device interface */
3082 	DEVMETHOD(device_probe,		intel_ntb_probe),
3083 	DEVMETHOD(device_attach,	intel_ntb_attach),
3084 	DEVMETHOD(device_detach,	intel_ntb_detach),
3085 	/* Bus interface */
3086 	DEVMETHOD(bus_child_location_str, ntb_child_location_str),
3087 	DEVMETHOD(bus_print_child,	ntb_print_child),
3088 	/* NTB interface */
3089 	DEVMETHOD(ntb_link_is_up,	intel_ntb_link_is_up),
3090 	DEVMETHOD(ntb_link_enable,	intel_ntb_link_enable),
3091 	DEVMETHOD(ntb_link_disable,	intel_ntb_link_disable),
3092 	DEVMETHOD(ntb_link_enabled,	intel_ntb_link_enabled),
3093 	DEVMETHOD(ntb_mw_count,		intel_ntb_mw_count),
3094 	DEVMETHOD(ntb_mw_get_range,	intel_ntb_mw_get_range),
3095 	DEVMETHOD(ntb_mw_set_trans,	intel_ntb_mw_set_trans),
3096 	DEVMETHOD(ntb_mw_clear_trans,	intel_ntb_mw_clear_trans),
3097 	DEVMETHOD(ntb_mw_get_wc,	intel_ntb_mw_get_wc),
3098 	DEVMETHOD(ntb_mw_set_wc,	intel_ntb_mw_set_wc),
3099 	DEVMETHOD(ntb_spad_count,	intel_ntb_spad_count),
3100 	DEVMETHOD(ntb_spad_clear,	intel_ntb_spad_clear),
3101 	DEVMETHOD(ntb_spad_write,	intel_ntb_spad_write),
3102 	DEVMETHOD(ntb_spad_read,	intel_ntb_spad_read),
3103 	DEVMETHOD(ntb_peer_spad_write,	intel_ntb_peer_spad_write),
3104 	DEVMETHOD(ntb_peer_spad_read,	intel_ntb_peer_spad_read),
3105 	DEVMETHOD(ntb_db_valid_mask,	intel_ntb_db_valid_mask),
3106 	DEVMETHOD(ntb_db_vector_count,	intel_ntb_db_vector_count),
3107 	DEVMETHOD(ntb_db_vector_mask,	intel_ntb_db_vector_mask),
3108 	DEVMETHOD(ntb_db_clear,		intel_ntb_db_clear),
3109 	DEVMETHOD(ntb_db_clear_mask,	intel_ntb_db_clear_mask),
3110 	DEVMETHOD(ntb_db_read,		intel_ntb_db_read),
3111 	DEVMETHOD(ntb_db_set_mask,	intel_ntb_db_set_mask),
3112 	DEVMETHOD(ntb_peer_db_addr,	intel_ntb_peer_db_addr),
3113 	DEVMETHOD(ntb_peer_db_set,	intel_ntb_peer_db_set),
3114 	DEVMETHOD_END
3115 };
3116 
3117 static DEFINE_CLASS_0(ntb_hw, ntb_intel_driver, ntb_intel_methods,
3118     sizeof(struct ntb_softc));
3119 DRIVER_MODULE(ntb_hw_intel, pci, ntb_intel_driver, ntb_hw_devclass, NULL, NULL);
3120 MODULE_DEPEND(ntb_hw_intel, ntb, 1, 1, 1);
3121 MODULE_VERSION(ntb_hw_intel, 1);
3122 MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ntb_hw_intel, pci_ids,
3123     nitems(pci_ids));
3124