/*-
 * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
 *
 * This module contains a driver for NTB hardware in Intel Xeon/Atom CPUs.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ntb_hw_intel.h"
#include "../ntb.h"

#define MAX_MSIX_INTERRUPTS	MAX(XEON_DB_COUNT, ATOM_DB_COUNT)

#define NTB_HB_TIMEOUT		1 /* second */
#define ATOM_LINK_RECOVERY_TIME	500 /* ms */
#define BAR_HIGH_MASK		(~((1ull << 12) - 1))

#define NTB_MSIX_VER_GUARD	0xaabbccdd
#define NTB_MSIX_RECEIVED	0xe0f0e0f0

/*
 * PCI constants could be somewhere more generic, but aren't defined/used in
 * pci.c.
 */
#define PCI_MSIX_ENTRY_SIZE		16
#define PCI_MSIX_ENTRY_LOWER_ADDR	0
#define PCI_MSIX_ENTRY_UPPER_ADDR	4
#define PCI_MSIX_ENTRY_DATA		8

enum ntb_device_type {
	NTB_XEON,
	NTB_ATOM
};

/* ntb_conn_type values are hardware numbers; they cannot change. */
enum ntb_conn_type {
	NTB_CONN_TRANSPARENT = 0,
	NTB_CONN_B2B = 1,
	NTB_CONN_RP = 2,
};

enum ntb_b2b_direction {
	NTB_DEV_USD = 0,
	NTB_DEV_DSD = 1,
};

enum ntb_bar {
	NTB_CONFIG_BAR = 0,
	NTB_B2B_BAR_1,
	NTB_B2B_BAR_2,
	NTB_B2B_BAR_3,
	NTB_MAX_BARS
};

enum {
	NTB_MSIX_GUARD = 0,
	NTB_MSIX_DATA0,
	NTB_MSIX_DATA1,
	NTB_MSIX_DATA2,
	NTB_MSIX_OFS0,
	NTB_MSIX_OFS1,
	NTB_MSIX_OFS2,
	NTB_MSIX_DONE,
	NTB_MAX_MSIX_SPAD
};

/* Device features and workarounds */
#define HAS_FEATURE(ntb, feature) \
	(((ntb)->features & (feature)) != 0)

struct ntb_hw_info {
	uint32_t		device_id;
	const char		*desc;
	enum ntb_device_type	type;
	uint32_t		features;
};

struct ntb_pci_bar_info {
	bus_space_tag_t		pci_bus_tag;
	bus_space_handle_t	pci_bus_handle;
	int			pci_resource_id;
	struct resource		*pci_resource;
	vm_paddr_t		pbase;
	caddr_t			vbase;
	vm_size_t		size;
	vm_memattr_t		map_mode;

	/* Configuration register offsets */
	uint32_t		psz_off;
	uint32_t		ssz_off;
	uint32_t		pbarxlat_off;
};

struct ntb_int_info {
	struct resource	*res;
	int		rid;
	void		*tag;
};

struct ntb_vec {
	struct ntb_softc	*ntb;
	uint32_t		num;
	unsigned		masked;
};

struct ntb_reg {
	uint32_t	ntb_ctl;
	uint32_t	lnk_sta;
	uint8_t		db_size;
	unsigned	mw_bar[NTB_MAX_BARS];
};

struct ntb_alt_reg {
	uint32_t	db_bell;
	uint32_t	db_mask;
	uint32_t	spad;
};

struct ntb_xlat_reg {
	uint32_t	bar0_base;
	uint32_t	bar2_base;
	uint32_t	bar4_base;
	uint32_t	bar5_base;

	uint32_t	bar2_xlat;
	uint32_t	bar4_xlat;
	uint32_t	bar5_xlat;

	uint32_t	bar2_limit;
	uint32_t	bar4_limit;
	uint32_t	bar5_limit;
};

struct ntb_b2b_addr {
	uint64_t	bar0_addr;
	uint64_t	bar2_addr64;
	uint64_t	bar4_addr64;
	uint64_t	bar4_addr32;
	uint64_t	bar5_addr32;
};

struct ntb_msix_data {
	uint32_t	nmd_ofs;
	uint32_t	nmd_data;
};

struct ntb_softc {
	/* ntb.c context. Do not move! Must go first! */
	void			*ntb_store;

	device_t		device;
	enum ntb_device_type	type;
	uint32_t		features;

	struct ntb_pci_bar_info	bar_info[NTB_MAX_BARS];
	struct ntb_int_info	int_info[MAX_MSIX_INTERRUPTS];
	uint32_t		allocated_interrupts;

	struct ntb_msix_data	peer_msix_data[XEON_NONLINK_DB_MSIX_BITS];
	struct ntb_msix_data	msix_data[XEON_NONLINK_DB_MSIX_BITS];
	bool			peer_msix_good;
	bool			peer_msix_done;
	struct ntb_pci_bar_info	*peer_lapic_bar;
	struct callout		peer_msix_work;

	struct callout		heartbeat_timer;
	struct callout		lr_timer;

	struct ntb_vec		*msix_vec;

	uint32_t		ppd;
	enum ntb_conn_type	conn_type;
	enum ntb_b2b_direction	dev_type;

	/* Offset of peer bar0 in B2B BAR */
	uint64_t		b2b_off;
	/* Memory window used to access peer bar0 */
#define B2B_MW_DISABLED		UINT8_MAX
	uint8_t			b2b_mw_idx;
	uint32_t		msix_xlat;
	uint8_t			msix_mw_idx;

	uint8_t			mw_count;
	uint8_t			spad_count;
	uint8_t			db_count;
	uint8_t			db_vec_count;
	uint8_t			db_vec_shift;

	/* Protects local db_mask. */
#define DB_MASK_LOCK(sc)	mtx_lock_spin(&(sc)->db_mask_lock)
#define DB_MASK_UNLOCK(sc)	mtx_unlock_spin(&(sc)->db_mask_lock)
#define DB_MASK_ASSERT(sc, f)	mtx_assert(&(sc)->db_mask_lock, (f))
	struct mtx		db_mask_lock;

	volatile uint32_t	ntb_ctl;
	volatile uint32_t	lnk_sta;

	uint64_t		db_valid_mask;
	uint64_t		db_link_mask;
	uint64_t		db_mask;
	uint64_t		fake_db;	/* NTB_SB01BASE_LOCKUP */
	uint64_t		force_db;	/* NTB_SB01BASE_LOCKUP */

	int			last_ts;	/* ticks @ last irq */

	const struct ntb_reg		*reg;
	const struct ntb_alt_reg	*self_reg;
	const struct ntb_alt_reg	*peer_reg;
	const struct ntb_xlat_reg	*xlat_reg;
};

#ifdef __i386__
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif
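/*
 * Editorial note: the i386 helpers above synthesize one 64-bit access from
 * two 32-bit accesses (low dword first), so a 64-bit register read here is
 * not atomic with respect to a concurrent hardware update of the full
 * register.
 */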
#define intel_ntb_bar_read(SIZE, bar, offset) \
	    bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset))
#define intel_ntb_bar_write(SIZE, bar, offset, val) \
	    bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
	    ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
#define intel_ntb_reg_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
#define intel_ntb_reg_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
#define intel_ntb_mw_read(SIZE, offset) \
	    intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset)
#define intel_ntb_mw_write(SIZE, offset, val) \
	    intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
		offset, val)
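/*
 * Editorial sketch of the accessors above: `ntb' must be a struct ntb_softc
 * pointer in scope at every use site.  For example, a hypothetical
 *
 *	uint32_t cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
 *
 * expands to a bus_space_read_4() against NTB_CONFIG_BAR's tag and handle.
 * The mw_read/mw_write variants instead target the BAR backing the B2B
 * memory window, so they are only meaningful once b2b_mw_idx is valid
 * (i.e., not B2B_MW_DISABLED).
 */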
static int intel_ntb_probe(device_t device);
static int intel_ntb_attach(device_t device);
static int intel_ntb_detach(device_t device);
static uint64_t intel_ntb_db_valid_mask(device_t dev);
static void intel_ntb_spad_clear(device_t dev);
static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector);
static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
    enum ntb_width *width);
static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed,
    enum ntb_width width);
static int intel_ntb_link_disable(device_t dev);
static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx,
    uint32_t val);

static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
    uint32_t *base, uint32_t *xlat, uint32_t *lmt);
static int intel_ntb_map_pci_bars(struct ntb_softc *ntb);
static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
    vm_memattr_t);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
    const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
    struct ntb_pci_bar_info *bar);
static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb);
static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
static int intel_ntb_init_isr(struct ntb_softc *ntb);
static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb);
static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *,
    uint64_t db_vector);
static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
static int intel_ntb_create_msix_vec(struct ntb_softc *ntb,
    uint32_t num_vectors);
static void intel_ntb_free_msix_vec(struct ntb_softc *ntb);
static void intel_ntb_get_msix_info(struct ntb_softc *ntb);
static void intel_ntb_exchange_msix(void *);
static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id);
static void intel_ntb_detect_max_mw(struct ntb_softc *ntb);
static int intel_ntb_detect_xeon(struct ntb_softc *ntb);
static int intel_ntb_detect_atom(struct ntb_softc *ntb);
static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb);
static int intel_ntb_atom_init_dev(struct ntb_softc *ntb);
static void intel_ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
    enum ntb_bar regbar);
static void xeon_set_sbar_base_and_limit(struct ntb_softc *,
    uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar);
static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr,
    enum ntb_bar idx);
static int xeon_setup_b2b_mw(struct ntb_softc *,
    const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr);
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool _xeon_link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *);
static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
static void recover_atom_link(void *arg);
static bool intel_ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
static void intel_ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);

static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
#define intel_ntb_printf(lvl, ...) do {				\
	if ((lvl) <= g_ntb_hw_debug_level) {			\
		device_printf(ntb->device, __VA_ARGS__);	\
	}							\
} while (0)
#define	_NTB_PAT_UC	0
#define	_NTB_PAT_WC	1
#define	_NTB_PAT_WT	4
#define	_NTB_PAT_WP	5
#define	_NTB_PAT_WB	6
#define	_NTB_PAT_UCM	7
static unsigned g_ntb_mw_pat = _NTB_PAT_UC;
SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN,
    &g_ntb_mw_pat, 0, "Configure the default memory window cache flags (PAT): "
    "UC: "  __XSTRING(_NTB_PAT_UC) ", "
    "WC: "  __XSTRING(_NTB_PAT_WC) ", "
    "WT: "  __XSTRING(_NTB_PAT_WT) ", "
    "WP: "  __XSTRING(_NTB_PAT_WP) ", "
    "WB: "  __XSTRING(_NTB_PAT_WB) ", "
    "UC-: " __XSTRING(_NTB_PAT_UCM));
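/*
 * Editorial example: default_mw_pat is CTLFLAG_RDTUN, so it is set as a
 * boot-time tunable, e.g. in /boot/loader.conf:
 *
 *	hw.ntb.default_mw_pat=1		# map memory windows write-combining
 *
 * The numeric values are translated to VM_MEMATTR_* constants by
 * intel_ntb_pat_flags() below.
 */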
static inline vm_memattr_t
intel_ntb_pat_flags(void)
{

	switch (g_ntb_mw_pat) {
	case _NTB_PAT_WC:
		return (VM_MEMATTR_WRITE_COMBINING);
	case _NTB_PAT_WT:
		return (VM_MEMATTR_WRITE_THROUGH);
	case _NTB_PAT_WP:
		return (VM_MEMATTR_WRITE_PROTECTED);
	case _NTB_PAT_WB:
		return (VM_MEMATTR_WRITE_BACK);
	case _NTB_PAT_UCM:
		return (VM_MEMATTR_WEAK_UNCACHEABLE);
	case _NTB_PAT_UC:
		/* FALLTHROUGH */
	default:
		return (VM_MEMATTR_UNCACHEABLE);
	}
}

/*
 * Well, this obviously doesn't belong here, but it doesn't seem to exist
 * anywhere better yet.
 */
static inline const char *
intel_ntb_vm_memattr_to_str(vm_memattr_t pat)
{

	switch (pat) {
	case VM_MEMATTR_WRITE_COMBINING:
		return ("WRITE_COMBINING");
	case VM_MEMATTR_WRITE_THROUGH:
		return ("WRITE_THROUGH");
	case VM_MEMATTR_WRITE_PROTECTED:
		return ("WRITE_PROTECTED");
	case VM_MEMATTR_WRITE_BACK:
		return ("WRITE_BACK");
	case VM_MEMATTR_WEAK_UNCACHEABLE:
		return ("UNCACHED");
	case VM_MEMATTR_UNCACHEABLE:
		return ("UNCACHEABLE");
	default:
		return ("UNKNOWN");
	}
}

static int g_ntb_msix_idx = 1;
SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx,
    0, "Use this memory window to access the peer MSIX message complex on "
    "certain Xeon-based NTB systems, as a workaround for a hardware errata. "
    "Like b2b_mw_idx, negative values index from the last available memory "
    "window. (Applies on Xeon platforms with SB01BASE_LOCKUP errata.)");

static int g_ntb_mw_idx = -1;
SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx,
    0, "Use this memory window to access the peer NTB registers. A "
    "non-negative value starts from the first MW index; a negative value "
    "starts from the last MW index. The default is -1, i.e., the last "
    "available memory window. Both sides of the NTB MUST set the same "
    "value here! (Applies on Xeon platforms with SDOORBELL_LOCKUP errata.)");

/* Hardware owns the low 16 bits of features. */
#define NTB_BAR_SIZE_4K		(1 << 0)
#define NTB_SDOORBELL_LOCKUP	(1 << 1)
#define NTB_SB01BASE_LOCKUP	(1 << 2)
#define NTB_B2BDOORBELL_BIT14	(1 << 3)
/* Software/configuration owns the top 16 bits. */
#define NTB_SPLIT_BAR		(1ull << 16)

#define NTB_FEATURES_STR \
    "\20\21SPLIT_BAR4\04B2B_DOORBELL_BIT14\03SB01BASE_LOCKUP" \
    "\02SDOORBELL_LOCKUP\01BAR_SIZE_4K"

static struct ntb_hw_info pci_ids[] = {
	/* XXX: PS/SS IDs left out until they are supported. */
	{ 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B",
		NTB_ATOM, 0 },

	{ 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B",
		NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 },
	{ 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K },
	{ 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
	{ 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON,
		NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 |
		    NTB_SB01BASE_LOCKUP },
};

static const struct ntb_reg atom_reg = {
	.ntb_ctl = ATOM_NTBCNTL_OFFSET,
	.lnk_sta = ATOM_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint64_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 },
};

static const struct ntb_alt_reg atom_pri_reg = {
	.db_bell = ATOM_PDOORBELL_OFFSET,
	.db_mask = ATOM_PDBMSK_OFFSET,
	.spad = ATOM_SPAD_OFFSET,
};

static const struct ntb_alt_reg atom_b2b_reg = {
	.db_bell = ATOM_B2B_DOORBELL_OFFSET,
	.spad = ATOM_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg atom_sec_xlat = {
#if 0
	/* "FIXME" says the Linux driver. */
	.bar0_base = ATOM_SBAR0BASE_OFFSET,
	.bar2_base = ATOM_SBAR2BASE_OFFSET,
	.bar4_base = ATOM_SBAR4BASE_OFFSET,

	.bar2_limit = ATOM_SBAR2LMT_OFFSET,
	.bar4_limit = ATOM_SBAR4LMT_OFFSET,
#endif

	.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
	.bar4_xlat = ATOM_SBAR4XLAT_OFFSET,
};

static const struct ntb_reg xeon_reg = {
	.ntb_ctl = XEON_NTBCNTL_OFFSET,
	.lnk_sta = XEON_LINK_STATUS_OFFSET,
	.db_size = sizeof(uint16_t),
	.mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 },
};

static const struct ntb_alt_reg xeon_pri_reg = {
	.db_bell = XEON_PDOORBELL_OFFSET,
	.db_mask = XEON_PDBMSK_OFFSET,
	.spad = XEON_SPAD_OFFSET,
};

static const struct ntb_alt_reg xeon_b2b_reg = {
	.db_bell = XEON_B2B_DOORBELL_OFFSET,
	.spad = XEON_B2B_SPAD_OFFSET,
};

static const struct ntb_xlat_reg xeon_sec_xlat = {
	.bar0_base = XEON_SBAR0BASE_OFFSET,
	.bar2_base = XEON_SBAR2BASE_OFFSET,
	.bar4_base = XEON_SBAR4BASE_OFFSET,
	.bar5_base = XEON_SBAR5BASE_OFFSET,

	.bar2_limit = XEON_SBAR2LMT_OFFSET,
	.bar4_limit = XEON_SBAR4LMT_OFFSET,
	.bar5_limit = XEON_SBAR5LMT_OFFSET,

	.bar2_xlat = XEON_SBAR2XLAT_OFFSET,
	.bar4_xlat = XEON_SBAR4XLAT_OFFSET,
	.bar5_xlat = XEON_SBAR5XLAT_OFFSET,
};

static struct ntb_b2b_addr xeon_b2b_usd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

static struct ntb_b2b_addr xeon_b2b_dsd_addr = {
	.bar0_addr = XEON_B2B_BAR0_ADDR,
	.bar2_addr64 = XEON_B2B_BAR2_ADDR64,
	.bar4_addr64 = XEON_B2B_BAR4_ADDR64,
	.bar4_addr32 = XEON_B2B_BAR4_ADDR32,
	.bar5_addr32 = XEON_B2B_BAR5_ADDR32,
};

SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0,
    "B2B MW segment overrides -- MUST be the same on both sides");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the upstream side of the link.  MUST be the same "
    "address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon "
    "hardware, use this 64-bit address on the bus between the NTB devices for "
    "the window at BAR2, on the downstream side of the link.  MUST be the "
    "same address on both sides.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4.");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 "
    "(split-BAR mode).");
SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN,
    &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 "
    "(split-BAR mode).");

/*
 * OS <-> Driver interface structures
 */
MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");

/*
 * OS <-> Driver linkage functions
 */
static int
intel_ntb_probe(device_t device)
{
	struct ntb_hw_info *p;

	p = intel_ntb_get_device_info(pci_get_devid(device));
	if (p == NULL)
		return (ENXIO);

	device_set_desc(device, p->desc);
	return (0);
}

static int
intel_ntb_attach(device_t device)
{
	struct ntb_softc *ntb;
	struct ntb_hw_info *p;
	int error;

	ntb = device_get_softc(device);
	p = intel_ntb_get_device_info(pci_get_devid(device));

	ntb->device = device;
	ntb->type = p->type;
	ntb->features = p->features;
	ntb->b2b_mw_idx = B2B_MW_DISABLED;
	ntb->msix_mw_idx = B2B_MW_DISABLED;

	/* Heartbeat timer for NTB_ATOM since there is no link interrupt */
	callout_init(&ntb->heartbeat_timer, 1);
	callout_init(&ntb->lr_timer, 1);
	callout_init(&ntb->peer_msix_work, 1);
	mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);

	if (ntb->type == NTB_ATOM)
		error = intel_ntb_detect_atom(ntb);
	else
		error = intel_ntb_detect_xeon(ntb);
	if (error != 0)
		goto out;

	intel_ntb_detect_max_mw(ntb);

	pci_enable_busmaster(ntb->device);

	error = intel_ntb_map_pci_bars(ntb);
	if (error != 0)
		goto out;
	if (ntb->type == NTB_ATOM)
		error = intel_ntb_atom_init_dev(ntb);
	else
		error = intel_ntb_xeon_init_dev(ntb);
	if (error != 0)
		goto out;

	intel_ntb_spad_clear(device);

	intel_ntb_poll_link(ntb);

	intel_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	if (error != 0)
		intel_ntb_detach(device);
	return (error);
}
static int
intel_ntb_detach(device_t device)
{
	struct ntb_softc *ntb;

	ntb = device_get_softc(device);

	/* Detach & delete all children */
	ntb_unregister_device(device);

	if (ntb->self_reg != NULL) {
		DB_MASK_LOCK(ntb);
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_valid_mask);
		DB_MASK_UNLOCK(ntb);
	}
	callout_drain(&ntb->heartbeat_timer);
	callout_drain(&ntb->lr_timer);
	callout_drain(&ntb->peer_msix_work);
	pci_disable_busmaster(ntb->device);
	if (ntb->type == NTB_XEON)
		intel_ntb_teardown_xeon(ntb);
	intel_ntb_teardown_interrupts(ntb);

	mtx_destroy(&ntb->db_mask_lock);

	intel_ntb_unmap_pci_bar(ntb);

	return (0);
}

/*
 * Driver internal routines
 */
static inline enum ntb_bar
intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{

	KASSERT(mw < ntb->mw_count,
	    ("%s: mw:%u >= count:%u", __func__, mw, (unsigned)ntb->mw_count));
	KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw"));

	return (ntb->reg->mw_bar[mw]);
}

static inline bool
bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar)
{
	/* XXX This assertion could be stronger. */
	KASSERT(bar < NTB_MAX_BARS, ("bogus bar"));
	return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(ntb, NTB_SPLIT_BAR));
}

static inline void
bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
    uint32_t *xlat, uint32_t *lmt)
{
	uint32_t basev, lmtv, xlatv;

	switch (bar) {
	case NTB_B2B_BAR_1:
		basev = ntb->xlat_reg->bar2_base;
		lmtv = ntb->xlat_reg->bar2_limit;
		xlatv = ntb->xlat_reg->bar2_xlat;
		break;
	case NTB_B2B_BAR_2:
		basev = ntb->xlat_reg->bar4_base;
		lmtv = ntb->xlat_reg->bar4_limit;
		xlatv = ntb->xlat_reg->bar4_xlat;
		break;
	case NTB_B2B_BAR_3:
		basev = ntb->xlat_reg->bar5_base;
		lmtv = ntb->xlat_reg->bar5_limit;
		xlatv = ntb->xlat_reg->bar5_xlat;
		break;
	default:
		KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS,
		    ("bad bar"));
		basev = lmtv = xlatv = 0;
		break;
	}

	if (base != NULL)
		*base = basev;
	if (xlat != NULL)
		*xlat = xlatv;
	if (lmt != NULL)
		*lmt = lmtv;
}

static int
intel_ntb_map_pci_bars(struct ntb_softc *ntb)
{
	int rc;

	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET;

	ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET;

	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR))
		goto out;

	ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5);
	rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]);
	ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET;
	ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET;

out:
	if (rc != 0)
		device_printf(ntb->device,
		    "unable to allocate pci resource\n");
	return (rc);
}

static void
print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar,
    const char *kind)
{

	device_printf(ntb->device,
	    "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1,
	    (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
	    (uintmax_t)bar->size, kind);
}

static int
map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mmr");
	return (0);
}

static int
map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
{
	int rc;
	vm_memattr_t mapmode;
	uint8_t bar_size_bits = 0;

	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);

	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	/*
	 * Ivytown NTB BAR sizes are misreported by the hardware due to a
	 * hardware issue. To work around this, query the size it should be
	 * configured to by the device and modify the resource to correspond to
	 * this new size. The BIOS on systems with this problem is required to
	 * provide enough address space to allow the driver to make this change
	 * safely.
	 *
	 * Ideally I could have just specified the size when I allocated the
	 * resource like:
	 *	bus_alloc_resource(ntb->device,
	 *	    SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul,
	 *	    1ul << bar_size_bits, RF_ACTIVE);
	 * but the PCI driver does not honor the size in this call, so we have
	 * to modify it after the fact.
	 */
	if (HAS_FEATURE(ntb, NTB_BAR_SIZE_4K)) {
		if (bar->pci_resource_id == PCIR_BAR(2))
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR23SZ_OFFSET, 1);
		else
			bar_size_bits = pci_read_config(ntb->device,
			    XEON_PBAR45SZ_OFFSET, 1);

		rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY,
		    bar->pci_resource, bar->pbase,
		    bar->pbase + (1ul << bar_size_bits) - 1);
		if (rc != 0) {
			device_printf(ntb->device,
			    "unable to resize bar\n");
			return (rc);
		}

		save_bar_parameters(bar);
	}

	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
	print_map_success(ntb, bar, "mw");

	/*
	 * Optionally, mark MW BARs as anything other than UC to improve
	 * performance.
	 */
908 */ 909 mapmode = intel_ntb_pat_flags(); 910 if (mapmode == bar->map_mode) 911 return (0); 912 913 rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mapmode); 914 if (rc == 0) { 915 bar->map_mode = mapmode; 916 device_printf(ntb->device, 917 "Marked BAR%d v:[%p-%p] p:[%p-%p] as " 918 "%s.\n", 919 PCI_RID2BAR(bar->pci_resource_id), bar->vbase, 920 (char *)bar->vbase + bar->size - 1, 921 (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), 922 intel_ntb_vm_memattr_to_str(mapmode)); 923 } else 924 device_printf(ntb->device, 925 "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as " 926 "%s: %d\n", 927 PCI_RID2BAR(bar->pci_resource_id), bar->vbase, 928 (char *)bar->vbase + bar->size - 1, 929 (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), 930 intel_ntb_vm_memattr_to_str(mapmode), rc); 931 /* Proceed anyway */ 932 return (0); 933 } 934 935 static void 936 intel_ntb_unmap_pci_bar(struct ntb_softc *ntb) 937 { 938 struct ntb_pci_bar_info *current_bar; 939 int i; 940 941 for (i = 0; i < NTB_MAX_BARS; i++) { 942 current_bar = &ntb->bar_info[i]; 943 if (current_bar->pci_resource != NULL) 944 bus_release_resource(ntb->device, SYS_RES_MEMORY, 945 current_bar->pci_resource_id, 946 current_bar->pci_resource); 947 } 948 } 949 950 static int 951 intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors) 952 { 953 uint32_t i; 954 int rc; 955 956 for (i = 0; i < num_vectors; i++) { 957 ntb->int_info[i].rid = i + 1; 958 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, 959 SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); 960 if (ntb->int_info[i].res == NULL) { 961 device_printf(ntb->device, 962 "bus_alloc_resource failed\n"); 963 return (ENOMEM); 964 } 965 ntb->int_info[i].tag = NULL; 966 ntb->allocated_interrupts++; 967 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 968 INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr, 969 &ntb->msix_vec[i], &ntb->int_info[i].tag); 970 if (rc != 0) { 971 device_printf(ntb->device, "bus_setup_intr failed\n"); 972 return (ENXIO); 973 } 974 } 975 return (0); 976 } 977 978 /* 979 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector 980 * cannot be allocated for each MSI-X message. JHB seems to think remapping 981 * should be okay. This tunable should enable us to test that hypothesis 982 * when someone gets their hands on some Xeon hardware. 983 */ 984 static int ntb_force_remap_mode; 985 SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN, 986 &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped" 987 " to a smaller number of ithreads, even if the desired number are " 988 "available"); 989 990 /* 991 * In case it is NOT ok, give consumers an abort button. 992 */ 993 static int ntb_prefer_intx; 994 SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN, 995 &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather " 996 "than remapping MSI-X messages over available slots (match Linux driver " 997 "behavior)"); 998 999 /* 1000 * Remap the desired number of MSI-X messages to available ithreads in a simple 1001 * round-robin fashion. 
1002 */ 1003 static int 1004 intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail) 1005 { 1006 u_int *vectors; 1007 uint32_t i; 1008 int rc; 1009 1010 if (ntb_prefer_intx != 0) 1011 return (ENXIO); 1012 1013 vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK); 1014 1015 for (i = 0; i < desired; i++) 1016 vectors[i] = (i % avail) + 1; 1017 1018 rc = pci_remap_msix(dev, desired, vectors); 1019 free(vectors, M_NTB); 1020 return (rc); 1021 } 1022 1023 static int 1024 intel_ntb_init_isr(struct ntb_softc *ntb) 1025 { 1026 uint32_t desired_vectors, num_vectors; 1027 int rc; 1028 1029 ntb->allocated_interrupts = 0; 1030 ntb->last_ts = ticks; 1031 1032 /* 1033 * Mask all doorbell interrupts. (Except link events!) 1034 */ 1035 DB_MASK_LOCK(ntb); 1036 ntb->db_mask = ntb->db_valid_mask; 1037 db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); 1038 DB_MASK_UNLOCK(ntb); 1039 1040 num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), 1041 ntb->db_count); 1042 if (desired_vectors >= 1) { 1043 rc = pci_alloc_msix(ntb->device, &num_vectors); 1044 1045 if (ntb_force_remap_mode != 0 && rc == 0 && 1046 num_vectors == desired_vectors) 1047 num_vectors--; 1048 1049 if (rc == 0 && num_vectors < desired_vectors) { 1050 rc = intel_ntb_remap_msix(ntb->device, desired_vectors, 1051 num_vectors); 1052 if (rc == 0) 1053 num_vectors = desired_vectors; 1054 else 1055 pci_release_msi(ntb->device); 1056 } 1057 if (rc != 0) 1058 num_vectors = 1; 1059 } else 1060 num_vectors = 1; 1061 1062 if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) { 1063 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { 1064 device_printf(ntb->device, 1065 "Errata workaround does not support MSI or INTX\n"); 1066 return (EINVAL); 1067 } 1068 1069 ntb->db_vec_count = 1; 1070 ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT; 1071 rc = intel_ntb_setup_legacy_interrupt(ntb); 1072 } else { 1073 if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS && 1074 HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { 1075 device_printf(ntb->device, 1076 "Errata workaround expects %d doorbell bits\n", 1077 XEON_NONLINK_DB_MSIX_BITS); 1078 return (EINVAL); 1079 } 1080 1081 intel_ntb_create_msix_vec(ntb, num_vectors); 1082 rc = intel_ntb_setup_msix(ntb, num_vectors); 1083 } 1084 if (rc != 0) { 1085 device_printf(ntb->device, 1086 "Error allocating interrupts: %d\n", rc); 1087 intel_ntb_free_msix_vec(ntb); 1088 } 1089 1090 return (rc); 1091 } 1092 1093 static int 1094 intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb) 1095 { 1096 int rc; 1097 1098 ntb->int_info[0].rid = 0; 1099 ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, 1100 &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); 1101 if (ntb->int_info[0].res == NULL) { 1102 device_printf(ntb->device, "bus_alloc_resource failed\n"); 1103 return (ENOMEM); 1104 } 1105 1106 ntb->int_info[0].tag = NULL; 1107 ntb->allocated_interrupts = 1; 1108 1109 rc = bus_setup_intr(ntb->device, ntb->int_info[0].res, 1110 INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr, 1111 ntb, &ntb->int_info[0].tag); 1112 if (rc != 0) { 1113 device_printf(ntb->device, "bus_setup_intr failed\n"); 1114 return (ENXIO); 1115 } 1116 1117 return (0); 1118 } 1119 1120 static void 1121 intel_ntb_teardown_interrupts(struct ntb_softc *ntb) 1122 { 1123 struct ntb_int_info *current_int; 1124 int i; 1125 1126 for (i = 0; i < ntb->allocated_interrupts; i++) { 1127 current_int = &ntb->int_info[i]; 1128 if (current_int->tag != NULL) 1129 bus_teardown_intr(ntb->device, current_int->res, 1130 current_int->tag); 

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res),
			    current_int->res);
	}

	intel_ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

/*
 * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon.  Abstract it
 * out to make code clearer.
 */
static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

	if (ntb->type == NTB_ATOM)
		return (intel_ntb_reg_read(8, regoff));

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));

	return (intel_ntb_reg_read(2, regoff));
}

static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	    (uintmax_t)(val & ~ntb->db_valid_mask),
	    (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);
	db_iowrite_raw(ntb, regoff, val);
}

static inline void
db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	if (ntb->type == NTB_ATOM) {
		intel_ntb_reg_write(8, regoff, val);
		return;
	}

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
	intel_ntb_reg_write(2, regoff, (uint16_t)val);
}
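/*
 * Editorial sketch of the locking discipline the helpers above enforce: any
 * update of the local doorbell mask must hold db_mask_lock, e.g.
 *
 *	DB_MASK_LOCK(ntb);
 *	ntb->db_mask |= bits;
 *	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
 *	DB_MASK_UNLOCK(ntb);
 *
 * db_iowrite() asserts lock ownership when the target is the mask register;
 * db_iowrite_raw() performs the same access without the assertion.
 */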
static void
intel_ntb_db_set_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	if (!HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

static void
intel_ntb_db_clear_mask(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);
	uint64_t ibits;
	int i;

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	    (uintmax_t)(bits & ~ntb->db_valid_mask),
	    (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ibits = ntb->fake_db & ntb->db_mask & bits;
	ntb->db_mask &= ~bits;
	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/* Simulate fake interrupts if unmasked DB bits are set. */
		ntb->force_db |= ibits;
		for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
			if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0)
				swi_sched(ntb->int_info[i].tag, 0);
		}
	} else {
		db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	}
	DB_MASK_UNLOCK(ntb);
}

static uint64_t
intel_ntb_db_read(device_t dev)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
		return (ntb->fake_db);

	return (db_ioread(ntb, ntb->self_reg->db_bell));
}

static void
intel_ntb_db_clear(device_t dev, uint64_t bits)
{
	struct ntb_softc *ntb = device_get_softc(dev);

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	    (uintmax_t)(bits & ~ntb->db_valid_mask),
	    (uintmax_t)ntb->db_valid_mask));

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		DB_MASK_LOCK(ntb);
		ntb->fake_db &= ~bits;
		DB_MASK_UNLOCK(ntb);
		return;
	}

	db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

static inline uint64_t
intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
		/*
		 * Remap vectors in a custom way so that at least the first
		 * three doorbells do not generate stray events.  This breaks
		 * Linux compatibility (if one existed) when more than one DB
		 * is used (not by if_ntb).
		 */
		if (db_vector < XEON_NONLINK_DB_MSIX_BITS - 1)
			return (1 << db_vector);
		if (db_vector == XEON_NONLINK_DB_MSIX_BITS - 1)
			return (0x7ffc);
	}

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}
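/*
 * Editorial worked example of the generic case above (no errata workaround),
 * assuming a doorbell-to-vector shift of 5 (XEON_DB_MSIX_VECTOR_SHIFT):
 * vector 0 owns doorbell bits 0-4 (mask 0x1f), vector 1 owns bits 5-9
 * (0x3e0), and so on.  Under the legacy-interrupt fallback, db_vec_shift is
 * XEON_DB_TOTAL_SHIFT, so vector 0 covers every doorbell bit.
 */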
static void
intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = intel_ntb_vec_mask(ntb, vec);

	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (intel_ntb_poll_link(ntb))
			ntb_link_event(ntb->device);
	}

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    (vec_mask & ntb->db_link_mask) == 0) {
		DB_MASK_LOCK(ntb);

		/*
		 * Do not report same DB events again if not cleared yet,
		 * unless the mask was just cleared for them and this
		 * interrupt handler call can be the consequence of it.
		 */
		vec_mask &= ~ntb->fake_db | ntb->force_db;
		ntb->force_db &= ~vec_mask;

		/* Update our internal doorbell register. */
		ntb->fake_db |= vec_mask;

		/* Do not report masked DB events. */
		vec_mask &= ~ntb->db_mask;

		DB_MASK_UNLOCK(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb->device, vec);
}

static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	intel_ntb_interrupt(nvec->ntb, nvec->num);
}

static void
ndev_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	intel_ntb_interrupt(arg, 0);
}

static int
intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;

	ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < num_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

static void
intel_ntb_free_msix_vec(struct ntb_softc *ntb)
{

	if (ntb->msix_vec == NULL)
		return;

	free(ntb->msix_vec, M_NTB);
	ntb->msix_vec = NULL;
}

static void
intel_ntb_get_msix_info(struct ntb_softc *ntb)
{
	struct pci_devinfo *dinfo;
	struct pcicfg_msix *msix;
	uint32_t laddr, data, i, offset;

	dinfo = device_get_ivars(ntb->device);
	msix = &dinfo->cfg.msix;

	CTASSERT(XEON_NONLINK_DB_MSIX_BITS == nitems(ntb->msix_data));

	for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
		offset = msix->msix_table_offset + i * PCI_MSIX_ENTRY_SIZE;

		laddr = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_LOWER_ADDR);
		intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);

		KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE,
		    ("local MSIX addr 0x%x not in MSI base 0x%x", laddr,
		    MSI_INTEL_ADDR_BASE));
		ntb->msix_data[i].nmd_ofs = laddr;

		data = bus_read_4(msix->msix_table_res, offset +
		    PCI_MSIX_ENTRY_DATA);
		intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);

		ntb->msix_data[i].nmd_data = data;
	}
}

static struct ntb_hw_info *
intel_ntb_get_device_info(uint32_t device_id)
{
	struct ntb_hw_info *ep;

	for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
		if (ep->device_id == device_id)
			return (ep);
	}
	return (NULL);
}

static void
intel_ntb_teardown_xeon(struct ntb_softc *ntb)
{

	if (ntb->reg != NULL)
		intel_ntb_link_disable(ntb->device);
}

static void
intel_ntb_detect_max_mw(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_ATOM) {
		ntb->mw_count = ATOM_MW_COUNT;
		return;
	}

	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
		ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
	else
		ntb->mw_count = XEON_SNB_MW_COUNT;
}

static int
intel_ntb_detect_xeon(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
	ntb->ppd = ppd;

	if ((ppd & XEON_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
		ntb->features |= NTB_SPLIT_BAR;

	if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP) &&
	    !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
		device_printf(ntb->device,
		    "Cannot apply SB01BASE_LOCKUP workaround "
		    "with split BARs disabled!\n");
		device_printf(ntb->device,
		    "Expect system hangs under heavy NTB traffic!\n");
		ntb->features &= ~NTB_SB01BASE_LOCKUP;
	}

	/*
	 * SDOORBELL errata workaround gets in the way of SB01BASE_LOCKUP
	 * errata workaround; only do one at a time.
	 */
1453 */ 1454 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) 1455 ntb->features &= ~NTB_SDOORBELL_LOCKUP; 1456 1457 conn_type = ppd & XEON_PPD_CONN_TYPE; 1458 switch (conn_type) { 1459 case NTB_CONN_B2B: 1460 ntb->conn_type = conn_type; 1461 break; 1462 case NTB_CONN_RP: 1463 case NTB_CONN_TRANSPARENT: 1464 default: 1465 device_printf(ntb->device, "Unsupported connection type: %u\n", 1466 (unsigned)conn_type); 1467 return (ENXIO); 1468 } 1469 return (0); 1470 } 1471 1472 static int 1473 intel_ntb_detect_atom(struct ntb_softc *ntb) 1474 { 1475 uint32_t ppd, conn_type; 1476 1477 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); 1478 ntb->ppd = ppd; 1479 1480 if ((ppd & ATOM_PPD_DEV_TYPE) != 0) 1481 ntb->dev_type = NTB_DEV_DSD; 1482 else 1483 ntb->dev_type = NTB_DEV_USD; 1484 1485 conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8; 1486 switch (conn_type) { 1487 case NTB_CONN_B2B: 1488 ntb->conn_type = conn_type; 1489 break; 1490 default: 1491 device_printf(ntb->device, "Unsupported NTB configuration\n"); 1492 return (ENXIO); 1493 } 1494 return (0); 1495 } 1496 1497 static int 1498 intel_ntb_xeon_init_dev(struct ntb_softc *ntb) 1499 { 1500 int rc; 1501 1502 ntb->spad_count = XEON_SPAD_COUNT; 1503 ntb->db_count = XEON_DB_COUNT; 1504 ntb->db_link_mask = XEON_DB_LINK_BIT; 1505 ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT; 1506 ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT; 1507 1508 if (ntb->conn_type != NTB_CONN_B2B) { 1509 device_printf(ntb->device, "Connection type %d not supported\n", 1510 ntb->conn_type); 1511 return (ENXIO); 1512 } 1513 1514 ntb->reg = &xeon_reg; 1515 ntb->self_reg = &xeon_pri_reg; 1516 ntb->peer_reg = &xeon_b2b_reg; 1517 ntb->xlat_reg = &xeon_sec_xlat; 1518 1519 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { 1520 ntb->force_db = ntb->fake_db = 0; 1521 ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) % 1522 ntb->mw_count; 1523 intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n", 1524 g_ntb_msix_idx, ntb->msix_mw_idx); 1525 rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx, 1526 VM_MEMATTR_UNCACHEABLE); 1527 KASSERT(rc == 0, ("shouldn't fail")); 1528 } else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) { 1529 /* 1530 * There is a Xeon hardware errata related to writes to SDOORBELL or 1531 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space, 1532 * which may hang the system. To workaround this, use a memory 1533 * window to access the interrupt and scratch pad registers on the 1534 * remote system. 1535 */ 1536 ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) % 1537 ntb->mw_count; 1538 intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n", 1539 g_ntb_mw_idx, ntb->b2b_mw_idx); 1540 rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx, 1541 VM_MEMATTR_UNCACHEABLE); 1542 KASSERT(rc == 0, ("shouldn't fail")); 1543 } else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14)) 1544 /* 1545 * HW Errata on bit 14 of b2bdoorbell register. Writes will not be 1546 * mirrored to the remote system. Shrink the number of bits by one, 1547 * since bit 14 is the last bit. 1548 * 1549 * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register 1550 * anyway. Nor for non-B2B connection types. 
1551 */ 1552 ntb->db_count = XEON_DB_COUNT - 1; 1553 1554 ntb->db_valid_mask = (1ull << ntb->db_count) - 1; 1555 1556 if (ntb->dev_type == NTB_DEV_USD) 1557 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr, 1558 &xeon_b2b_usd_addr); 1559 else 1560 rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr, 1561 &xeon_b2b_dsd_addr); 1562 if (rc != 0) 1563 return (rc); 1564 1565 /* Enable Bus Master and Memory Space on the secondary side */ 1566 intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET, 1567 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 1568 1569 /* 1570 * Mask all doorbell interrupts. 1571 */ 1572 DB_MASK_LOCK(ntb); 1573 ntb->db_mask = ntb->db_valid_mask; 1574 db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); 1575 DB_MASK_UNLOCK(ntb); 1576 1577 rc = intel_ntb_init_isr(ntb); 1578 return (rc); 1579 } 1580 1581 static int 1582 intel_ntb_atom_init_dev(struct ntb_softc *ntb) 1583 { 1584 int error; 1585 1586 KASSERT(ntb->conn_type == NTB_CONN_B2B, 1587 ("Unsupported NTB configuration (%d)\n", ntb->conn_type)); 1588 1589 ntb->spad_count = ATOM_SPAD_COUNT; 1590 ntb->db_count = ATOM_DB_COUNT; 1591 ntb->db_vec_count = ATOM_DB_MSIX_VECTOR_COUNT; 1592 ntb->db_vec_shift = ATOM_DB_MSIX_VECTOR_SHIFT; 1593 ntb->db_valid_mask = (1ull << ntb->db_count) - 1; 1594 1595 ntb->reg = &atom_reg; 1596 ntb->self_reg = &atom_pri_reg; 1597 ntb->peer_reg = &atom_b2b_reg; 1598 ntb->xlat_reg = &atom_sec_xlat; 1599 1600 /* 1601 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is 1602 * resolved. Mask transaction layer internal parity errors. 1603 */ 1604 pci_write_config(ntb->device, 0xFC, 0x4, 4); 1605 1606 configure_atom_secondary_side_bars(ntb); 1607 1608 /* Enable Bus Master and Memory Space on the secondary side */ 1609 intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET, 1610 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 1611 1612 error = intel_ntb_init_isr(ntb); 1613 if (error != 0) 1614 return (error); 1615 1616 /* Initiate PCI-E link training */ 1617 intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 1618 1619 callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb); 1620 1621 return (0); 1622 } 1623 1624 /* XXX: Linux driver doesn't seem to do any of this for Atom. */ 1625 static void 1626 configure_atom_secondary_side_bars(struct ntb_softc *ntb) 1627 { 1628 1629 if (ntb->dev_type == NTB_DEV_USD) { 1630 intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, 1631 XEON_B2B_BAR2_ADDR64); 1632 intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, 1633 XEON_B2B_BAR4_ADDR64); 1634 intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); 1635 intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); 1636 } else { 1637 intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, 1638 XEON_B2B_BAR2_ADDR64); 1639 intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, 1640 XEON_B2B_BAR4_ADDR64); 1641 intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); 1642 intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); 1643 } 1644 } 1645 1646 1647 /* 1648 * When working around Xeon SDOORBELL errata by remapping remote registers in a 1649 * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW 1650 * remains for use by a higher layer. 1651 * 1652 * Will only be used if working around SDOORBELL errata and the BIOS-configured 1653 * MW size is sufficiently large. 1654 */ 1655 static unsigned int ntb_b2b_mw_share; 1656 SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share, 1657 0, "If enabled (non-zero), prefer to share half of the B2B peer register " 1658 "MW with higher level consumers. 
static void
xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
    enum ntb_bar regbar)
{
	struct ntb_pci_bar_info *bar;
	uint8_t bar_sz;

	if (!HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
		return;

	bar = &ntb->bar_info[idx];
	bar_sz = pci_read_config(ntb->device, bar->psz_off, 1);
	if (idx == regbar) {
		if (ntb->b2b_off != 0)
			bar_sz--;
		else
			bar_sz = 0;
	}
	pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1);
	bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1);
	(void)bar_sz;
}

static void
xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
    enum ntb_bar idx, enum ntb_bar regbar)
{
	uint64_t reg_val;
	uint32_t base_reg, lmt_reg;

	bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
	if (idx == regbar) {
		if (ntb->b2b_off)
			bar_addr += ntb->b2b_off;
		else
			bar_addr = 0;
	}

	if (!bar_is_64bit(ntb, idx)) {
		intel_ntb_reg_write(4, base_reg, bar_addr);
		reg_val = intel_ntb_reg_read(4, base_reg);
		(void)reg_val;

		intel_ntb_reg_write(4, lmt_reg, bar_addr);
		reg_val = intel_ntb_reg_read(4, lmt_reg);
		(void)reg_val;
	} else {
		intel_ntb_reg_write(8, base_reg, bar_addr);
		reg_val = intel_ntb_reg_read(8, base_reg);
		(void)reg_val;

		intel_ntb_reg_write(8, lmt_reg, bar_addr);
		reg_val = intel_ntb_reg_read(8, lmt_reg);
		(void)reg_val;
	}
}

static void
xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
{
	struct ntb_pci_bar_info *bar;

	bar = &ntb->bar_info[idx];
	if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
		intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr);
		base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off);
	} else {
		intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr);
		base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off);
	}
	(void)base_addr;
}
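/*
 * Editorial note on the write-then-read pattern in the three helpers above:
 * each register write is followed by a read of the same register whose value
 * is discarded.  Reading back flushes the posted write to the device before
 * the code moves on (and gives a debugger a place to inspect the value the
 * hardware actually latched); it does not change driver state.
 */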
1770 */ 1771 for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++) 1772 xeon_reset_sbar_size(ntb, i, b2b_bar_num); 1773 1774 bar_addr = 0; 1775 if (b2b_bar_num == NTB_CONFIG_BAR) 1776 bar_addr = addr->bar0_addr; 1777 else if (b2b_bar_num == NTB_B2B_BAR_1) 1778 bar_addr = addr->bar2_addr64; 1779 else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) 1780 bar_addr = addr->bar4_addr64; 1781 else if (b2b_bar_num == NTB_B2B_BAR_2) 1782 bar_addr = addr->bar4_addr32; 1783 else if (b2b_bar_num == NTB_B2B_BAR_3) 1784 bar_addr = addr->bar5_addr32; 1785 else 1786 KASSERT(false, ("invalid bar")); 1787 1788 intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr); 1789 1790 /* 1791 * Other SBARs are normally hit by the PBAR xlat, except for the b2b 1792 * register BAR. The B2B BAR is either disabled above or configured 1793 * half-size. It starts at PBAR xlat + offset. 1794 * 1795 * Also set up incoming BAR limits == base (zero length window). 1796 */ 1797 xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1, 1798 b2b_bar_num); 1799 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { 1800 xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32, 1801 NTB_B2B_BAR_2, b2b_bar_num); 1802 xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32, 1803 NTB_B2B_BAR_3, b2b_bar_num); 1804 } else 1805 xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64, 1806 NTB_B2B_BAR_2, b2b_bar_num); 1807 1808 /* Zero incoming translation addrs */ 1809 intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0); 1810 intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0); 1811 1812 if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) { 1813 uint32_t xlat_reg, lmt_reg; 1814 enum ntb_bar bar_num; 1815 1816 /* 1817 * We point the chosen MSIX MW BAR xlat to remote LAPIC for 1818 * workaround 1819 */ 1820 bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx); 1821 bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg); 1822 if (bar_is_64bit(ntb, bar_num)) { 1823 intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE); 1824 ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg); 1825 intel_ntb_reg_write(8, lmt_reg, 0); 1826 } else { 1827 intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE); 1828 ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg); 1829 intel_ntb_reg_write(4, lmt_reg, 0); 1830 } 1831 1832 ntb->peer_lapic_bar = &ntb->bar_info[bar_num]; 1833 } 1834 (void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET); 1835 (void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET); 1836 1837 /* Zero outgoing translation limits (whole bar size windows) */ 1838 intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0); 1839 intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0); 1840 1841 /* Set outgoing translation offsets */ 1842 xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1); 1843 if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) { 1844 xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2); 1845 xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3); 1846 } else 1847 xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2); 1848 1849 /* Set the translation offset for B2B registers */ 1850 bar_addr = 0; 1851 if (b2b_bar_num == NTB_CONFIG_BAR) 1852 bar_addr = peer_addr->bar0_addr; 1853 else if (b2b_bar_num == NTB_B2B_BAR_1) 1854 bar_addr = peer_addr->bar2_addr64; 1855 else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(ntb, NTB_SPLIT_BAR)) 1856 bar_addr = peer_addr->bar4_addr64; 1857 else if (b2b_bar_num == NTB_B2B_BAR_2) 1858 bar_addr = peer_addr->bar4_addr32; 1859 else if (b2b_bar_num == NTB_B2B_BAR_3) 1860 bar_addr = peer_addr->bar5_addr32; 1861 else 1862 

static inline bool
_xeon_link_is_up(struct ntb_softc *ntb)
{

        if (ntb->conn_type == NTB_CONN_TRANSPARENT)
                return (true);
        return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
}

static inline bool
link_is_up(struct ntb_softc *ntb)
{

        if (ntb->type == NTB_XEON)
                return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good ||
                    !HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)));

        KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
        return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
}

static inline bool
atom_link_is_err(struct ntb_softc *ntb)
{
        uint32_t status;

        KASSERT(ntb->type == NTB_ATOM, ("ntb type"));

        status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
        if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
                return (true);

        status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
        return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
}

/*
 * Atom does not have a link status interrupt; poll the link status on that
 * platform.
 */
static void
atom_link_hb(void *arg)
{
        struct ntb_softc *ntb = arg;
        sbintime_t timo, poll_ts;

        timo = NTB_HB_TIMEOUT * hz;
        poll_ts = ntb->last_ts + timo;

        /*
         * Delay polling the link status if an interrupt was received, unless
         * the cached link status says the link is down.
         */
        if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
                timo = poll_ts - ticks;
                goto out;
        }

        if (intel_ntb_poll_link(ntb))
                ntb_link_event(ntb->device);

        if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
                /* Link is down with error; proceed with recovery. */
                callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
                return;
        }

out:
        callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
}
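
/*
 * Scheduling example for the heartbeat above (hz value is illustrative):
 * with NTB_HB_TIMEOUT = 1 s and hz = 1000, timo starts at 1000 ticks.  If
 * the interrupt path refreshed last_ts only 400 ticks ago, the test above
 * re-arms the callout for the remaining 600 ticks instead of polling the
 * hardware immediately, so interrupt-driven updates suppress redundant
 * polls.
 */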

static void
atom_perform_link_restart(struct ntb_softc *ntb)
{
        uint32_t status;

        /* Driver resets the NTB ModPhy lanes - magic! */
        intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
        intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
        intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
        intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);

        /* Driver waits 100ms to allow the NTB ModPhy to settle */
        pause("ModPhy", hz / 10);

        /* Clear AER Errors, write to clear */
        status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
        status &= PCIM_AER_COR_REPLAY_ROLLOVER;
        intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);

        /* Clear unexpected electrical idle event in LTSSM, write to clear */
        status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
        status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
        intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);

        /* Clear DeSkew Buffer error, write to clear */
        status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
        status |= ATOM_DESKEWSTS_DBERR;
        intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);

        status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
        status &= ATOM_IBIST_ERR_OFLOW;
        intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);

        /* Releases the NTB state machine to allow the link to retrain */
        status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
        status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
        intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
}

static int
intel_ntb_port_number(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        return (ntb->dev_type == NTB_DEV_USD ? 0 : 1);
}

static int
intel_ntb_peer_port_count(device_t dev)
{

        return (1);
}

static int
intel_ntb_peer_port_number(device_t dev, int pidx)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (pidx != 0)
                return (-EINVAL);

        return (ntb->dev_type == NTB_DEV_USD ? 1 : 0);
}
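
/*
 * Port numbering example: in a two-host B2B setup the upstream (USD) side
 * reports local port 0 and peer port 1, while the downstream (DSD) side
 * reports local port 1 and peer port 0, so the two hosts always see
 * complementary numbers.
 */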

static int
intel_ntb_peer_port_idx(device_t dev, int port)
{
        int peer_port;

        peer_port = intel_ntb_peer_port_number(dev, 0);
        if (peer_port == -EINVAL || port != peer_port)
                return (-EINVAL);

        return (0);
}

static int
intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
    enum ntb_width width __unused)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        uint32_t cntl;

        intel_ntb_printf(2, "%s\n", __func__);

        if (ntb->type == NTB_ATOM) {
                pci_write_config(ntb->device, NTB_PPD_OFFSET,
                    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
                return (0);
        }

        if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
                ntb_link_event(dev);
                return (0);
        }

        cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
        cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
        cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
        cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
        if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
                cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
        intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
        return (0);
}

static int
intel_ntb_link_disable(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        uint32_t cntl;

        intel_ntb_printf(2, "%s\n", __func__);

        if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
                ntb_link_event(dev);
                return (0);
        }

        cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
        cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
        cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
        if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
                cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
        cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
        intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
        return (0);
}

static bool
intel_ntb_link_enabled(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        uint32_t cntl;

        if (ntb->type == NTB_ATOM) {
                cntl = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
                return ((cntl & ATOM_PPD_INIT_LINK) != 0);
        }

        if (ntb->conn_type == NTB_CONN_TRANSPARENT)
                return (true);

        cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
        return ((cntl & NTB_CNTL_LINK_DISABLE) == 0);
}
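
/*
 * Consumer-side sketch (hypothetical client code, assuming the generic
 * ntb(4) KPI wrappers from ntb.h): a client brings the link up and then
 * waits for its registered link-event callback rather than polling:
 *
 *	rc = ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *	if (rc != 0)
 *		return (rc);
 *	... later, the ntb_link_event() path invokes the client's
 *	link_event callback once both sides report the link active ...
 */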

static void
recover_atom_link(void *arg)
{
        struct ntb_softc *ntb = arg;
        unsigned speed, width, oldspeed, oldwidth;
        uint32_t status32;

        atom_perform_link_restart(ntb);

        /*
         * There is a potential race between the 2 NTB devices recovering at
         * the same time.  If the times are the same, the link will not
         * recover and the driver will be stuck in this loop forever.  Add a
         * random interval to the recovery time to prevent this race.
         */
        status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
        pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);

        if (atom_link_is_err(ntb))
                goto retry;

        status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
        if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
                goto out;

        status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
        width = NTB_LNK_STA_WIDTH(status32);
        speed = status32 & NTB_LINK_SPEED_MASK;

        oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
        oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
        if (oldwidth != width || oldspeed != speed)
                goto retry;

out:
        callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
            ntb);
        return;

retry:
        callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
            ntb);
}

/*
 * Polls the HW link status register(s); returns true if something has
 * changed.
 */
static bool
intel_ntb_poll_link(struct ntb_softc *ntb)
{
        uint32_t ntb_cntl;
        uint16_t reg_val;

        if (ntb->type == NTB_ATOM) {
                ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
                if (ntb_cntl == ntb->ntb_ctl)
                        return (false);

                ntb->ntb_ctl = ntb_cntl;
                ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
        } else {
                db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask);

                reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
                if (reg_val == ntb->lnk_sta)
                        return (false);

                ntb->lnk_sta = reg_val;

                if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
                        if (_xeon_link_is_up(ntb)) {
                                if (!ntb->peer_msix_good) {
                                        callout_reset(&ntb->peer_msix_work, 0,
                                            intel_ntb_exchange_msix, ntb);
                                        return (false);
                                }
                        } else {
                                ntb->peer_msix_good = false;
                                ntb->peer_msix_done = false;
                        }
                }
        }
        return (true);
}

static inline enum ntb_speed
intel_ntb_link_sta_speed(struct ntb_softc *ntb)
{

        if (!link_is_up(ntb))
                return (NTB_SPEED_NONE);
        return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
}

static inline enum ntb_width
intel_ntb_link_sta_width(struct ntb_softc *ntb)
{

        if (!link_is_up(ntb))
                return (NTB_WIDTH_NONE);
        return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
}

SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0,
    "Driver state, statistics, and HW registers");

#define NTB_REGSZ_MASK	(3ul << 30)
#define NTB_REG_64	(1ul << 30)
#define NTB_REG_32	(2ul << 30)
#define NTB_REG_16	(3ul << 30)
#define NTB_REG_8	(0ul << 30)

#define NTB_DB_READ	(1ul << 29)
#define NTB_PCI_REG	(1ul << 28)
#define NTB_REGFLAGS_MASK	(NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG)
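
/*
 * Encoding example: the sysctl arg2 word packs a register offset into its
 * low bits and the access attributes into bits 28-31.  For instance,
 * "NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET" asks
 * sysctl_handle_register() below for a 4-byte PCI config read at that
 * offset, while "NTB_REG_64 | NTB_DB_READ | db_mask" requests a 64-bit
 * doorbell-register read through db_ioread().
 */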

static void
intel_ntb_sysctl_init(struct ntb_softc *ntb)
{
        struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree, *tmptree;

        ctx = device_get_sysctl_ctx(ntb->device);
        globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));

        SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "link_status",
            CTLFLAG_RD | CTLTYPE_STRING, ntb, 0,
            sysctl_handle_link_status_human, "A",
            "Link status (human readable)");
        SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "active",
            CTLFLAG_RD | CTLTYPE_UINT, ntb, 0, sysctl_handle_link_status,
            "IU", "Link status (1=active, 0=inactive)");
        SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "admin_up",
            CTLFLAG_RW | CTLTYPE_UINT, ntb, 0, sysctl_handle_link_admin,
            "IU", "Set/get interface status (1=UP, 0=DOWN)");

        tree = SYSCTL_ADD_NODE(ctx, globals, OID_AUTO, "debug_info",
            CTLFLAG_RD, NULL, "Driver state, statistics, and HW registers");
        tree_par = SYSCTL_CHILDREN(tree);

        SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD,
            &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port");
        SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD,
            &ntb->dev_type, 0, "0 - USD; 1 - DSD");
        SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD,
            &ntb->ppd, 0, "Raw PPD register (cached)");

        if (ntb->b2b_mw_idx != B2B_MW_DISABLED) {
                SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD,
                    &ntb->b2b_mw_idx, 0,
                    "Index of the MW used for B2B remote register access");
                SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off",
                    CTLFLAG_RD, &ntb->b2b_off,
                    "If non-zero, offset of B2B register region in shared MW");
        }

        SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features",
            CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_features, "A",
            "Features/errata of this NTB device");

        SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD,
            __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0,
            "NTB CTL register (cached)");
        SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD,
            __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0,
            "LNK STA register (cached)");

        SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD,
            &ntb->mw_count, 0, "MW count");
        SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD,
            &ntb->spad_count, 0, "Scratchpad count");
        SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD,
            &ntb->db_count, 0, "Doorbell count");
        SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD,
            &ntb->db_vec_count, 0, "Doorbell vector count");
        SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD,
            &ntb->db_vec_shift, 0, "Doorbell vector shift");

        SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD,
            &ntb->db_valid_mask, "Doorbell valid mask");
        SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD,
            &ntb->db_link_mask, "Doorbell link mask");
        SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD,
            &ntb->db_mask, "Doorbell mask (cached)");

        tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers",
            CTLFLAG_RD, NULL, "Raw HW registers (big-endian)");
        regpar = SYSCTL_CHILDREN(tmptree);

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 |
            ntb->reg->ntb_ctl, sysctl_handle_register, "IU",
            "NTB Control register");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 |
            0x19c, sysctl_handle_register, "IU",
            "NTB Link Capabilities");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 |
            0x1a0, sysctl_handle_register, "IU",
            "NTB Link Control register");

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask,
            sysctl_handle_register, "QU", "Doorbell mask register");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell,
            sysctl_handle_register, "QU", "Doorbell register");

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | ntb->xlat_reg->bar2_xlat,
            sysctl_handle_register, "QU", "Incoming XLAT23 register");
        if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->xlat_reg->bar4_xlat,
                    sysctl_handle_register, "IU", "Incoming XLAT4 register");
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->xlat_reg->bar5_xlat,
                    sysctl_handle_register, "IU", "Incoming XLAT5 register");
        } else {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_64 | ntb->xlat_reg->bar4_xlat,
                    sysctl_handle_register, "QU", "Incoming XLAT45 register");
        }

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | ntb->xlat_reg->bar2_limit,
            sysctl_handle_register, "QU", "Incoming LMT23 register");
        if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->xlat_reg->bar4_limit,
                    sysctl_handle_register, "IU", "Incoming LMT4 register");
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->xlat_reg->bar5_limit,
                    sysctl_handle_register, "IU", "Incoming LMT5 register");
        } else {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_64 | ntb->xlat_reg->bar4_limit,
                    sysctl_handle_register, "QU", "Incoming LMT45 register");
        }

        if (ntb->type == NTB_ATOM)
                return;

        tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats",
            CTLFLAG_RD, NULL, "Xeon HW statistics");
        statpar = SYSCTL_CHILDREN(tmptree);
        SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_16 | XEON_USMEMMISS_OFFSET,
            sysctl_handle_register, "SU", "Upstream Memory Miss");

        tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err",
            CTLFLAG_RD, NULL, "Xeon HW errors");
        errpar = SYSCTL_CHILDREN(tmptree);

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET,
            sysctl_handle_register, "CU", "PPD");

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET,
            sysctl_handle_register, "CU", "PBAR23 SZ (log2)");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET,
            sysctl_handle_register, "CU", "PBAR4 SZ (log2)");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET,
            sysctl_handle_register, "CU", "PBAR5 SZ (log2)");

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET,
            sysctl_handle_register, "CU", "SBAR23 SZ (log2)");

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET,
            sysctl_handle_register, "CU", "SBAR4 SZ (log2)");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET,
            sysctl_handle_register, "CU", "SBAR5 SZ (log2)");

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET,
            sysctl_handle_register, "SU", "DEVSTS");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET,
            sysctl_handle_register, "SU", "LNKSTS");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET,
            sysctl_handle_register, "SU", "SLNKSTS");

        SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET,
            sysctl_handle_register, "IU", "UNCERRSTS");
        SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET,
            sysctl_handle_register, "IU", "CORERRSTS");

        if (ntb->conn_type != NTB_CONN_B2B)
                return;

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off,
            sysctl_handle_register, "QU", "Outgoing XLAT23 register");
        if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
                    sysctl_handle_register, "IU", "Outgoing XLAT4 register");
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off,
                    sysctl_handle_register, "IU", "Outgoing XLAT5 register");
        } else {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off,
                    sysctl_handle_register, "QU", "Outgoing XLAT45 register");
        }

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | XEON_PBAR2LMT_OFFSET,
            sysctl_handle_register, "QU", "Outgoing LMT23 register");
        if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | XEON_PBAR4LMT_OFFSET,
                    sysctl_handle_register, "IU", "Outgoing LMT4 register");
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | XEON_PBAR5LMT_OFFSET,
                    sysctl_handle_register, "IU", "Outgoing LMT5 register");
        } else {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_64 | XEON_PBAR4LMT_OFFSET,
                    sysctl_handle_register, "QU", "Outgoing LMT45 register");
        }

        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | ntb->xlat_reg->bar0_base,
            sysctl_handle_register, "QU", "Secondary BAR01 base register");
        SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base",
            CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
            NTB_REG_64 | ntb->xlat_reg->bar2_base,
            sysctl_handle_register, "QU", "Secondary BAR23 base register");
        if (HAS_FEATURE(ntb, NTB_SPLIT_BAR)) {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->xlat_reg->bar4_base,
                    sysctl_handle_register, "IU",
                    "Secondary BAR4 base register");
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_32 | ntb->xlat_reg->bar5_base,
                    sysctl_handle_register, "IU",
                    "Secondary BAR5 base register");
        } else {
                SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
                    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
                    NTB_REG_64 | ntb->xlat_reg->bar4_base,
                    sysctl_handle_register, "QU",
                    "Secondary BAR45 base register");
        }
}
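
/*
 * Usage sketch (the exact OID prefix depends on how the device attached;
 * the paths below assume a first ntb_hw instance):
 *
 *	# sysctl dev.ntb_hw.0.link_status
 *	# sysctl -x dev.ntb_hw.0.debug_info.registers.ntbcntl
 *
 * The register nodes are serviced by sysctl_handle_register() below, which
 * emits big-endian values so that "sysctl -x" output reads naturally.
 */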
"en" : "dis"); 2517 2518 if (new != 0) 2519 error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 2520 else 2521 error = intel_ntb_link_disable(ntb->device); 2522 return (error); 2523 } 2524 2525 static int 2526 sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS) 2527 { 2528 struct ntb_softc *ntb = arg1; 2529 struct sbuf sb; 2530 enum ntb_speed speed; 2531 enum ntb_width width; 2532 int error; 2533 2534 sbuf_new_for_sysctl(&sb, NULL, 32, req); 2535 2536 if (intel_ntb_link_is_up(ntb->device, &speed, &width)) 2537 sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u", 2538 (unsigned)speed, (unsigned)width); 2539 else 2540 sbuf_printf(&sb, "down"); 2541 2542 error = sbuf_finish(&sb); 2543 sbuf_delete(&sb); 2544 2545 if (error || !req->newptr) 2546 return (error); 2547 return (EINVAL); 2548 } 2549 2550 static int 2551 sysctl_handle_link_status(SYSCTL_HANDLER_ARGS) 2552 { 2553 struct ntb_softc *ntb = arg1; 2554 unsigned res; 2555 int error; 2556 2557 res = intel_ntb_link_is_up(ntb->device, NULL, NULL); 2558 2559 error = SYSCTL_OUT(req, &res, sizeof(res)); 2560 if (error || !req->newptr) 2561 return (error); 2562 return (EINVAL); 2563 } 2564 2565 static int 2566 sysctl_handle_register(SYSCTL_HANDLER_ARGS) 2567 { 2568 struct ntb_softc *ntb; 2569 const void *outp; 2570 uintptr_t sz; 2571 uint64_t umv; 2572 char be[sizeof(umv)]; 2573 size_t outsz; 2574 uint32_t reg; 2575 bool db, pci; 2576 int error; 2577 2578 ntb = arg1; 2579 reg = arg2 & ~NTB_REGFLAGS_MASK; 2580 sz = arg2 & NTB_REGSZ_MASK; 2581 db = (arg2 & NTB_DB_READ) != 0; 2582 pci = (arg2 & NTB_PCI_REG) != 0; 2583 2584 KASSERT(!(db && pci), ("bogus")); 2585 2586 if (db) { 2587 KASSERT(sz == NTB_REG_64, ("bogus")); 2588 umv = db_ioread(ntb, reg); 2589 outsz = sizeof(uint64_t); 2590 } else { 2591 switch (sz) { 2592 case NTB_REG_64: 2593 if (pci) 2594 umv = pci_read_config(ntb->device, reg, 8); 2595 else 2596 umv = intel_ntb_reg_read(8, reg); 2597 outsz = sizeof(uint64_t); 2598 break; 2599 case NTB_REG_32: 2600 if (pci) 2601 umv = pci_read_config(ntb->device, reg, 4); 2602 else 2603 umv = intel_ntb_reg_read(4, reg); 2604 outsz = sizeof(uint32_t); 2605 break; 2606 case NTB_REG_16: 2607 if (pci) 2608 umv = pci_read_config(ntb->device, reg, 2); 2609 else 2610 umv = intel_ntb_reg_read(2, reg); 2611 outsz = sizeof(uint16_t); 2612 break; 2613 case NTB_REG_8: 2614 if (pci) 2615 umv = pci_read_config(ntb->device, reg, 1); 2616 else 2617 umv = intel_ntb_reg_read(1, reg); 2618 outsz = sizeof(uint8_t); 2619 break; 2620 default: 2621 panic("bogus"); 2622 break; 2623 } 2624 } 2625 2626 /* Encode bigendian so that sysctl -x is legible. 

static int
sysctl_handle_register(SYSCTL_HANDLER_ARGS)
{
        struct ntb_softc *ntb;
        const void *outp;
        uintptr_t sz;
        uint64_t umv;
        char be[sizeof(umv)];
        size_t outsz;
        uint32_t reg;
        bool db, pci;
        int error;

        ntb = arg1;
        reg = arg2 & ~NTB_REGFLAGS_MASK;
        sz = arg2 & NTB_REGSZ_MASK;
        db = (arg2 & NTB_DB_READ) != 0;
        pci = (arg2 & NTB_PCI_REG) != 0;

        KASSERT(!(db && pci), ("bogus"));

        if (db) {
                KASSERT(sz == NTB_REG_64, ("bogus"));
                umv = db_ioread(ntb, reg);
                outsz = sizeof(uint64_t);
        } else {
                switch (sz) {
                case NTB_REG_64:
                        if (pci)
                                umv = pci_read_config(ntb->device, reg, 8);
                        else
                                umv = intel_ntb_reg_read(8, reg);
                        outsz = sizeof(uint64_t);
                        break;
                case NTB_REG_32:
                        if (pci)
                                umv = pci_read_config(ntb->device, reg, 4);
                        else
                                umv = intel_ntb_reg_read(4, reg);
                        outsz = sizeof(uint32_t);
                        break;
                case NTB_REG_16:
                        if (pci)
                                umv = pci_read_config(ntb->device, reg, 2);
                        else
                                umv = intel_ntb_reg_read(2, reg);
                        outsz = sizeof(uint16_t);
                        break;
                case NTB_REG_8:
                        if (pci)
                                umv = pci_read_config(ntb->device, reg, 1);
                        else
                                umv = intel_ntb_reg_read(1, reg);
                        outsz = sizeof(uint8_t);
                        break;
                default:
                        panic("bogus");
                        break;
                }
        }

        /* Encode big-endian so that sysctl -x is legible. */
        be64enc(be, umv);
        outp = ((char *)be) + sizeof(umv) - outsz;

        error = SYSCTL_OUT(req, outp, outsz);
        if (error || !req->newptr)
                return (error);
        return (EINVAL);
}

static unsigned
intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
{

        if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
            uidx >= ntb->b2b_mw_idx) ||
            (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
                uidx++;
        if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
            uidx >= ntb->b2b_mw_idx) &&
            (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx))
                uidx++;
        return (uidx);
}
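
/*
 * Mapping example (indices are hypothetical): with mw_count = 3, a
 * non-shared B2B MW at hardware index 1 (b2b_off == 0), and no MSI-X MW
 * reserved, user indices {0, 1} map to hardware indices {0, 2}: the first
 * test above bumps any user index at or past a reserved slot.  The second
 * test handles the case where both a B2B MW and an MSI-X MW are reserved
 * and the first bump lands the index on the second reserved slot.
 */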

#ifndef EARLY_AP_STARTUP
static int msix_ready;

static void
intel_ntb_msix_ready(void *arg __unused)
{

        msix_ready = 1;
}
SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
    intel_ntb_msix_ready, NULL);
#endif

static void
intel_ntb_exchange_msix(void *ctx)
{
        struct ntb_softc *ntb;
        uint32_t val;
        unsigned i;

        ntb = ctx;

        if (ntb->peer_msix_good)
                goto msix_good;
        if (ntb->peer_msix_done)
                goto msix_done;

#ifndef EARLY_AP_STARTUP
        /* Block MSIX negotiation until SMP started and IRQ reshuffled. */
        if (!msix_ready)
                goto reschedule;
#endif

        intel_ntb_get_msix_info(ntb);
        for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
                intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
                    ntb->msix_data[i].nmd_data);
                intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
                    ntb->msix_data[i].nmd_ofs - ntb->msix_xlat);
        }
        intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD,
            NTB_MSIX_VER_GUARD);

        intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
        if (val != NTB_MSIX_VER_GUARD)
                goto reschedule;

        for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
                intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
                intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
                ntb->peer_msix_data[i].nmd_data = val;
                intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
                intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
                ntb->peer_msix_data[i].nmd_ofs = val;
        }

        ntb->peer_msix_done = true;

msix_done:
        intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE,
            NTB_MSIX_RECEIVED);
        intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
        if (val != NTB_MSIX_RECEIVED)
                goto reschedule;

        intel_ntb_spad_clear(ntb->device);
        ntb->peer_msix_good = true;
        /* Give peer time to see our NTB_MSIX_RECEIVED. */
        goto reschedule;

msix_good:
        intel_ntb_poll_link(ntb);
        ntb_link_event(ntb->device);
        return;

reschedule:
        ntb->lnk_sta = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
        if (_xeon_link_is_up(ntb)) {
                callout_reset(&ntb->peer_msix_work,
                    hz * (ntb->peer_msix_good ? 2 : 1) / 10,
                    intel_ntb_exchange_msix, ntb);
        } else
                intel_ntb_spad_clear(ntb->device);
}

/*
 * Public API to the rest of the OS
 */

static uint8_t
intel_ntb_spad_count(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        return (ntb->spad_count);
}

static uint8_t
intel_ntb_mw_count(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        uint8_t res;

        res = ntb->mw_count;
        if (ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0)
                res--;
        if (ntb->msix_mw_idx != B2B_MW_DISABLED)
                res--;
        return (res);
}

static int
intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (idx >= ntb->spad_count)
                return (EINVAL);

        intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);

        return (0);
}

/*
 * Zeros the local scratchpad.
 */
static void
intel_ntb_spad_clear(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        unsigned i;

        for (i = 0; i < ntb->spad_count; i++)
                intel_ntb_spad_write(dev, i, 0);
}

static int
intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (idx >= ntb->spad_count)
                return (EINVAL);

        *val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4);

        return (0);
}

static int
intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (idx >= ntb->spad_count)
                return (EINVAL);

        if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
                intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
        else
                intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);

        return (0);
}

static int
intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (idx >= ntb->spad_count)
                return (EINVAL);

        if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
                *val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
        else
                *val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);

        return (0);
}
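
/*
 * Consumer-side sketch (hypothetical client code using the generic ntb(4)
 * wrappers; SPAD_VERSION and MY_PROTO_VERSION are invented names):
 * scratchpads are commonly used for a small handshake after a link-up
 * event, e.g.
 *
 *	uint32_t theirs;
 *
 *	ntb_peer_spad_write(dev, SPAD_VERSION, MY_PROTO_VERSION);
 *	if (ntb_spad_read(dev, SPAD_VERSION, &theirs) == 0 &&
 *	    theirs == MY_PROTO_VERSION)
 *		... both sides agree; proceed to program memory windows ...
 *
 * This mirrors how intel_ntb_exchange_msix() above negotiates MSI-X data
 * through the NTB_MSIX_* scratchpad slots.
 */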

static int
intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
    caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
    bus_addr_t *plimit)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        struct ntb_pci_bar_info *bar;
        bus_addr_t limit;
        size_t bar_b2b_off;
        enum ntb_bar bar_num;

        if (mw_idx >= intel_ntb_mw_count(dev))
                return (EINVAL);
        mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx);

        bar_num = intel_ntb_mw_to_bar(ntb, mw_idx);
        bar = &ntb->bar_info[bar_num];
        bar_b2b_off = 0;
        if (mw_idx == ntb->b2b_mw_idx) {
                KASSERT(ntb->b2b_off != 0,
                    ("user shouldn't get non-shared b2b mw"));
                bar_b2b_off = ntb->b2b_off;
        }

        if (bar_is_64bit(ntb, bar_num))
                limit = BUS_SPACE_MAXADDR;
        else
                limit = BUS_SPACE_MAXADDR_32BIT;

        if (base != NULL)
                *base = bar->pbase + bar_b2b_off;
        if (vbase != NULL)
                *vbase = bar->vbase + bar_b2b_off;
        if (size != NULL)
                *size = bar->size - bar_b2b_off;
        if (align != NULL)
                *align = bar->size;
        if (align_size != NULL)
                *align_size = 1;
        if (plimit != NULL)
                *plimit = limit;
        return (0);
}

static int
intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        struct ntb_pci_bar_info *bar;
        uint64_t base, limit, reg_val;
        size_t bar_size, mw_size;
        uint32_t base_reg, xlat_reg, limit_reg;
        enum ntb_bar bar_num;

        if (idx >= intel_ntb_mw_count(dev))
                return (EINVAL);
        idx = intel_ntb_user_mw_to_idx(ntb, idx);

        bar_num = intel_ntb_mw_to_bar(ntb, idx);
        bar = &ntb->bar_info[bar_num];

        bar_size = bar->size;
        if (idx == ntb->b2b_mw_idx)
                mw_size = bar_size - ntb->b2b_off;
        else
                mw_size = bar_size;

        /* Hardware requires that addr is aligned to bar size */
        if ((addr & (bar_size - 1)) != 0)
                return (EINVAL);

        if (size > mw_size)
                return (EINVAL);

        bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);

        limit = 0;
        if (bar_is_64bit(ntb, bar_num)) {
                base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;

                if (limit_reg != 0 && size != mw_size)
                        limit = base + size;

                /* Set and verify translation address */
                intel_ntb_reg_write(8, xlat_reg, addr);
                reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
                if (reg_val != addr) {
                        intel_ntb_reg_write(8, xlat_reg, 0);
                        return (EIO);
                }

                /* Set and verify the limit */
                intel_ntb_reg_write(8, limit_reg, limit);
                reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
                if (reg_val != limit) {
                        intel_ntb_reg_write(8, limit_reg, base);
                        intel_ntb_reg_write(8, xlat_reg, 0);
                        return (EIO);
                }
        } else {
                /* Configure 32-bit (split) BAR MW */

                if ((addr & UINT32_MAX) != addr)
                        return (ERANGE);
                if (((addr + size) & UINT32_MAX) != (addr + size))
                        return (ERANGE);

                base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;

                if (limit_reg != 0 && size != mw_size)
                        limit = base + size;

                /* Set and verify translation address */
                intel_ntb_reg_write(4, xlat_reg, addr);
                reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
                if (reg_val != addr) {
                        intel_ntb_reg_write(4, xlat_reg, 0);
                        return (EIO);
                }

                /* Set and verify the limit */
                intel_ntb_reg_write(4, limit_reg, limit);
                reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
                if (reg_val != limit) {
                        intel_ntb_reg_write(4, limit_reg, base);
                        intel_ntb_reg_write(4, xlat_reg, 0);
                        return (EIO);
                }
        }
        return (0);
}

static int
intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
{

        return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0));
}
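
/*
 * Alignment example (addresses are hypothetical): with a 1 MiB BAR,
 * intel_ntb_mw_set_trans() accepts only multiples of 0x100000, so
 * addr = 0x1200000 is fine while addr = 0x1234000 fails with EINVAL.
 * A client would typically honor the "align" value reported by
 * ntb_mw_get_range(), e.g. (sketch, assuming the generic ntb(4) wrappers):
 *
 *	size_t size, align;
 *	vm_paddr_t pa;
 *	void *buf;
 *
 *	ntb_mw_get_range(dev, 0, NULL, NULL, &size, &align, NULL, NULL);
 *	buf = contigmalloc(size, M_DEVBUF, M_WAITOK, 0,
 *	    BUS_SPACE_MAXADDR, align, 0);
 *	pa = vtophys(buf);
 *	ntb_mw_set_trans(dev, 0, pa, size);
 */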

static int
intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        struct ntb_pci_bar_info *bar;

        if (idx >= intel_ntb_mw_count(dev))
                return (EINVAL);
        idx = intel_ntb_user_mw_to_idx(ntb, idx);

        bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
        *mode = bar->map_mode;
        return (0);
}

static int
intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (idx >= intel_ntb_mw_count(dev))
                return (EINVAL);

        idx = intel_ntb_user_mw_to_idx(ntb, idx);
        return (intel_ntb_mw_set_wc_internal(ntb, idx, mode));
}

static int
intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx,
    vm_memattr_t mode)
{
        struct ntb_pci_bar_info *bar;
        int rc;

        bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
        if (bar->map_mode == mode)
                return (0);

        rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mode);
        if (rc == 0)
                bar->map_mode = mode;

        return (rc);
}

static void
intel_ntb_peer_db_set(device_t dev, uint64_t bit)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
                struct ntb_pci_bar_info *lapic;
                unsigned i;

                lapic = ntb->peer_lapic_bar;

                for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
                        if ((bit & intel_ntb_db_vector_mask(dev, i)) != 0)
                                bus_space_write_4(lapic->pci_bus_tag,
                                    lapic->pci_bus_handle,
                                    ntb->peer_msix_data[i].nmd_ofs,
                                    ntb->peer_msix_data[i].nmd_data);
                }
                return;
        }

        if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
                intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
                return;
        }

        db_iowrite(ntb, ntb->peer_reg->db_bell, bit);
}
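
/*
 * Usage sketch (hypothetical client code using the generic ntb(4)
 * wrappers): ringing the peer's first doorbell bit and consuming it on the
 * other side might look like
 *
 *	// local side
 *	ntb_peer_db_set(dev, 1ull << 0);
 *
 *	// remote side, in its doorbell callback
 *	uint64_t db = ntb_db_read(dev);
 *	ntb_db_clear(dev, db);
 *
 * On SB01BASE_LOCKUP hardware the write above lands directly in the peer's
 * LAPIC MSI range (via the MSIX MW programmed in xeon_setup_b2b_mw()), so
 * the peer takes an interrupt without the HW doorbell register being
 * touched.
 */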

static int
intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
{
        struct ntb_softc *ntb = device_get_softc(dev);
        struct ntb_pci_bar_info *bar;
        uint64_t regoff;

        KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL"));

        if (!HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
                bar = &ntb->bar_info[NTB_CONFIG_BAR];
                regoff = ntb->peer_reg->db_bell;
        } else {
                KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
                    ("invalid b2b idx"));

                bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
                regoff = XEON_PDOORBELL_OFFSET;
        }
        KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh"));

        /* HACK: Specific to current x86 bus implementation. */
        *db_addr = ((uint64_t)bar->pci_bus_handle + regoff);
        *db_size = ntb->reg->db_size;
        return (0);
}

static uint64_t
intel_ntb_db_valid_mask(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        return (ntb->db_valid_mask);
}

static int
intel_ntb_db_vector_count(device_t dev)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        return (ntb->db_vec_count);
}

static uint64_t
intel_ntb_db_vector_mask(device_t dev, uint32_t vector)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (vector > ntb->db_vec_count)
                return (0);
        return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector));
}

static bool
intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
    enum ntb_width *width)
{
        struct ntb_softc *ntb = device_get_softc(dev);

        if (speed != NULL)
                *speed = intel_ntb_link_sta_speed(ntb);
        if (width != NULL)
                *width = intel_ntb_link_sta_width(ntb);
        return (link_is_up(ntb));
}

static void
save_bar_parameters(struct ntb_pci_bar_info *bar)
{

        bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
        bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
        bar->pbase = rman_get_start(bar->pci_resource);
        bar->size = rman_get_size(bar->pci_resource);
        bar->vbase = rman_get_virtual(bar->pci_resource);
}
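
/*
 * Design note with a sketch (consumer code is an assumption): the physical
 * doorbell address returned by intel_ntb_peer_db_addr() above exists so
 * that a DMA engine, rather than the CPU, can ring the peer's doorbell by
 * writing the doorbell bits to that bus address as the final step of a
 * transfer, e.g.
 *
 *	bus_addr_t db_addr;
 *	vm_size_t db_size;
 *
 *	ntb_peer_db_addr(dev, &db_addr, &db_size);
 *	... queue a db_size-wide DMA write of the doorbell bit to db_addr ...
 */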

static device_method_t ntb_intel_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         intel_ntb_probe),
        DEVMETHOD(device_attach,        intel_ntb_attach),
        DEVMETHOD(device_detach,        intel_ntb_detach),
        /* Bus interface */
        DEVMETHOD(bus_child_location_str, ntb_child_location_str),
        DEVMETHOD(bus_print_child,      ntb_print_child),
        DEVMETHOD(bus_get_dma_tag,      ntb_get_dma_tag),
        /* NTB interface */
        DEVMETHOD(ntb_port_number,      intel_ntb_port_number),
        DEVMETHOD(ntb_peer_port_count,  intel_ntb_peer_port_count),
        DEVMETHOD(ntb_peer_port_number, intel_ntb_peer_port_number),
        DEVMETHOD(ntb_peer_port_idx,    intel_ntb_peer_port_idx),
        DEVMETHOD(ntb_link_is_up,       intel_ntb_link_is_up),
        DEVMETHOD(ntb_link_enable,      intel_ntb_link_enable),
        DEVMETHOD(ntb_link_disable,     intel_ntb_link_disable),
        DEVMETHOD(ntb_link_enabled,     intel_ntb_link_enabled),
        DEVMETHOD(ntb_mw_count,         intel_ntb_mw_count),
        DEVMETHOD(ntb_mw_get_range,     intel_ntb_mw_get_range),
        DEVMETHOD(ntb_mw_set_trans,     intel_ntb_mw_set_trans),
        DEVMETHOD(ntb_mw_clear_trans,   intel_ntb_mw_clear_trans),
        DEVMETHOD(ntb_mw_get_wc,        intel_ntb_mw_get_wc),
        DEVMETHOD(ntb_mw_set_wc,        intel_ntb_mw_set_wc),
        DEVMETHOD(ntb_spad_count,       intel_ntb_spad_count),
        DEVMETHOD(ntb_spad_clear,       intel_ntb_spad_clear),
        DEVMETHOD(ntb_spad_write,       intel_ntb_spad_write),
        DEVMETHOD(ntb_spad_read,        intel_ntb_spad_read),
        DEVMETHOD(ntb_peer_spad_write,  intel_ntb_peer_spad_write),
        DEVMETHOD(ntb_peer_spad_read,   intel_ntb_peer_spad_read),
        DEVMETHOD(ntb_db_valid_mask,    intel_ntb_db_valid_mask),
        DEVMETHOD(ntb_db_vector_count,  intel_ntb_db_vector_count),
        DEVMETHOD(ntb_db_vector_mask,   intel_ntb_db_vector_mask),
        DEVMETHOD(ntb_db_clear,         intel_ntb_db_clear),
        DEVMETHOD(ntb_db_clear_mask,    intel_ntb_db_clear_mask),
        DEVMETHOD(ntb_db_read,          intel_ntb_db_read),
        DEVMETHOD(ntb_db_set_mask,      intel_ntb_db_set_mask),
        DEVMETHOD(ntb_peer_db_addr,     intel_ntb_peer_db_addr),
        DEVMETHOD(ntb_peer_db_set,      intel_ntb_peer_db_set),
        DEVMETHOD_END
};

static DEFINE_CLASS_0(ntb_hw, ntb_intel_driver, ntb_intel_methods,
    sizeof(struct ntb_softc));
DRIVER_MODULE(ntb_hw_intel, pci, ntb_intel_driver, ntb_hw_devclass, NULL, NULL);
MODULE_DEPEND(ntb_hw_intel, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_intel, 1);
MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ntb_hw_intel, pci_ids,
    nitems(pci_ids));
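
/*
 * Usage note (a sketch; the consumer module names assume the stock FreeBSD
 * ntb(4) stack): the driver can be loaded at boot together with a consumer
 * such as if_ntb via loader.conf:
 *
 *	ntb_hw_intel_load="YES"
 *	if_ntb_load="YES"
 */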