/*-
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2019 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright (c) 2019 Advanced Micro Devices, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of AMD corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information:
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 */

/*
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using a PCI-e link, providing remote memory access.
 *
 * This module contains a driver for the NTB hardware in AMD CPUs.
 *
 * Much of the code in this module is shared with Linux.  Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ntb_hw_amd.h"
#include "dev/ntb/ntb.h"

MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations");

static const struct amd_ntb_hw_info amd_ntb_hw_info_list[] = {
	{ .vendor_id = NTB_HW_AMD_VENDOR_ID,
	  .device_id = NTB_HW_AMD_DEVICE_ID1,
	  .mw_count = 3,
	  .bar_start_idx = 1,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  .quirks = QUIRK_MW0_32BIT,
	  .desc = "AMD Non-Transparent Bridge"},

	{ .vendor_id = NTB_HW_AMD_VENDOR_ID,
	  .device_id = NTB_HW_AMD_DEVICE_ID2,
	  .mw_count = 2,
	  .bar_start_idx = 2,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  .quirks = 0,
	  .desc = "AMD Non-Transparent Bridge"},

	{ .vendor_id = NTB_HW_HYGON_VENDOR_ID,
	  .device_id = NTB_HW_HYGON_DEVICE_ID1,
	  .mw_count = 3,
	  .bar_start_idx = 1,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  .quirks = QUIRK_MW0_32BIT,
	  .desc = "Hygon Non-Transparent Bridge"},
};

static const struct pci_device_table amd_ntb_devs[] = {
	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID1),
	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0],
	  PCI_DESCR("AMD Non-Transparent Bridge") },
	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID2),
	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[1],
	  PCI_DESCR("AMD Non-Transparent Bridge") },
	{ PCI_DEV(NTB_HW_HYGON_VENDOR_ID, NTB_HW_HYGON_DEVICE_ID1),
	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[2],
	  PCI_DESCR("Hygon Non-Transparent Bridge") }
};

static unsigned g_amd_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose");

#define amd_ntb_printf(lvl, ...) do {				\
	if (lvl <= g_amd_ntb_hw_debug_level)			\
		device_printf(ntb->device, __VA_ARGS__);	\
} while (0)
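
/*
 * Example (illustrative): the declaration above creates hw.ntb.debug_level,
 * settable at runtime or as a loader tunable, e.g.
 *
 *	# sysctl hw.ntb.debug_level=2
 *
 * Level 0 messages always print; higher levels add detail.
 */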

#ifdef __i386__
/*
 * i386 has no native 64-bit bus_space accessors; synthesize them from two
 * 32-bit accesses.  Note that the combined access is not atomic.
 */
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif

/*
 * AMD NTB INTERFACE ROUTINES
 */
static int
amd_ntb_port_number(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
		return (NTB_PORT_PRI_USD);
	case NTB_CONN_SEC:
		return (NTB_PORT_SEC_DSD);
	default:
		break;
	}

	return (-EINVAL);
}

static int
amd_ntb_peer_port_count(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT);
	return (NTB_DEF_PEER_CNT);
}

static int
amd_ntb_peer_port_number(device_t dev, int pidx)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: pidx %d conn type %d\n",
	    __func__, pidx, ntb->conn_type);

	if (pidx != NTB_DEF_PEER_IDX)
		return (-EINVAL);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
		return (NTB_PORT_SEC_DSD);
	case NTB_CONN_SEC:
		return (NTB_PORT_PRI_USD);
	default:
		break;
	}

	return (-EINVAL);
}

static int
amd_ntb_peer_port_idx(device_t dev, int port)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	int peer_port;

	peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX);

	amd_ntb_printf(1, "%s: port %d peer_port %d\n",
	    __func__, port, peer_port);

	if (peer_port == -EINVAL || port != peer_port)
		return (-EINVAL);

	return (0);
}

/*
 * AMD NTB INTERFACE - LINK ROUTINES
 */
static inline int
amd_link_is_up(struct amd_ntb_softc *ntb)
{

	amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n",
	    __func__, ntb->peer_sta, ntb->cntl_sta);

	if (!ntb->peer_sta)
		return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta));

	return (0);
}

static inline enum ntb_speed
amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb)
{

	if (!amd_link_is_up(ntb))
		return (NTB_SPEED_NONE);

	return (NTB_LNK_STA_SPEED(ntb->lnk_sta));
}

static inline enum ntb_width
amd_ntb_link_sta_width(struct amd_ntb_softc *ntb)
{

	if (!amd_link_is_up(ntb))
		return (NTB_WIDTH_NONE);

	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
}

static bool
amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	if (speed != NULL)
		*speed = amd_ntb_link_sta_speed(ntb);
	if (width != NULL)
		*width = amd_ntb_link_sta_width(ntb);

	return (amd_link_is_up(ntb));
}
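
/*
 * Note on amd_ntb_link_enable()/amd_ntb_link_disable() below: both sides
 * flip their event interrupt mask, but only the primary side owns the
 * PMM_REG_CTL/SMM_REG_CTL link control bits; the secondary side returns
 * EINVAL.  (This mirrors the behavior of the Linux amd ntb driver, with
 * which this code is shared.)
 */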

static int
amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed,
    enum ntb_width max_width)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	amd_init_side_info(ntb);

	/* Enable event interrupt */
	ntb->int_mask &= ~AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Enabling Link.\n", __func__);

	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}

static int
amd_ntb_link_disable(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	amd_deinit_side_info(ntb);

	/* Disable event interrupt */
	ntb->int_mask |= AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Disabling Link.\n", __func__);

	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}

/*
 * AMD NTB memory window routines
 */
static uint8_t
amd_ntb_mw_count(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	return (ntb->hw_info->mw_count);
}

static int
amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
    caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
    bus_addr_t *plimit)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	struct amd_ntb_pci_bar_info *bar_info;

	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
		return (EINVAL);

	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];

	if (base != NULL)
		*base = bar_info->pbase;

	if (vbase != NULL)
		*vbase = bar_info->vbase;

	if (align != NULL)
		*align = bar_info->size;

	if (size != NULL)
		*size = bar_info->size;

	if (align_size != NULL)
		*align_size = 1;

	if (plimit != NULL) {
		/*
		 * For Device ID 0x145B (which has 3 memory windows),
		 * memory window 0 uses a 32-bit BAR; the remaining
		 * windows all use 64-bit BARs.
		 */
		if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT))
			*plimit = BUS_SPACE_MAXADDR_32BIT;
		else
			*plimit = BUS_SPACE_MAXADDR;
	}

	return (0);
}
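
/*
 * Illustrative consumer-side sketch (not part of this driver): a child
 * driver such as ntb_transport(4) typically discovers a window with
 * ntb_mw_get_range() and then points it at local memory with
 * ntb_mw_set_trans(), staying within the reported alignment and limit:
 *
 *	rc = ntb_mw_get_range(dev, 0, &base, &vbase, &size, &align,
 *	    &align_size, &plimit);
 *	if (rc == 0)
 *		rc = ntb_mw_set_trans(dev, 0, dma_addr, size);
 *
 * where dma_addr is a hypothetical bus address of local memory at or
 * below plimit.
 */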

static int
amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr,
    size_t size)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	struct amd_ntb_pci_bar_info *bar_info;

	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
		return (EINVAL);

	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];

	/* Make sure the range fits in the usable mw size. */
	if (size > bar_info->size) {
		amd_ntb_printf(0, "%s: size 0x%jx greater than mw_size 0x%jx\n",
		    __func__, (uintmax_t)size, (uintmax_t)bar_info->size);
		return (EINVAL);
	}

	amd_ntb_printf(1, "%s: mw %d mw_size 0x%jx size 0x%jx base %p\n",
	    __func__, mw_idx, (uintmax_t)bar_info->size,
	    (uintmax_t)size, (void *)bar_info->pci_bus_handle);

	/*
	 * The AMD NTB XLAT and Limit registers need to be written only after
	 * the link has been enabled.
	 *
	 * Set and verify the translation address register.
	 */
	amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr);
	amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n",
	    __func__, mw_idx, bar_info->xlat_off,
	    amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr);

	/*
	 * Set and verify the limit register.
	 *
	 * For Device ID 0x145B (which has 3 memory windows),
	 * memory window 0 uses a 32-bit BAR; the remaining
	 * windows all use 64-bit BARs.
	 */
	if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) {
		amd_ntb_reg_write(4, bar_info->limit_off, (uint32_t)size);
		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n",
		    __func__, bar_info->limit_off,
		    amd_ntb_peer_reg_read(4, bar_info->limit_off),
		    (uint32_t)size);
	} else {
		amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size);
		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%jx\n",
		    __func__, bar_info->limit_off,
		    amd_ntb_peer_reg_read(8, bar_info->limit_off),
		    (uintmax_t)size);
	}

	return (0);
}

static int
amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);

	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
		return (EINVAL);

	return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0));
}

static int
amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	struct amd_ntb_pci_bar_info *bar_info;
	int rc;

	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
		return (EINVAL);

	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
	if (mode == bar_info->map_mode)
		return (0);

	rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size,
	    mode);
	if (rc == 0)
		bar_info->map_mode = mode;

	return (rc);
}

static int
amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	struct amd_ntb_pci_bar_info *bar_info;

	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);

	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
		return (EINVAL);

	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
	*mode = bar_info->map_mode;

	return (0);
}

/*
 * AMD NTB doorbell routines
 */
static int
amd_ntb_db_vector_count(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__,
	    ntb->hw_info->db_count);

	return (ntb->hw_info->db_count);
}

static uint64_t
amd_ntb_db_valid_mask(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n",
	    __func__, ntb->db_valid_mask);

	return (ntb->db_valid_mask);
}
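
/*
 * The doorbell status/mask/request registers are 16 bits wide on this
 * hardware (hence the 2-byte register accesses and uint16_t truncation
 * below), matching the 16 doorbells advertised in amd_ntb_hw_info_list.
 */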

static uint64_t
amd_ntb_db_vector_mask(device_t dev, uint32_t vector)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n",
	    __func__, vector, ntb->hw_info->db_count, ntb->db_valid_mask);

	if (vector < 0 || vector >= ntb->hw_info->db_count)
		return (0);

	return (ntb->db_valid_mask & (1 << vector));
}

static uint64_t
amd_ntb_db_read(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint64_t dbstat_off;

	dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET);

	amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off);

	return (dbstat_off);
}

static void
amd_ntb_db_clear(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
	amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits);
}

static void
amd_ntb_db_set_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask |= db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

static void
amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask &= ~db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

static void
amd_ntb_peer_db_set(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
	amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits);
}

/*
 * AMD NTB scratchpad routines
 */
static uint8_t
amd_ntb_spad_count(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__,
	    ntb->spad_count);

	return (ntb->spad_count);
}

static int
amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t offset;

	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);

	if (idx < 0 || idx >= ntb->spad_count)
		return (EINVAL);

	offset = ntb->self_spad + (idx << 2);
	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);

	return (0);
}

static int
amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t offset;

	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);

	if (idx < 0 || idx >= ntb->spad_count)
		return (EINVAL);

	offset = ntb->self_spad + (idx << 2);
	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);

	return (0);
}

static void
amd_ntb_spad_clear(struct amd_ntb_softc *ntb)
{
	uint8_t i;

	for (i = 0; i < ntb->spad_count; i++)
		amd_ntb_spad_write(ntb->device, i, 0);
}
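
/*
 * Scratchpad layout: the hardware exposes one pool of 32-bit scratchpad
 * registers which amd_ntb_init_dev() splits in half, so each side gets
 * spad_count/2 registers.  self_spad/peer_spad select the half (0 or 0x20)
 * and each index is scaled by 4 bytes; e.g. peer register 3 on the primary
 * side lives at AMD_SPAD_OFFSET + 0x20 + (3 << 2).
 */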

static int
amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t offset;

	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);

	if (idx < 0 || idx >= ntb->spad_count)
		return (EINVAL);

	offset = ntb->peer_spad + (idx << 2);
	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);

	return (0);
}

static int
amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t offset;

	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);

	if (idx < 0 || idx >= ntb->spad_count)
		return (EINVAL);

	offset = ntb->peer_spad + (idx << 2);
	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);

	return (0);
}

/*
 * AMD NTB INIT
 */
static int
amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS)
{
	struct amd_ntb_softc *ntb = arg1;
	struct sbuf *sb;
	int rc = 0;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "NTB AMD Hardware info:\n\n");
	sbuf_printf(sb, "AMD NTB side: %s\n",
	    (ntb->conn_type == NTB_CONN_PRI) ? "PRIMARY" : "SECONDARY");
	sbuf_printf(sb, "AMD LNK STA: %#06x\n", ntb->lnk_sta);

	if (!amd_link_is_up(ntb))
		sbuf_printf(sb, "AMD Link Status: Down\n");
	else {
		sbuf_printf(sb, "AMD Link Status: Up\n");
		sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n",
		    NTB_LNK_STA_SPEED(ntb->lnk_sta));
		sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n",
		    NTB_LNK_STA_WIDTH(ntb->lnk_sta));
	}

	sbuf_printf(sb, "AMD Memory window count: %d\n",
	    ntb->hw_info->mw_count);
	sbuf_printf(sb, "AMD Spad count: %d\n",
	    ntb->spad_count);
	sbuf_printf(sb, "AMD Doorbell count: %d\n",
	    ntb->hw_info->db_count);
	sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n",
	    ntb->msix_vec_count);
	sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n",
	    ntb->db_valid_mask);
	sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n",
	    amd_ntb_reg_read(4, AMD_DBMASK_OFFSET));
	sbuf_printf(sb, "AMD Doorbell: 0x%x\n",
	    amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET));
	sbuf_printf(sb, "AMD NTB Incoming XLAT:\n");
	sbuf_printf(sb, "AMD XLAT1: 0x%jx\n",
	    amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET));
	sbuf_printf(sb, "AMD XLAT23: 0x%jx\n",
	    amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET));
	sbuf_printf(sb, "AMD XLAT45: 0x%jx\n",
	    amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET));
	sbuf_printf(sb, "AMD LMT1: 0x%x\n",
	    amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET));
	sbuf_printf(sb, "AMD LMT23: 0x%jx\n",
	    amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET));
	sbuf_printf(sb, "AMD LMT45: 0x%jx\n",
	    amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	return (rc);
}

static void
amd_ntb_sysctl_init(struct amd_ntb_softc *ntb)
{
	struct sysctl_oid_list *globals;
	struct sysctl_ctx_list *ctx;

	ctx = device_get_sysctl_ctx(ntb->device);
	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));

	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0,
	    amd_ntb_hw_info_handler, "A", "AMD NTB HW Information");
}
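
/*
 * Example (illustrative; the unit number varies): the "info" node attaches
 * under the device's sysctl tree, so the dump above can be read with
 *
 *	# sysctl dev.ntb_hw.0.info
 */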

/*
 * Polls the HW link status register(s); returns true if something has
 * changed.
 */
static bool
amd_ntb_poll_link(struct amd_ntb_softc *ntb)
{
	uint32_t fullreg, reg, stat;

	fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET);
	reg = fullreg & NTB_LIN_STA_ACTIVE_BIT;

	if (reg == ntb->cntl_sta)
		return (false);

	amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n",
	    __func__, fullreg, ntb->cntl_sta);

	ntb->cntl_sta = reg;

	stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4);

	amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n",
	    __func__, stat, ntb->lnk_sta);

	ntb->lnk_sta = stat;

	return (true);
}

static void
amd_link_hb(void *arg)
{
	struct amd_ntb_softc *ntb = arg;

	if (amd_ntb_poll_link(ntb))
		ntb_link_event(ntb->device);

	if (!amd_link_is_up(ntb)) {
		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
		    amd_link_hb, ntb);
	} else {
		callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10),
		    amd_link_hb, ntb);
	}
}

static void
amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec)
{
	if (vec < ntb->hw_info->db_count)
		ntb_db_event(ntb->device, vec);
	else
		amd_ntb_printf(0, "Invalid vector %d\n", vec);
}

static void
amd_ntb_vec_isr(void *arg)
{
	struct amd_ntb_vec *nvec = arg;

	amd_ntb_interrupt(nvec->ntb, nvec->num);
}

static void
amd_ntb_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	amd_ntb_interrupt(arg, 0);
}

static void
amd_init_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
	}
	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
}

static void
amd_deinit_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
		amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	}
}

static int
amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi,
    bool intx)
{
	uint16_t i;
	int flags = 0, rc = 0;

	flags |= RF_ACTIVE;
	if (intx)
		flags |= RF_SHAREABLE;

	for (i = 0; i < num_vectors; i++) {
		/* The RID should be 0 for INTx; MSI/MSI-X RIDs start at 1. */
		if (intx)
			ntb->int_info[i].rid = i;
		else
			ntb->int_info[i].rid = i + 1;

		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, flags);
		if (ntb->int_info[i].res == NULL) {
			amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n");
			return (ENOMEM);
		}

		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;

		if (msi || intx) {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr,
			    ntb, &ntb->int_info[i].tag);
		} else {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr,
			    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		}

		if (rc != 0) {
			amd_ntb_printf(0, "bus_setup_intr %d failed\n", i);
			return (ENXIO);
		}
	}

	return (0);
}
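
/*
 * In the MSI-X case each vector maps 1:1 onto a doorbell bit: vector i
 * fires amd_ntb_vec_isr() with msix_vec[i], which forwards doorbell i to
 * the consumer via ntb_db_event().  With MSI or INTx there is a single
 * vector and every doorbell is reported as vector 0.
 */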

static int
amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors)
{
	uint8_t i;

	ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB,
	    M_ZERO | M_WAITOK);

	for (i = 0; i < max_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

static void
amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb)
{
	if (ntb->msix_vec_count) {
		pci_release_msi(ntb->device);
		ntb->msix_vec_count = 0;
	}

	if (ntb->msix_vec != NULL) {
		free(ntb->msix_vec, M_AMD_NTB);
		ntb->msix_vec = NULL;
	}
}

static int
amd_ntb_init_isr(struct amd_ntb_softc *ntb)
{
	uint32_t supported_vectors, num_vectors;
	bool msi = false, intx = false;
	int rc = 0;

	ntb->db_mask = ntb->db_valid_mask;

	rc = amd_ntb_create_msix_vec(ntb, ntb->hw_info->msix_vector_count);
	if (rc != 0) {
		amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc);
		return (ENOMEM);
	}

	/*
	 * Check the number of MSI-X messages supported by the device.
	 * The minimum necessary MSI-X message count is equal to db_count.
	 */
	supported_vectors = pci_msix_count(ntb->device);
	num_vectors = MIN(supported_vectors, ntb->hw_info->db_count);
	if (num_vectors < ntb->hw_info->db_count) {
		amd_ntb_printf(0, "No minimum msix: supported %d db %d\n",
		    supported_vectors, ntb->hw_info->db_count);
		msi = true;
		goto err_msix_enable;
	}

	/* Allocate the necessary number of MSI-X messages. */
	rc = pci_alloc_msix(ntb->device, &num_vectors);
	if (rc != 0) {
		amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
		msi = true;
		goto err_msix_enable;
	}

	if (num_vectors < ntb->hw_info->db_count) {
		amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors);
		msi = true;
		/*
		 * Otherwise set ntb->hw_info->db_count = ntb->msix_vec_count
		 * = num_vectors, msi = false, and don't release the MSI-X
		 * vectors.
		 */
	}
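
	/*
	 * Fallback ladder: if we cannot get one MSI-X vector per doorbell,
	 * fall back to a single MSI message, and if that also fails, to a
	 * shared legacy INTx line (see the intx handling below).
	 */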

err_msix_enable:

	if (msi) {
		free(ntb->msix_vec, M_AMD_NTB);
		ntb->msix_vec = NULL;
		pci_release_msi(ntb->device);
		num_vectors = 1;
		rc = pci_alloc_msi(ntb->device, &num_vectors);
		if (rc != 0) {
			amd_ntb_printf(0, "Error allocating msi vectors: %d\n",
			    rc);
			msi = false;
			intx = true;
		}
	}

	ntb->hw_info->db_count = ntb->msix_vec_count = num_vectors;

	if (intx) {
		num_vectors = 1;
		ntb->hw_info->db_count = 1;
		ntb->msix_vec_count = 0;
	}

	amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n",
	    __func__, ntb->hw_info->db_count, ntb->msix_vec_count,
	    (int)msi, (int)intx);

	rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx);
	if (rc != 0) {
		amd_ntb_printf(0, "Error setting up isr: %d\n", rc);
		amd_ntb_free_msix_vec(ntb);
	}

	return (rc);
}

static void
amd_ntb_deinit_isr(struct amd_ntb_softc *ntb)
{
	struct amd_ntb_int_info *current_int;
	int i;

	/* Mask all doorbell interrupts */
	ntb->db_mask = ntb->db_valid_mask;
	amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask);

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res),
			    current_int->res);
	}

	amd_ntb_free_msix_vec(ntb);
}

static enum amd_ntb_conn_type
amd_ntb_get_topo(struct amd_ntb_softc *ntb)
{
	uint32_t info;

	info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);

	if (info & AMD_SIDE_MASK)
		return (NTB_CONN_SEC);

	return (NTB_CONN_PRI);
}

static int
amd_ntb_init_dev(struct amd_ntb_softc *ntb)
{
	ntb->db_valid_mask = (1ull << ntb->hw_info->db_count) - 1;
	mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
	case NTB_CONN_SEC:
		ntb->spad_count >>= 1;

		if (ntb->conn_type == NTB_CONN_PRI) {
			ntb->self_spad = 0;
			ntb->peer_spad = 0x20;
		} else {
			ntb->self_spad = 0x20;
			ntb->peer_spad = 0;
		}

		callout_init(&ntb->hb_timer, 1);
		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
		    amd_link_hb, ntb);

		break;

	default:
		amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n",
		    ntb->conn_type);
		return (EINVAL);
	}

	ntb->int_mask = AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	return (0);
}
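
/*
 * db_valid_mask computed above covers one bit per doorbell; e.g. with the
 * db_count of 16 from amd_ntb_hw_info_list this yields
 * (1ull << 16) - 1 == 0xffff.
 */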
"PRIMARY" : "SECONDARY"); 1097 1098 rc = amd_ntb_init_dev(ntb); 1099 if (rc != 0) 1100 return (rc); 1101 1102 rc = amd_ntb_init_isr(ntb); 1103 if (rc != 0) 1104 return (rc); 1105 1106 return (0); 1107 } 1108 1109 static void 1110 print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar, 1111 const char *kind) 1112 { 1113 amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[0x%jx-0x%jx] (0x%jx bytes) (%s)\n", 1114 PCI_RID2BAR(bar->pci_resource_id), bar->vbase, 1115 (char *)bar->vbase + bar->size - 1, (uintmax_t)bar->pbase, 1116 (uintmax_t)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind); 1117 } 1118 1119 static void 1120 save_bar_parameters(struct amd_ntb_pci_bar_info *bar) 1121 { 1122 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); 1123 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); 1124 bar->pbase = rman_get_start(bar->pci_resource); 1125 bar->size = rman_get_size(bar->pci_resource); 1126 bar->vbase = rman_get_virtual(bar->pci_resource); 1127 bar->map_mode = VM_MEMATTR_UNCACHEABLE; 1128 } 1129 1130 static int 1131 map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar) 1132 { 1133 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, 1134 &bar->pci_resource_id, RF_ACTIVE); 1135 if (bar->pci_resource == NULL) 1136 return (ENXIO); 1137 1138 save_bar_parameters(bar); 1139 print_map_success(ntb, bar, "mmr"); 1140 1141 return (0); 1142 } 1143 1144 static int 1145 amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb) 1146 { 1147 int rc = 0; 1148 1149 /* NTB Config/Control registers - BAR 0 */ 1150 ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); 1151 rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]); 1152 if (rc != 0) 1153 goto out; 1154 1155 /* Memory Window 0 BAR - BAR 1 */ 1156 ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1); 1157 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]); 1158 if (rc != 0) 1159 goto out; 1160 ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET; 1161 ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET; 1162 1163 /* Memory Window 1 BAR - BAR 2&3 */ 1164 ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2); 1165 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]); 1166 if (rc != 0) 1167 goto out; 1168 ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET; 1169 ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET; 1170 1171 /* Memory Window 2 BAR - BAR 4&5 */ 1172 ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4); 1173 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]); 1174 if (rc != 0) 1175 goto out; 1176 ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET; 1177 ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET; 1178 1179 out: 1180 if (rc != 0) 1181 amd_ntb_printf(0, "unable to allocate pci resource\n"); 1182 1183 return (rc); 1184 } 1185 1186 static void 1187 amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb) 1188 { 1189 struct amd_ntb_pci_bar_info *bar_info; 1190 int i; 1191 1192 for (i = 0; i < NTB_MAX_BARS; i++) { 1193 bar_info = &ntb->bar_info[i]; 1194 if (bar_info->pci_resource != NULL) 1195 bus_release_resource(ntb->device, SYS_RES_MEMORY, 1196 bar_info->pci_resource_id, bar_info->pci_resource); 1197 } 1198 } 1199 1200 static int 1201 amd_ntb_probe(device_t device) 1202 { 1203 struct amd_ntb_softc *ntb = device_get_softc(device); 1204 const struct pci_device_table *tbl; 1205 1206 tbl = PCI_MATCH(device, amd_ntb_devs); 1207 if (tbl == NULL) 1208 return (ENXIO); 1209 1210 ntb->hw_info = (struct amd_ntb_hw_info *)tbl->driver_data; 1211 ntb->spad_count = 

static int
amd_ntb_probe(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);
	const struct pci_device_table *tbl;

	tbl = PCI_MATCH(device, amd_ntb_devs);
	if (tbl == NULL)
		return (ENXIO);

	ntb->hw_info = (struct amd_ntb_hw_info *)tbl->driver_data;
	ntb->spad_count = ntb->hw_info->spad_count;
	device_set_desc(device, tbl->descr);

	return (BUS_PROBE_GENERIC);
}

static int
amd_ntb_attach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);
	int error;

	ntb->device = device;

	/* Enable PCI bus mastering for "device" */
	pci_enable_busmaster(ntb->device);

	error = amd_ntb_map_pci_bars(ntb);
	if (error)
		goto out;

	error = amd_ntb_init(ntb);
	if (error)
		goto out;

	amd_init_side_info(ntb);

	amd_ntb_spad_clear(ntb);

	amd_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	if (error)
		amd_ntb_detach(device);

	return (error);
}

static int
amd_ntb_detach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);

	ntb_unregister_device(device);
	amd_deinit_side_info(ntb);
	callout_drain(&ntb->hb_timer);
	amd_ntb_deinit_isr(ntb);
	mtx_destroy(&ntb->db_mask_lock);
	pci_disable_busmaster(ntb->device);
	amd_ntb_unmap_pci_bars(ntb);

	return (0);
}

static device_method_t ntb_amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_ntb_probe),
	DEVMETHOD(device_attach,	amd_ntb_attach),
	DEVMETHOD(device_detach,	amd_ntb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_location,	ntb_child_location),
	DEVMETHOD(bus_print_child,	ntb_print_child),
	DEVMETHOD(bus_get_dma_tag,	ntb_get_dma_tag),

	/* NTB interface */
	DEVMETHOD(ntb_port_number,	amd_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count,	amd_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number,	amd_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx,	amd_ntb_peer_port_idx),
	DEVMETHOD(ntb_link_is_up,	amd_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable,	amd_ntb_link_enable),
	DEVMETHOD(ntb_link_disable,	amd_ntb_link_disable),
	DEVMETHOD(ntb_mw_count,		amd_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range,	amd_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans,	amd_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans,	amd_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_set_wc,	amd_ntb_mw_set_wc),
	DEVMETHOD(ntb_mw_get_wc,	amd_ntb_mw_get_wc),
	DEVMETHOD(ntb_db_valid_mask,	amd_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count,	amd_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask,	amd_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_read,		amd_ntb_db_read),
	DEVMETHOD(ntb_db_clear,		amd_ntb_db_clear),
	DEVMETHOD(ntb_db_set_mask,	amd_ntb_db_set_mask),
	DEVMETHOD(ntb_db_clear_mask,	amd_ntb_db_clear_mask),
	DEVMETHOD(ntb_peer_db_set,	amd_ntb_peer_db_set),
	DEVMETHOD(ntb_spad_count,	amd_ntb_spad_count),
	DEVMETHOD(ntb_spad_read,	amd_ntb_spad_read),
	DEVMETHOD(ntb_spad_write,	amd_ntb_spad_write),
	DEVMETHOD(ntb_peer_spad_read,	amd_ntb_peer_spad_read),
	DEVMETHOD(ntb_peer_spad_write,	amd_ntb_peer_spad_write),
	DEVMETHOD_END
};

static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
    sizeof(struct amd_ntb_softc));
DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, NULL, NULL);
MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_amd, 1);
PCI_PNP_INFO(amd_ntb_devs);