1 /*- 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright (C) 2019 Advanced Micro Devices, Inc. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * BSD LICENSE 14 * 15 * Copyright (c) 2019 Advanced Micro Devices, Inc. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. Neither the name of AMD corporation nor the names of its 26 * contributors may be used to endorse or promote products derived 27 * from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * Contact Information : 42 * Rajesh Kumar <rajesh1.kumar@amd.com> 43 */ 44 45 /* 46 * The Non-Transparent Bridge (NTB) is a device that allows you to connect 47 * two or more systems using a PCI-e links, providing remote memory access. 48 * 49 * This module contains a driver for NTB hardware in AMD CPUs 50 * 51 * Much of the code in this module is shared with Linux. Any patches may 52 * be picked up and redistributed in Linux with a dual GPL/BSD license. 
53 */ 54 55 #include <sys/cdefs.h> 56 __FBSDID("$FreeBSD$"); 57 58 #include <sys/param.h> 59 #include <sys/kernel.h> 60 #include <sys/systm.h> 61 #include <sys/bus.h> 62 #include <sys/lock.h> 63 #include <sys/malloc.h> 64 #include <sys/module.h> 65 #include <sys/mutex.h> 66 #include <sys/rman.h> 67 #include <sys/sbuf.h> 68 #include <sys/sysctl.h> 69 70 #include <vm/vm.h> 71 #include <vm/pmap.h> 72 73 #include <machine/bus.h> 74 75 #include <dev/pci/pcireg.h> 76 #include <dev/pci/pcivar.h> 77 78 #include "ntb_hw_amd.h" 79 #include "dev/ntb/ntb.h" 80 81 MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations"); 82 83 static const struct amd_ntb_hw_info amd_ntb_hw_info_list[] = { 84 { .vendor_id = NTB_HW_AMD_VENDOR_ID, 85 .device_id = NTB_HW_AMD_DEVICE_ID1, 86 .mw_count = 3, 87 .bar_start_idx = 1, 88 .spad_count = 16, 89 .db_count = 16, 90 .msix_vector_count = 24, 91 .quirks = QUIRK_MW0_32BIT, 92 .desc = "AMD Non-Transparent Bridge"}, 93 94 { .vendor_id = NTB_HW_AMD_VENDOR_ID, 95 .device_id = NTB_HW_AMD_DEVICE_ID2, 96 .mw_count = 2, 97 .bar_start_idx = 2, 98 .spad_count = 16, 99 .db_count = 16, 100 .msix_vector_count = 24, 101 .quirks = 0, 102 .desc = "AMD Non-Transparent Bridge"}, 103 104 { .vendor_id = NTB_HW_HYGON_VENDOR_ID, 105 .device_id = NTB_HW_HYGON_DEVICE_ID1, 106 .mw_count = 3, 107 .bar_start_idx = 1, 108 .spad_count = 16, 109 .db_count = 16, 110 .msix_vector_count = 24, 111 .quirks = QUIRK_MW0_32BIT, 112 .desc = "Hygon Non-Transparent Bridge"}, 113 }; 114 115 static const struct pci_device_table amd_ntb_devs[] = { 116 { PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID1), 117 .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0], 118 PCI_DESCR("AMD Non-Transparent Bridge") }, 119 { PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID2), 120 .driver_data = (uintptr_t)&amd_ntb_hw_info_list[1], 121 PCI_DESCR("AMD Non-Transparent Bridge") }, 122 { PCI_DEV(NTB_HW_HYGON_VENDOR_ID, NTB_HW_HYGON_DEVICE_ID1), 123 .driver_data = 
(uintptr_t)&amd_ntb_hw_info_list[0], 124 PCI_DESCR("Hygon Non-Transparent Bridge") } 125 }; 126 127 static unsigned g_amd_ntb_hw_debug_level; 128 SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, 129 &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose"); 130 131 #define amd_ntb_printf(lvl, ...) do { \ 132 if (lvl <= g_amd_ntb_hw_debug_level) \ 133 device_printf(ntb->device, __VA_ARGS__); \ 134 } while (0) 135 136 #ifdef __i386__ 137 static __inline uint64_t 138 bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, 139 bus_size_t offset) 140 { 141 142 return (bus_space_read_4(tag, handle, offset) | 143 ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); 144 } 145 146 static __inline void 147 bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, 148 bus_size_t offset, uint64_t val) 149 { 150 151 bus_space_write_4(tag, handle, offset, val); 152 bus_space_write_4(tag, handle, offset + 4, val >> 32); 153 } 154 #endif 155 156 /* 157 * AMD NTB INTERFACE ROUTINES 158 */ 159 static int 160 amd_ntb_port_number(device_t dev) 161 { 162 struct amd_ntb_softc *ntb = device_get_softc(dev); 163 164 amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type); 165 166 switch (ntb->conn_type) { 167 case NTB_CONN_PRI: 168 return (NTB_PORT_PRI_USD); 169 case NTB_CONN_SEC: 170 return (NTB_PORT_SEC_DSD); 171 default: 172 break; 173 } 174 175 return (-EINVAL); 176 } 177 178 static int 179 amd_ntb_peer_port_count(device_t dev) 180 { 181 struct amd_ntb_softc *ntb = device_get_softc(dev); 182 183 amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT); 184 return (NTB_DEF_PEER_CNT); 185 } 186 187 static int 188 amd_ntb_peer_port_number(device_t dev, int pidx) 189 { 190 struct amd_ntb_softc *ntb = device_get_softc(dev); 191 192 amd_ntb_printf(1, "%s: pidx %d conn type %d\n", 193 __func__, pidx, ntb->conn_type); 194 195 if (pidx != NTB_DEF_PEER_IDX) 196 return (-EINVAL); 197 198 switch (ntb->conn_type) { 199 case 
NTB_CONN_PRI: 200 return (NTB_PORT_SEC_DSD); 201 case NTB_CONN_SEC: 202 return (NTB_PORT_PRI_USD); 203 default: 204 break; 205 } 206 207 return (-EINVAL); 208 } 209 210 static int 211 amd_ntb_peer_port_idx(device_t dev, int port) 212 { 213 struct amd_ntb_softc *ntb = device_get_softc(dev); 214 int peer_port; 215 216 peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX); 217 218 amd_ntb_printf(1, "%s: port %d peer_port %d\n", 219 __func__, port, peer_port); 220 221 if (peer_port == -EINVAL || port != peer_port) 222 return (-EINVAL); 223 224 return (0); 225 } 226 227 /* 228 * AMD NTB INTERFACE - LINK ROUTINES 229 */ 230 static inline int 231 amd_link_is_up(struct amd_ntb_softc *ntb) 232 { 233 234 amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n", 235 __func__, ntb->peer_sta, ntb->cntl_sta); 236 237 if (!ntb->peer_sta) 238 return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta)); 239 240 return (0); 241 } 242 243 static inline enum ntb_speed 244 amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb) 245 { 246 247 if (!amd_link_is_up(ntb)) 248 return (NTB_SPEED_NONE); 249 250 return (NTB_LNK_STA_SPEED(ntb->lnk_sta)); 251 } 252 253 static inline enum ntb_width 254 amd_ntb_link_sta_width(struct amd_ntb_softc *ntb) 255 { 256 257 if (!amd_link_is_up(ntb)) 258 return (NTB_WIDTH_NONE); 259 260 return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); 261 } 262 263 static bool 264 amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width) 265 { 266 struct amd_ntb_softc *ntb = device_get_softc(dev); 267 268 if (speed != NULL) 269 *speed = amd_ntb_link_sta_speed(ntb); 270 if (width != NULL) 271 *width = amd_ntb_link_sta_width(ntb); 272 273 return (amd_link_is_up(ntb)); 274 } 275 276 static int 277 amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed, 278 enum ntb_width max_width) 279 { 280 struct amd_ntb_softc *ntb = device_get_softc(dev); 281 uint32_t ntb_ctl; 282 283 amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n", 284 __func__, ntb->int_mask, ntb->conn_type); 285 286 
amd_init_side_info(ntb); 287 288 /* Enable event interrupt */ 289 ntb->int_mask &= ~AMD_EVENT_INTMASK; 290 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); 291 292 if (ntb->conn_type == NTB_CONN_SEC) 293 return (EINVAL); 294 295 amd_ntb_printf(0, "%s: Enabling Link.\n", __func__); 296 297 ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET); 298 ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL); 299 amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl); 300 amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl); 301 302 return (0); 303 } 304 305 static int 306 amd_ntb_link_disable(device_t dev) 307 { 308 struct amd_ntb_softc *ntb = device_get_softc(dev); 309 uint32_t ntb_ctl; 310 311 amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n", 312 __func__, ntb->int_mask, ntb->conn_type); 313 314 amd_deinit_side_info(ntb); 315 316 /* Disable event interrupt */ 317 ntb->int_mask |= AMD_EVENT_INTMASK; 318 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); 319 320 if (ntb->conn_type == NTB_CONN_SEC) 321 return (EINVAL); 322 323 amd_ntb_printf(0, "%s: Disabling Link.\n", __func__); 324 325 ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET); 326 ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL); 327 amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl); 328 amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl); 329 330 return (0); 331 } 332 333 /* 334 * AMD NTB memory window routines 335 */ 336 static uint8_t 337 amd_ntb_mw_count(device_t dev) 338 { 339 struct amd_ntb_softc *ntb = device_get_softc(dev); 340 341 return (ntb->hw_info->mw_count); 342 } 343 344 static int 345 amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base, 346 caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, 347 bus_addr_t *plimit) 348 { 349 struct amd_ntb_softc *ntb = device_get_softc(dev); 350 struct amd_ntb_pci_bar_info *bar_info; 351 352 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 353 return (EINVAL); 354 355 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 356 357 if 
(base != NULL) 358 *base = bar_info->pbase; 359 360 if (vbase != NULL) 361 *vbase = bar_info->vbase; 362 363 if (align != NULL) 364 *align = bar_info->size; 365 366 if (size != NULL) 367 *size = bar_info->size; 368 369 if (align_size != NULL) 370 *align_size = 1; 371 372 if (plimit != NULL) { 373 /* 374 * For Device ID 0x145B (which has 3 memory windows), 375 * memory window 0 use a 32-bit bar. The remaining 376 * cases all use 64-bit bar. 377 */ 378 if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) 379 *plimit = BUS_SPACE_MAXADDR_32BIT; 380 else 381 *plimit = BUS_SPACE_MAXADDR; 382 } 383 384 return (0); 385 } 386 387 static int 388 amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size) 389 { 390 struct amd_ntb_softc *ntb = device_get_softc(dev); 391 struct amd_ntb_pci_bar_info *bar_info; 392 393 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 394 return (EINVAL); 395 396 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 397 398 /* Make sure the range fits in the usable mw size. */ 399 if (size > bar_info->size) { 400 amd_ntb_printf(0, "%s: size 0x%jx greater than mw_size 0x%jx\n", 401 __func__, (uintmax_t)size, (uintmax_t)bar_info->size); 402 return (EINVAL); 403 } 404 405 amd_ntb_printf(1, "%s: mw %d mw_size 0x%jx size 0x%jx base %p\n", 406 __func__, mw_idx, (uintmax_t)bar_info->size, 407 (uintmax_t)size, (void *)bar_info->pci_bus_handle); 408 409 /* 410 * AMD NTB XLAT and Limit registers needs to be written only after 411 * link enable. 412 * 413 * Set and verify setting the translation address register. 414 */ 415 amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr); 416 amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n", 417 __func__, mw_idx, bar_info->xlat_off, 418 amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr); 419 420 /* 421 * Set and verify setting the limit register. 
422 * 423 * For Device ID 0x145B (which has 3 memory windows), 424 * memory window 0 use a 32-bit bar. The remaining 425 * cases all use 64-bit bar. 426 */ 427 if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) { 428 amd_ntb_reg_write(4, bar_info->limit_off, (uint32_t)size); 429 amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n", 430 __func__, bar_info->limit_off, 431 amd_ntb_peer_reg_read(4, bar_info->limit_off), 432 (uint32_t)size); 433 } else { 434 amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size); 435 amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%jx\n", 436 __func__, bar_info->limit_off, 437 amd_ntb_peer_reg_read(8, bar_info->limit_off), 438 (uintmax_t)size); 439 } 440 441 return (0); 442 } 443 444 static int 445 amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx) 446 { 447 struct amd_ntb_softc *ntb = device_get_softc(dev); 448 449 amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx); 450 451 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 452 return (EINVAL); 453 454 return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0)); 455 } 456 457 static int 458 amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode) 459 { 460 struct amd_ntb_softc *ntb = device_get_softc(dev); 461 struct amd_ntb_pci_bar_info *bar_info; 462 int rc; 463 464 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 465 return (EINVAL); 466 467 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 468 if (mode == bar_info->map_mode) 469 return (0); 470 471 rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode); 472 if (rc == 0) 473 bar_info->map_mode = mode; 474 475 return (rc); 476 } 477 478 static int 479 amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode) 480 { 481 struct amd_ntb_softc *ntb = device_get_softc(dev); 482 struct amd_ntb_pci_bar_info *bar_info; 483 484 amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx); 485 486 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 487 
return (EINVAL); 488 489 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 490 *mode = bar_info->map_mode; 491 492 return (0); 493 } 494 495 /* 496 * AMD NTB doorbell routines 497 */ 498 static int 499 amd_ntb_db_vector_count(device_t dev) 500 { 501 struct amd_ntb_softc *ntb = device_get_softc(dev); 502 503 amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__, 504 ntb->hw_info->db_count); 505 506 return (ntb->hw_info->db_count); 507 } 508 509 static uint64_t 510 amd_ntb_db_valid_mask(device_t dev) 511 { 512 struct amd_ntb_softc *ntb = device_get_softc(dev); 513 514 amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n", 515 __func__, ntb->db_valid_mask); 516 517 return (ntb->db_valid_mask); 518 } 519 520 static uint64_t 521 amd_ntb_db_vector_mask(device_t dev, uint32_t vector) 522 { 523 struct amd_ntb_softc *ntb = device_get_softc(dev); 524 525 amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n", 526 __func__, vector, ntb->hw_info->db_count, ntb->db_valid_mask); 527 528 if (vector < 0 || vector >= ntb->hw_info->db_count) 529 return (0); 530 531 return (ntb->db_valid_mask & (1 << vector)); 532 } 533 534 static uint64_t 535 amd_ntb_db_read(device_t dev) 536 { 537 struct amd_ntb_softc *ntb = device_get_softc(dev); 538 uint64_t dbstat_off; 539 540 dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET); 541 542 amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off); 543 544 return (dbstat_off); 545 } 546 547 static void 548 amd_ntb_db_clear(device_t dev, uint64_t db_bits) 549 { 550 struct amd_ntb_softc *ntb = device_get_softc(dev); 551 552 amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits); 553 amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits); 554 } 555 556 static void 557 amd_ntb_db_set_mask(device_t dev, uint64_t db_bits) 558 { 559 struct amd_ntb_softc *ntb = device_get_softc(dev); 560 561 DB_MASK_LOCK(ntb); 562 amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n", 563 __func__, ntb->db_mask, db_bits); 564 565 
ntb->db_mask |= db_bits; 566 amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask); 567 DB_MASK_UNLOCK(ntb); 568 } 569 570 static void 571 amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits) 572 { 573 struct amd_ntb_softc *ntb = device_get_softc(dev); 574 575 DB_MASK_LOCK(ntb); 576 amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n", 577 __func__, ntb->db_mask, db_bits); 578 579 ntb->db_mask &= ~db_bits; 580 amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask); 581 DB_MASK_UNLOCK(ntb); 582 } 583 584 static void 585 amd_ntb_peer_db_set(device_t dev, uint64_t db_bits) 586 { 587 struct amd_ntb_softc *ntb = device_get_softc(dev); 588 589 amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits); 590 amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits); 591 } 592 593 /* 594 * AMD NTB scratchpad routines 595 */ 596 static uint8_t 597 amd_ntb_spad_count(device_t dev) 598 { 599 struct amd_ntb_softc *ntb = device_get_softc(dev); 600 601 amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__, 602 ntb->spad_count); 603 604 return (ntb->spad_count); 605 } 606 607 static int 608 amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val) 609 { 610 struct amd_ntb_softc *ntb = device_get_softc(dev); 611 uint32_t offset; 612 613 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 614 615 if (idx < 0 || idx >= ntb->spad_count) 616 return (EINVAL); 617 618 offset = ntb->self_spad + (idx << 2); 619 *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset); 620 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val); 621 622 return (0); 623 } 624 625 static int 626 amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val) 627 { 628 struct amd_ntb_softc *ntb = device_get_softc(dev); 629 uint32_t offset; 630 631 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 632 633 if (idx < 0 || idx >= ntb->spad_count) 634 return (EINVAL); 635 636 offset = ntb->self_spad + (idx << 2); 637 amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val); 638 amd_ntb_printf(2, "%s: 
offset 0x%x val 0x%x\n", __func__, offset, val); 639 640 return (0); 641 } 642 643 static void 644 amd_ntb_spad_clear(struct amd_ntb_softc *ntb) 645 { 646 uint8_t i; 647 648 for (i = 0; i < ntb->spad_count; i++) 649 amd_ntb_spad_write(ntb->device, i, 0); 650 } 651 652 static int 653 amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val) 654 { 655 struct amd_ntb_softc *ntb = device_get_softc(dev); 656 uint32_t offset; 657 658 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 659 660 if (idx < 0 || idx >= ntb->spad_count) 661 return (EINVAL); 662 663 offset = ntb->peer_spad + (idx << 2); 664 *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset); 665 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val); 666 667 return (0); 668 } 669 670 static int 671 amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val) 672 { 673 struct amd_ntb_softc *ntb = device_get_softc(dev); 674 uint32_t offset; 675 676 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 677 678 if (idx < 0 || idx >= ntb->spad_count) 679 return (EINVAL); 680 681 offset = ntb->peer_spad + (idx << 2); 682 amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val); 683 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val); 684 685 return (0); 686 } 687 688 /* 689 * AMD NTB INIT 690 */ 691 static int 692 amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS) 693 { 694 struct amd_ntb_softc* ntb = arg1; 695 struct sbuf *sb; 696 int rc = 0; 697 698 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 699 if (sb == NULL) 700 return (sb->s_error); 701 702 sbuf_printf(sb, "NTB AMD Hardware info:\n\n"); 703 sbuf_printf(sb, "AMD NTB side: %s\n", 704 (ntb->conn_type == NTB_CONN_PRI)? 
"PRIMARY" : "SECONDARY"); 705 sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta); 706 707 if (!amd_link_is_up(ntb)) 708 sbuf_printf(sb, "AMD Link Status: Down\n"); 709 else { 710 sbuf_printf(sb, "AMD Link Status: Up\n"); 711 sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n", 712 NTB_LNK_STA_SPEED(ntb->lnk_sta)); 713 sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n", 714 NTB_LNK_STA_WIDTH(ntb->lnk_sta)); 715 } 716 717 sbuf_printf(sb, "AMD Memory window count: %d\n", 718 ntb->hw_info->mw_count); 719 sbuf_printf(sb, "AMD Spad count: %d\n", 720 ntb->spad_count); 721 sbuf_printf(sb, "AMD Doorbell count: %d\n", 722 ntb->hw_info->db_count); 723 sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n", 724 ntb->msix_vec_count); 725 sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n", 726 ntb->db_valid_mask); 727 sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n", 728 amd_ntb_reg_read(4, AMD_DBMASK_OFFSET)); 729 sbuf_printf(sb, "AMD Doorbell: 0x%x\n", 730 amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET)); 731 sbuf_printf(sb, "AMD NTB Incoming XLAT: \n"); 732 sbuf_printf(sb, "AMD XLAT1: 0x%jx\n", 733 amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET)); 734 sbuf_printf(sb, "AMD XLAT23: 0x%jx\n", 735 amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET)); 736 sbuf_printf(sb, "AMD XLAT45: 0x%jx\n", 737 amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET)); 738 sbuf_printf(sb, "AMD LMT1: 0x%x\n", 739 amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET)); 740 sbuf_printf(sb, "AMD LMT23: 0x%jx\n", 741 amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET)); 742 sbuf_printf(sb, "AMD LMT45: 0x%jx\n", 743 amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET)); 744 745 rc = sbuf_finish(sb); 746 sbuf_delete(sb); 747 return (rc); 748 } 749 750 static void 751 amd_ntb_sysctl_init(struct amd_ntb_softc *ntb) 752 { 753 struct sysctl_oid_list *globals; 754 struct sysctl_ctx_list *ctx; 755 756 ctx = device_get_sysctl_ctx(ntb->device); 757 globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)); 758 759 SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info", 760 
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0, 761 amd_ntb_hw_info_handler, "A", "AMD NTB HW Information"); 762 } 763 764 /* 765 * Polls the HW link status register(s); returns true if something has changed. 766 */ 767 static bool 768 amd_ntb_poll_link(struct amd_ntb_softc *ntb) 769 { 770 uint32_t fullreg, reg, stat; 771 772 fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET); 773 reg = fullreg & NTB_LIN_STA_ACTIVE_BIT; 774 775 if (reg == ntb->cntl_sta) 776 return (false); 777 778 amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n", 779 __func__, fullreg, ntb->cntl_sta); 780 781 ntb->cntl_sta = reg; 782 783 stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4); 784 785 amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n", 786 __func__, stat, ntb->lnk_sta); 787 788 ntb->lnk_sta = stat; 789 790 return (true); 791 } 792 793 static void 794 amd_link_hb(void *arg) 795 { 796 struct amd_ntb_softc *ntb = arg; 797 798 if (amd_ntb_poll_link(ntb)) 799 ntb_link_event(ntb->device); 800 801 if (!amd_link_is_up(ntb)) { 802 callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT, 803 amd_link_hb, ntb); 804 } else { 805 callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10), 806 amd_link_hb, ntb); 807 } 808 } 809 810 static void 811 amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec) 812 { 813 if (vec < ntb->hw_info->db_count) 814 ntb_db_event(ntb->device, vec); 815 else 816 amd_ntb_printf(0, "Invalid vector %d\n", vec); 817 } 818 819 static void 820 amd_ntb_vec_isr(void *arg) 821 { 822 struct amd_ntb_vec *nvec = arg; 823 824 amd_ntb_interrupt(nvec->ntb, nvec->num); 825 } 826 827 static void 828 amd_ntb_irq_isr(void *arg) 829 { 830 /* If we couldn't set up MSI-X, we only have the one vector. 
*/ 831 amd_ntb_interrupt(arg, 0); 832 } 833 834 static void 835 amd_init_side_info(struct amd_ntb_softc *ntb) 836 { 837 unsigned int reg; 838 839 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 840 if (!(reg & AMD_SIDE_READY)) { 841 reg |= AMD_SIDE_READY; 842 amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg); 843 } 844 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 845 } 846 847 static void 848 amd_deinit_side_info(struct amd_ntb_softc *ntb) 849 { 850 unsigned int reg; 851 852 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 853 if (reg & AMD_SIDE_READY) { 854 reg &= ~AMD_SIDE_READY; 855 amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg); 856 amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 857 } 858 } 859 860 static int 861 amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi, 862 bool intx) 863 { 864 uint16_t i; 865 int flags = 0, rc = 0; 866 867 flags |= RF_ACTIVE; 868 if (intx) 869 flags |= RF_SHAREABLE; 870 871 for (i = 0; i < num_vectors; i++) { 872 /* RID should be 0 for intx */ 873 if (intx) 874 ntb->int_info[i].rid = i; 875 else 876 ntb->int_info[i].rid = i + 1; 877 878 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, 879 SYS_RES_IRQ, &ntb->int_info[i].rid, flags); 880 if (ntb->int_info[i].res == NULL) { 881 amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n"); 882 return (ENOMEM); 883 } 884 885 ntb->int_info[i].tag = NULL; 886 ntb->allocated_interrupts++; 887 888 if (msi || intx) { 889 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 890 INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr, 891 ntb, &ntb->int_info[i].tag); 892 } else { 893 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 894 INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr, 895 &ntb->msix_vec[i], &ntb->int_info[i].tag); 896 } 897 898 if (rc != 0) { 899 amd_ntb_printf(0, "bus_setup_intr %d failed\n", i); 900 return (ENXIO); 901 } 902 } 903 904 return (0); 905 } 906 907 static int 908 amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors) 
909 { 910 uint8_t i; 911 912 ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB, 913 M_ZERO | M_WAITOK); 914 915 for (i = 0; i < max_vectors; i++) { 916 ntb->msix_vec[i].num = i; 917 ntb->msix_vec[i].ntb = ntb; 918 } 919 920 return (0); 921 } 922 923 static void 924 amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb) 925 { 926 if (ntb->msix_vec_count) { 927 pci_release_msi(ntb->device); 928 ntb->msix_vec_count = 0; 929 } 930 931 if (ntb->msix_vec != NULL) { 932 free(ntb->msix_vec, M_AMD_NTB); 933 ntb->msix_vec = NULL; 934 } 935 } 936 937 static int 938 amd_ntb_init_isr(struct amd_ntb_softc *ntb) 939 { 940 uint32_t supported_vectors, num_vectors; 941 bool msi = false, intx = false; 942 int rc = 0; 943 944 ntb->db_mask = ntb->db_valid_mask; 945 946 rc = amd_ntb_create_msix_vec(ntb, ntb->hw_info->msix_vector_count); 947 if (rc != 0) { 948 amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc); 949 return (ENOMEM); 950 } 951 952 /* 953 * Check the number of MSI-X message supported by the device. 954 * Minimum necessary MSI-X message count should be equal to db_count. 955 */ 956 supported_vectors = pci_msix_count(ntb->device); 957 num_vectors = MIN(supported_vectors, ntb->hw_info->db_count); 958 if (num_vectors < ntb->hw_info->db_count) { 959 amd_ntb_printf(0, "No minimum msix: supported %d db %d\n", 960 supported_vectors, ntb->hw_info->db_count); 961 msi = true; 962 goto err_msix_enable; 963 } 964 965 /* Allocate the necessary number of MSI-x messages */ 966 rc = pci_alloc_msix(ntb->device, &num_vectors); 967 if (rc != 0) { 968 amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc); 969 msi = true; 970 goto err_msix_enable; 971 } 972 973 if (num_vectors < ntb->hw_info->db_count) { 974 amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors); 975 msi = true; 976 /* 977 * Else set ntb->hw_info->db_count = ntb->msix_vec_count = 978 * num_vectors, msi=false and dont release msi. 
979 */ 980 } 981 982 err_msix_enable: 983 984 if (msi) { 985 free(ntb->msix_vec, M_AMD_NTB); 986 ntb->msix_vec = NULL; 987 pci_release_msi(ntb->device); 988 num_vectors = 1; 989 rc = pci_alloc_msi(ntb->device, &num_vectors); 990 if (rc != 0) { 991 amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc); 992 msi = false; 993 intx = true; 994 } 995 } 996 997 ntb->hw_info->db_count = ntb->msix_vec_count = num_vectors; 998 999 if (intx) { 1000 num_vectors = 1; 1001 ntb->hw_info->db_count = 1; 1002 ntb->msix_vec_count = 0; 1003 } 1004 1005 amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n", 1006 __func__, ntb->hw_info->db_count, ntb->msix_vec_count, (int)msi, (int)intx); 1007 1008 rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx); 1009 if (rc != 0) { 1010 amd_ntb_printf(0, "Error setting up isr: %d\n", rc); 1011 amd_ntb_free_msix_vec(ntb); 1012 } 1013 1014 return (rc); 1015 } 1016 1017 static void 1018 amd_ntb_deinit_isr(struct amd_ntb_softc *ntb) 1019 { 1020 struct amd_ntb_int_info *current_int; 1021 int i; 1022 1023 /* Mask all doorbell interrupts */ 1024 ntb->db_mask = ntb->db_valid_mask; 1025 amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask); 1026 1027 for (i = 0; i < ntb->allocated_interrupts; i++) { 1028 current_int = &ntb->int_info[i]; 1029 if (current_int->tag != NULL) 1030 bus_teardown_intr(ntb->device, current_int->res, 1031 current_int->tag); 1032 1033 if (current_int->res != NULL) 1034 bus_release_resource(ntb->device, SYS_RES_IRQ, 1035 rman_get_rid(current_int->res), current_int->res); 1036 } 1037 1038 amd_ntb_free_msix_vec(ntb); 1039 } 1040 1041 static enum amd_ntb_conn_type 1042 amd_ntb_get_topo(struct amd_ntb_softc *ntb) 1043 { 1044 uint32_t info; 1045 1046 info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 1047 1048 if (info & AMD_SIDE_MASK) 1049 return (NTB_CONN_SEC); 1050 1051 return (NTB_CONN_PRI); 1052 } 1053 1054 static int 1055 amd_ntb_init_dev(struct amd_ntb_softc *ntb) 1056 { 1057 ntb->db_valid_mask = (1ull << 
	/*
	 * NOTE(review): this chunk begins inside amd_ntb_init_dev(); the
	 * expression below is only the tail of a statement (a doorbell
	 * valid-mask computation derived from hw_info->db_count) whose
	 * beginning lies before this view.
	 */
	ntb->hw_info->db_count) - 1;

	/* Spin mutex protecting the doorbell mask state. */
	mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
	case NTB_CONN_SEC:
		/*
		 * Scratchpad registers are split evenly between the two
		 * sides: the primary side uses the low half (offset 0),
		 * the secondary side the high half (offset 0x20), and each
		 * side's "peer" window is the other half.
		 */
		ntb->spad_count >>= 1;

		if (ntb->conn_type == NTB_CONN_PRI) {
			ntb->self_spad = 0;
			ntb->peer_spad = 0x20;
		} else {
			ntb->self_spad = 0x20;
			ntb->peer_spad = 0;
		}

		/* Start the periodic link heartbeat (amd_link_hb()). */
		callout_init(&ntb->hb_timer, 1);
		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
		    amd_link_hb, ntb);

		break;

	default:
		amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n",
		    ntb->conn_type);
		return (EINVAL);
	}

	/* Program the event interrupt mask. */
	ntb->int_mask = AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	return (0);
}

/*
 * Top-level hardware initialization: determine which side of the bridge
 * this device is (primary/secondary), then initialize the device state
 * and the interrupt handling.
 *
 * Returns 0 on success or the errno of the failing init step.
 */
static int
amd_ntb_init(struct amd_ntb_softc *ntb)
{
	int rc = 0;

	ntb->conn_type = amd_ntb_get_topo(ntb);
	amd_ntb_printf(0, "AMD NTB Side: %s\n",
	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");

	rc = amd_ntb_init_dev(ntb);
	if (rc != 0)
		return (rc);

	rc = amd_ntb_init_isr(ntb);
	if (rc != 0)
		return (rc);

	return (0);
}

/*
 * Log the virtual and physical address ranges of a freshly mapped BAR.
 * "kind" is a short tag describing what the BAR is used for (e.g. "mmr").
 */
static void
print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar,
    const char *kind)
{
	amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[0x%jx-0x%jx] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1, (uintmax_t)bar->pbase,
	    (uintmax_t)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind);
}

/*
 * Cache the bus-space tag/handle, physical base, size and KVA of an
 * activated PCI memory resource in the per-BAR bookkeeping.
 */
static void
save_bar_parameters(struct amd_ntb_pci_bar_info *bar)
{
	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
	bar->pbase = rman_get_start(bar->pci_resource);
	bar->size = rman_get_size(bar->pci_resource);
	bar->vbase = rman_get_virtual(bar->pci_resource);
	/*
	 * Default mapping attribute is uncacheable; presumably updated
	 * later via the ntb_mw_set_wc method -- TODO confirm.
	 */
	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
}

/*
 * Allocate and activate the PCI memory resource selected by
 * bar->pci_resource_id, then record its mapping parameters.
 *
 * Returns ENXIO if the resource cannot be allocated.
 */
static int
map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar)
{
	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
	    &bar->pci_resource_id, RF_ACTIVE);
	if (bar->pci_resource == NULL)
		return (ENXIO);

	save_bar_parameters(bar);
	print_map_success(ntb, bar, "mmr");

	return (0);
}

/*
 * Map all BARs this driver uses: BAR0 holds the NTB config/control
 * registers; BARs 1, 2/3 and 4/5 are memory windows.  Each memory
 * window also records the offsets of its translation and limit
 * registers.  On failure, BARs mapped so far remain allocated; the
 * caller's error path releases them via amd_ntb_unmap_pci_bars().
 */
static int
amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb)
{
	int rc = 0;

	/* NTB Config/Control registers - BAR 0 */
	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	/* Memory Window 0 BAR - BAR 1 */
	ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET;

	/* Memory Window 1 BAR - BAR 2&3 */
	ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET;

	/* Memory Window 2 BAR - BAR 4&5 */
	ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET;

out:
	if (rc != 0)
		amd_ntb_printf(0, "unable to allocate pci resource\n");

	return (rc);
}

/*
 * Release every PCI memory resource that map_bar() allocated.
 * Safe to call when some (or all) BARs were never mapped.
 */
static void
amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb)
{
	struct amd_ntb_pci_bar_info *bar_info;
	int i;

	for (i = 0; i < NTB_MAX_BARS; i++) {
		bar_info = &ntb->bar_info[i];
		if (bar_info->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    bar_info->pci_resource_id, bar_info->pci_resource);
	}
}

/*
 * Device probe: match against the amd_ntb_devs PCI id table.  On a
 * match, stash the per-chip hw_info pointer and scratchpad count in
 * the softc for later use by attach, and set the device description.
 */
static int
amd_ntb_probe(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);
	const struct pci_device_table *tbl;

	tbl = PCI_MATCH(device, amd_ntb_devs);
	if (tbl == NULL)
		return (ENXIO);

	ntb->hw_info = (struct amd_ntb_hw_info *)tbl->driver_data;
	ntb->spad_count = ntb->hw_info->spad_count;
	device_set_desc(device, tbl->descr);

	return (BUS_PROBE_GENERIC);
}

/*
 * Device attach: map the BARs, initialize the hardware and interrupts,
 * publish side info, clear the scratchpads, create sysctl nodes and
 * finally register the NTB child devices.
 *
 * NOTE(review): every failure funnels into amd_ntb_detach() for unwind,
 * which tears everything down unconditionally -- including state (the
 * heartbeat callout, db_mask_lock) that is only initialized once
 * amd_ntb_init() has run.  Verify the teardown routines are safe on a
 * partially initialized softc.
 */
static int
amd_ntb_attach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);
	int error;

	ntb->device = device;

	/* Enable PCI bus mastering for "device" */
	pci_enable_busmaster(ntb->device);

	error = amd_ntb_map_pci_bars(ntb);
	if (error)
		goto out;

	error = amd_ntb_init(ntb);
	if (error)
		goto out;

	amd_init_side_info(ntb);

	amd_ntb_spad_clear(ntb);

	amd_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	if (error)
		amd_ntb_detach(device);

	return (error);
}

/*
 * Device detach (also used as the attach error-unwind path): undo
 * everything attach did, in reverse order of initialization.
 */
static int
amd_ntb_detach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);

	/* Detach & delete all children */
	ntb_unregister_device(device);
	amd_deinit_side_info(ntb);
	callout_drain(&ntb->hb_timer);
	amd_ntb_deinit_isr(ntb);
	mtx_destroy(&ntb->db_mask_lock);
	pci_disable_busmaster(ntb->device);
	amd_ntb_unmap_pci_bars(ntb);

	return (0);
}

/*
 * Method table wiring this driver into newbus and the ntb(4) KPI.
 */
static device_method_t ntb_amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_ntb_probe),
	DEVMETHOD(device_attach,	amd_ntb_attach),
	DEVMETHOD(device_detach,	amd_ntb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_location,	ntb_child_location),
	DEVMETHOD(bus_print_child,	ntb_print_child),
	DEVMETHOD(bus_get_dma_tag,	ntb_get_dma_tag),

	/* NTB interface */
	DEVMETHOD(ntb_port_number,	amd_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count,	amd_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number,	amd_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx, 	amd_ntb_peer_port_idx),
	DEVMETHOD(ntb_link_is_up,	amd_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable,	amd_ntb_link_enable),
	DEVMETHOD(ntb_link_disable,	amd_ntb_link_disable),
	DEVMETHOD(ntb_mw_count,		amd_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range,	amd_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans,	amd_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans,	amd_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_set_wc,	amd_ntb_mw_set_wc),
	DEVMETHOD(ntb_mw_get_wc,	amd_ntb_mw_get_wc),
	DEVMETHOD(ntb_db_valid_mask,	amd_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count,	amd_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask,	amd_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_read,		amd_ntb_db_read),
	DEVMETHOD(ntb_db_clear,		amd_ntb_db_clear),
	DEVMETHOD(ntb_db_set_mask,	amd_ntb_db_set_mask),
	DEVMETHOD(ntb_db_clear_mask,	amd_ntb_db_clear_mask),
	DEVMETHOD(ntb_peer_db_set,	amd_ntb_peer_db_set),
	DEVMETHOD(ntb_spad_count,	amd_ntb_spad_count),
	DEVMETHOD(ntb_spad_read,	amd_ntb_spad_read),
	DEVMETHOD(ntb_spad_write,	amd_ntb_spad_write),
	DEVMETHOD(ntb_peer_spad_read,	amd_ntb_peer_spad_read),
	DEVMETHOD(ntb_peer_spad_write,	amd_ntb_peer_spad_write),
	DEVMETHOD_END
};

/* Register the driver class, module dependencies and PNP match data. */
static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
    sizeof(struct amd_ntb_softc));
DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, NULL, NULL);
MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_amd, 1);
PCI_PNP_INFO(amd_ntb_devs);