1 /*- 2 * This file is provided under a dual BSD/GPLv2 license. When using or 3 * redistributing this file, you may do so under either license. 4 * 5 * GPL LICENSE SUMMARY 6 * 7 * Copyright (C) 2019 Advanced Micro Devices, Inc. 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of version 2 of the GNU General Public License as 11 * published by the Free Software Foundation. 12 * 13 * BSD LICENSE 14 * 15 * Copyright (c) 2019 Advanced Micro Devices, Inc. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 3. Neither the name of AMD corporation nor the names of its 26 * contributors may be used to endorse or promote products derived 27 * from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Contact Information :
 * Rajesh Kumar <rajesh1.kumar@amd.com>
 */

/*
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
 *
 * This module contains a driver for NTB hardware in AMD CPUs
 *
 * Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
53 */ 54 55 #include <sys/param.h> 56 #include <sys/kernel.h> 57 #include <sys/systm.h> 58 #include <sys/bus.h> 59 #include <sys/lock.h> 60 #include <sys/malloc.h> 61 #include <sys/module.h> 62 #include <sys/mutex.h> 63 #include <sys/rman.h> 64 #include <sys/sbuf.h> 65 #include <sys/sysctl.h> 66 67 #include <vm/vm.h> 68 #include <vm/pmap.h> 69 70 #include <machine/bus.h> 71 72 #include <dev/pci/pcireg.h> 73 #include <dev/pci/pcivar.h> 74 75 #include "ntb_hw_amd.h" 76 #include "dev/ntb/ntb.h" 77 78 MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations"); 79 80 static const struct amd_ntb_hw_info amd_ntb_hw_info_list[] = { 81 { .vendor_id = NTB_HW_AMD_VENDOR_ID, 82 .device_id = NTB_HW_AMD_DEVICE_ID1, 83 .mw_count = 3, 84 .bar_start_idx = 1, 85 .spad_count = 16, 86 .db_count = 16, 87 .msix_vector_count = 24, 88 .quirks = QUIRK_MW0_32BIT, 89 .desc = "AMD Non-Transparent Bridge"}, 90 91 { .vendor_id = NTB_HW_AMD_VENDOR_ID, 92 .device_id = NTB_HW_AMD_DEVICE_ID2, 93 .mw_count = 2, 94 .bar_start_idx = 2, 95 .spad_count = 16, 96 .db_count = 16, 97 .msix_vector_count = 24, 98 .quirks = 0, 99 .desc = "AMD Non-Transparent Bridge"}, 100 101 { .vendor_id = NTB_HW_HYGON_VENDOR_ID, 102 .device_id = NTB_HW_HYGON_DEVICE_ID1, 103 .mw_count = 3, 104 .bar_start_idx = 1, 105 .spad_count = 16, 106 .db_count = 16, 107 .msix_vector_count = 24, 108 .quirks = QUIRK_MW0_32BIT, 109 .desc = "Hygon Non-Transparent Bridge"}, 110 }; 111 112 static const struct pci_device_table amd_ntb_devs[] = { 113 { PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID1), 114 .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0], 115 PCI_DESCR("AMD Non-Transparent Bridge") }, 116 { PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID2), 117 .driver_data = (uintptr_t)&amd_ntb_hw_info_list[1], 118 PCI_DESCR("AMD Non-Transparent Bridge") }, 119 { PCI_DEV(NTB_HW_HYGON_VENDOR_ID, NTB_HW_HYGON_DEVICE_ID1), 120 .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0], 121 PCI_DESCR("Hygon Non-Transparent 
Bridge") } 122 }; 123 124 static unsigned g_amd_ntb_hw_debug_level; 125 SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, 126 &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose"); 127 128 #define amd_ntb_printf(lvl, ...) do { \ 129 if (lvl <= g_amd_ntb_hw_debug_level) \ 130 device_printf(ntb->device, __VA_ARGS__); \ 131 } while (0) 132 133 #ifdef __i386__ 134 static __inline uint64_t 135 bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, 136 bus_size_t offset) 137 { 138 139 return (bus_space_read_4(tag, handle, offset) | 140 ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); 141 } 142 143 static __inline void 144 bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, 145 bus_size_t offset, uint64_t val) 146 { 147 148 bus_space_write_4(tag, handle, offset, val); 149 bus_space_write_4(tag, handle, offset + 4, val >> 32); 150 } 151 #endif 152 153 /* 154 * AMD NTB INTERFACE ROUTINES 155 */ 156 static int 157 amd_ntb_port_number(device_t dev) 158 { 159 struct amd_ntb_softc *ntb = device_get_softc(dev); 160 161 amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type); 162 163 switch (ntb->conn_type) { 164 case NTB_CONN_PRI: 165 return (NTB_PORT_PRI_USD); 166 case NTB_CONN_SEC: 167 return (NTB_PORT_SEC_DSD); 168 default: 169 break; 170 } 171 172 return (-EINVAL); 173 } 174 175 static int 176 amd_ntb_peer_port_count(device_t dev) 177 { 178 struct amd_ntb_softc *ntb = device_get_softc(dev); 179 180 amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT); 181 return (NTB_DEF_PEER_CNT); 182 } 183 184 static int 185 amd_ntb_peer_port_number(device_t dev, int pidx) 186 { 187 struct amd_ntb_softc *ntb = device_get_softc(dev); 188 189 amd_ntb_printf(1, "%s: pidx %d conn type %d\n", 190 __func__, pidx, ntb->conn_type); 191 192 if (pidx != NTB_DEF_PEER_IDX) 193 return (-EINVAL); 194 195 switch (ntb->conn_type) { 196 case NTB_CONN_PRI: 197 return (NTB_PORT_SEC_DSD); 198 case NTB_CONN_SEC: 199 
return (NTB_PORT_PRI_USD); 200 default: 201 break; 202 } 203 204 return (-EINVAL); 205 } 206 207 static int 208 amd_ntb_peer_port_idx(device_t dev, int port) 209 { 210 struct amd_ntb_softc *ntb = device_get_softc(dev); 211 int peer_port; 212 213 peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX); 214 215 amd_ntb_printf(1, "%s: port %d peer_port %d\n", 216 __func__, port, peer_port); 217 218 if (peer_port == -EINVAL || port != peer_port) 219 return (-EINVAL); 220 221 return (0); 222 } 223 224 /* 225 * AMD NTB INTERFACE - LINK ROUTINES 226 */ 227 static inline int 228 amd_link_is_up(struct amd_ntb_softc *ntb) 229 { 230 231 amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n", 232 __func__, ntb->peer_sta, ntb->cntl_sta); 233 234 if (!ntb->peer_sta) 235 return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta)); 236 237 return (0); 238 } 239 240 static inline enum ntb_speed 241 amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb) 242 { 243 244 if (!amd_link_is_up(ntb)) 245 return (NTB_SPEED_NONE); 246 247 return (NTB_LNK_STA_SPEED(ntb->lnk_sta)); 248 } 249 250 static inline enum ntb_width 251 amd_ntb_link_sta_width(struct amd_ntb_softc *ntb) 252 { 253 254 if (!amd_link_is_up(ntb)) 255 return (NTB_WIDTH_NONE); 256 257 return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); 258 } 259 260 static bool 261 amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width) 262 { 263 struct amd_ntb_softc *ntb = device_get_softc(dev); 264 265 if (speed != NULL) 266 *speed = amd_ntb_link_sta_speed(ntb); 267 if (width != NULL) 268 *width = amd_ntb_link_sta_width(ntb); 269 270 return (amd_link_is_up(ntb)); 271 } 272 273 static int 274 amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed, 275 enum ntb_width max_width) 276 { 277 struct amd_ntb_softc *ntb = device_get_softc(dev); 278 uint32_t ntb_ctl; 279 280 amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n", 281 __func__, ntb->int_mask, ntb->conn_type); 282 283 amd_init_side_info(ntb); 284 285 /* Enable event interrupt */ 286 
ntb->int_mask &= ~AMD_EVENT_INTMASK; 287 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); 288 289 if (ntb->conn_type == NTB_CONN_SEC) 290 return (EINVAL); 291 292 amd_ntb_printf(0, "%s: Enabling Link.\n", __func__); 293 294 ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET); 295 ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL); 296 amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl); 297 amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl); 298 299 return (0); 300 } 301 302 static int 303 amd_ntb_link_disable(device_t dev) 304 { 305 struct amd_ntb_softc *ntb = device_get_softc(dev); 306 uint32_t ntb_ctl; 307 308 amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n", 309 __func__, ntb->int_mask, ntb->conn_type); 310 311 amd_deinit_side_info(ntb); 312 313 /* Disable event interrupt */ 314 ntb->int_mask |= AMD_EVENT_INTMASK; 315 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); 316 317 if (ntb->conn_type == NTB_CONN_SEC) 318 return (EINVAL); 319 320 amd_ntb_printf(0, "%s: Disabling Link.\n", __func__); 321 322 ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET); 323 ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL); 324 amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl); 325 amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl); 326 327 return (0); 328 } 329 330 /* 331 * AMD NTB memory window routines 332 */ 333 static uint8_t 334 amd_ntb_mw_count(device_t dev) 335 { 336 struct amd_ntb_softc *ntb = device_get_softc(dev); 337 338 return (ntb->hw_info->mw_count); 339 } 340 341 static int 342 amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base, 343 caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, 344 bus_addr_t *plimit) 345 { 346 struct amd_ntb_softc *ntb = device_get_softc(dev); 347 struct amd_ntb_pci_bar_info *bar_info; 348 349 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 350 return (EINVAL); 351 352 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 353 354 if (base != NULL) 355 *base = bar_info->pbase; 356 357 if (vbase != 
NULL) 358 *vbase = bar_info->vbase; 359 360 if (align != NULL) 361 *align = bar_info->size; 362 363 if (size != NULL) 364 *size = bar_info->size; 365 366 if (align_size != NULL) 367 *align_size = 1; 368 369 if (plimit != NULL) { 370 /* 371 * For Device ID 0x145B (which has 3 memory windows), 372 * memory window 0 use a 32-bit bar. The remaining 373 * cases all use 64-bit bar. 374 */ 375 if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) 376 *plimit = BUS_SPACE_MAXADDR_32BIT; 377 else 378 *plimit = BUS_SPACE_MAXADDR; 379 } 380 381 return (0); 382 } 383 384 static int 385 amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size) 386 { 387 struct amd_ntb_softc *ntb = device_get_softc(dev); 388 struct amd_ntb_pci_bar_info *bar_info; 389 390 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 391 return (EINVAL); 392 393 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 394 395 /* Make sure the range fits in the usable mw size. */ 396 if (size > bar_info->size) { 397 amd_ntb_printf(0, "%s: size 0x%jx greater than mw_size 0x%jx\n", 398 __func__, (uintmax_t)size, (uintmax_t)bar_info->size); 399 return (EINVAL); 400 } 401 402 amd_ntb_printf(1, "%s: mw %d mw_size 0x%jx size 0x%jx base %p\n", 403 __func__, mw_idx, (uintmax_t)bar_info->size, 404 (uintmax_t)size, (void *)bar_info->pci_bus_handle); 405 406 /* 407 * AMD NTB XLAT and Limit registers needs to be written only after 408 * link enable. 409 * 410 * Set and verify setting the translation address register. 411 */ 412 amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr); 413 amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n", 414 __func__, mw_idx, bar_info->xlat_off, 415 amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr); 416 417 /* 418 * Set and verify setting the limit register. 419 * 420 * For Device ID 0x145B (which has 3 memory windows), 421 * memory window 0 use a 32-bit bar. The remaining 422 * cases all use 64-bit bar. 
423 */ 424 if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) { 425 amd_ntb_reg_write(4, bar_info->limit_off, (uint32_t)size); 426 amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n", 427 __func__, bar_info->limit_off, 428 amd_ntb_peer_reg_read(4, bar_info->limit_off), 429 (uint32_t)size); 430 } else { 431 amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size); 432 amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%jx\n", 433 __func__, bar_info->limit_off, 434 amd_ntb_peer_reg_read(8, bar_info->limit_off), 435 (uintmax_t)size); 436 } 437 438 return (0); 439 } 440 441 static int 442 amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx) 443 { 444 struct amd_ntb_softc *ntb = device_get_softc(dev); 445 446 amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx); 447 448 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 449 return (EINVAL); 450 451 return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0)); 452 } 453 454 static int 455 amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode) 456 { 457 struct amd_ntb_softc *ntb = device_get_softc(dev); 458 struct amd_ntb_pci_bar_info *bar_info; 459 int rc; 460 461 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 462 return (EINVAL); 463 464 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 465 if (mode == bar_info->map_mode) 466 return (0); 467 468 rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode); 469 if (rc == 0) 470 bar_info->map_mode = mode; 471 472 return (rc); 473 } 474 475 static int 476 amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode) 477 { 478 struct amd_ntb_softc *ntb = device_get_softc(dev); 479 struct amd_ntb_pci_bar_info *bar_info; 480 481 amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx); 482 483 if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count) 484 return (EINVAL); 485 486 bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx]; 487 *mode = bar_info->map_mode; 488 489 return (0); 490 } 
491 492 /* 493 * AMD NTB doorbell routines 494 */ 495 static int 496 amd_ntb_db_vector_count(device_t dev) 497 { 498 struct amd_ntb_softc *ntb = device_get_softc(dev); 499 500 amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__, 501 ntb->hw_info->db_count); 502 503 return (ntb->hw_info->db_count); 504 } 505 506 static uint64_t 507 amd_ntb_db_valid_mask(device_t dev) 508 { 509 struct amd_ntb_softc *ntb = device_get_softc(dev); 510 511 amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n", 512 __func__, ntb->db_valid_mask); 513 514 return (ntb->db_valid_mask); 515 } 516 517 static uint64_t 518 amd_ntb_db_vector_mask(device_t dev, uint32_t vector) 519 { 520 struct amd_ntb_softc *ntb = device_get_softc(dev); 521 522 amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n", 523 __func__, vector, ntb->hw_info->db_count, ntb->db_valid_mask); 524 525 if (vector < 0 || vector >= ntb->hw_info->db_count) 526 return (0); 527 528 return (ntb->db_valid_mask & (1 << vector)); 529 } 530 531 static uint64_t 532 amd_ntb_db_read(device_t dev) 533 { 534 struct amd_ntb_softc *ntb = device_get_softc(dev); 535 uint64_t dbstat_off; 536 537 dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET); 538 539 amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off); 540 541 return (dbstat_off); 542 } 543 544 static void 545 amd_ntb_db_clear(device_t dev, uint64_t db_bits) 546 { 547 struct amd_ntb_softc *ntb = device_get_softc(dev); 548 549 amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits); 550 amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits); 551 } 552 553 static void 554 amd_ntb_db_set_mask(device_t dev, uint64_t db_bits) 555 { 556 struct amd_ntb_softc *ntb = device_get_softc(dev); 557 558 DB_MASK_LOCK(ntb); 559 amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n", 560 __func__, ntb->db_mask, db_bits); 561 562 ntb->db_mask |= db_bits; 563 amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask); 564 DB_MASK_UNLOCK(ntb); 565 } 566 567 static void 568 
amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits) 569 { 570 struct amd_ntb_softc *ntb = device_get_softc(dev); 571 572 DB_MASK_LOCK(ntb); 573 amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n", 574 __func__, ntb->db_mask, db_bits); 575 576 ntb->db_mask &= ~db_bits; 577 amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask); 578 DB_MASK_UNLOCK(ntb); 579 } 580 581 static void 582 amd_ntb_peer_db_set(device_t dev, uint64_t db_bits) 583 { 584 struct amd_ntb_softc *ntb = device_get_softc(dev); 585 586 amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits); 587 amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits); 588 } 589 590 /* 591 * AMD NTB scratchpad routines 592 */ 593 static uint8_t 594 amd_ntb_spad_count(device_t dev) 595 { 596 struct amd_ntb_softc *ntb = device_get_softc(dev); 597 598 amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__, 599 ntb->spad_count); 600 601 return (ntb->spad_count); 602 } 603 604 static int 605 amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val) 606 { 607 struct amd_ntb_softc *ntb = device_get_softc(dev); 608 uint32_t offset; 609 610 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 611 612 if (idx < 0 || idx >= ntb->spad_count) 613 return (EINVAL); 614 615 offset = ntb->self_spad + (idx << 2); 616 *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset); 617 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val); 618 619 return (0); 620 } 621 622 static int 623 amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val) 624 { 625 struct amd_ntb_softc *ntb = device_get_softc(dev); 626 uint32_t offset; 627 628 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 629 630 if (idx < 0 || idx >= ntb->spad_count) 631 return (EINVAL); 632 633 offset = ntb->self_spad + (idx << 2); 634 amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val); 635 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val); 636 637 return (0); 638 } 639 640 static void 641 amd_ntb_spad_clear(struct amd_ntb_softc 
*ntb) 642 { 643 uint8_t i; 644 645 for (i = 0; i < ntb->spad_count; i++) 646 amd_ntb_spad_write(ntb->device, i, 0); 647 } 648 649 static int 650 amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val) 651 { 652 struct amd_ntb_softc *ntb = device_get_softc(dev); 653 uint32_t offset; 654 655 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 656 657 if (idx < 0 || idx >= ntb->spad_count) 658 return (EINVAL); 659 660 offset = ntb->peer_spad + (idx << 2); 661 *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset); 662 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val); 663 664 return (0); 665 } 666 667 static int 668 amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val) 669 { 670 struct amd_ntb_softc *ntb = device_get_softc(dev); 671 uint32_t offset; 672 673 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx); 674 675 if (idx < 0 || idx >= ntb->spad_count) 676 return (EINVAL); 677 678 offset = ntb->peer_spad + (idx << 2); 679 amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val); 680 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val); 681 682 return (0); 683 } 684 685 /* 686 * AMD NTB INIT 687 */ 688 static int 689 amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS) 690 { 691 struct amd_ntb_softc* ntb = arg1; 692 struct sbuf *sb; 693 int rc = 0; 694 695 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 696 if (sb == NULL) 697 return (sb->s_error); 698 699 sbuf_printf(sb, "NTB AMD Hardware info:\n\n"); 700 sbuf_printf(sb, "AMD NTB side: %s\n", 701 (ntb->conn_type == NTB_CONN_PRI)? 
"PRIMARY" : "SECONDARY"); 702 sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta); 703 704 if (!amd_link_is_up(ntb)) 705 sbuf_printf(sb, "AMD Link Status: Down\n"); 706 else { 707 sbuf_printf(sb, "AMD Link Status: Up\n"); 708 sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n", 709 NTB_LNK_STA_SPEED(ntb->lnk_sta)); 710 sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n", 711 NTB_LNK_STA_WIDTH(ntb->lnk_sta)); 712 } 713 714 sbuf_printf(sb, "AMD Memory window count: %d\n", 715 ntb->hw_info->mw_count); 716 sbuf_printf(sb, "AMD Spad count: %d\n", 717 ntb->spad_count); 718 sbuf_printf(sb, "AMD Doorbell count: %d\n", 719 ntb->hw_info->db_count); 720 sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n", 721 ntb->msix_vec_count); 722 sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n", 723 ntb->db_valid_mask); 724 sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n", 725 amd_ntb_reg_read(4, AMD_DBMASK_OFFSET)); 726 sbuf_printf(sb, "AMD Doorbell: 0x%x\n", 727 amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET)); 728 sbuf_printf(sb, "AMD NTB Incoming XLAT: \n"); 729 sbuf_printf(sb, "AMD XLAT1: 0x%jx\n", 730 amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET)); 731 sbuf_printf(sb, "AMD XLAT23: 0x%jx\n", 732 amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET)); 733 sbuf_printf(sb, "AMD XLAT45: 0x%jx\n", 734 amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET)); 735 sbuf_printf(sb, "AMD LMT1: 0x%x\n", 736 amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET)); 737 sbuf_printf(sb, "AMD LMT23: 0x%jx\n", 738 amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET)); 739 sbuf_printf(sb, "AMD LMT45: 0x%jx\n", 740 amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET)); 741 742 rc = sbuf_finish(sb); 743 sbuf_delete(sb); 744 return (rc); 745 } 746 747 static void 748 amd_ntb_sysctl_init(struct amd_ntb_softc *ntb) 749 { 750 struct sysctl_oid_list *globals; 751 struct sysctl_ctx_list *ctx; 752 753 ctx = device_get_sysctl_ctx(ntb->device); 754 globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)); 755 756 SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info", 757 
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0, 758 amd_ntb_hw_info_handler, "A", "AMD NTB HW Information"); 759 } 760 761 /* 762 * Polls the HW link status register(s); returns true if something has changed. 763 */ 764 static bool 765 amd_ntb_poll_link(struct amd_ntb_softc *ntb) 766 { 767 uint32_t fullreg, reg, stat; 768 769 fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET); 770 reg = fullreg & NTB_LIN_STA_ACTIVE_BIT; 771 772 if (reg == ntb->cntl_sta) 773 return (false); 774 775 amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n", 776 __func__, fullreg, ntb->cntl_sta); 777 778 ntb->cntl_sta = reg; 779 780 stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4); 781 782 amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n", 783 __func__, stat, ntb->lnk_sta); 784 785 ntb->lnk_sta = stat; 786 787 return (true); 788 } 789 790 static void 791 amd_link_hb(void *arg) 792 { 793 struct amd_ntb_softc *ntb = arg; 794 795 if (amd_ntb_poll_link(ntb)) 796 ntb_link_event(ntb->device); 797 798 if (!amd_link_is_up(ntb)) { 799 callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT, 800 amd_link_hb, ntb); 801 } else { 802 callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10), 803 amd_link_hb, ntb); 804 } 805 } 806 807 static void 808 amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec) 809 { 810 if (vec < ntb->hw_info->db_count) 811 ntb_db_event(ntb->device, vec); 812 else 813 amd_ntb_printf(0, "Invalid vector %d\n", vec); 814 } 815 816 static void 817 amd_ntb_vec_isr(void *arg) 818 { 819 struct amd_ntb_vec *nvec = arg; 820 821 amd_ntb_interrupt(nvec->ntb, nvec->num); 822 } 823 824 static void 825 amd_ntb_irq_isr(void *arg) 826 { 827 /* If we couldn't set up MSI-X, we only have the one vector. 
*/ 828 amd_ntb_interrupt(arg, 0); 829 } 830 831 static void 832 amd_init_side_info(struct amd_ntb_softc *ntb) 833 { 834 unsigned int reg; 835 836 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 837 if (!(reg & AMD_SIDE_READY)) { 838 reg |= AMD_SIDE_READY; 839 amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg); 840 } 841 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 842 } 843 844 static void 845 amd_deinit_side_info(struct amd_ntb_softc *ntb) 846 { 847 unsigned int reg; 848 849 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 850 if (reg & AMD_SIDE_READY) { 851 reg &= ~AMD_SIDE_READY; 852 amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg); 853 amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 854 } 855 } 856 857 static int 858 amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi, 859 bool intx) 860 { 861 uint16_t i; 862 int flags = 0, rc = 0; 863 864 flags |= RF_ACTIVE; 865 if (intx) 866 flags |= RF_SHAREABLE; 867 868 for (i = 0; i < num_vectors; i++) { 869 /* RID should be 0 for intx */ 870 if (intx) 871 ntb->int_info[i].rid = i; 872 else 873 ntb->int_info[i].rid = i + 1; 874 875 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, 876 SYS_RES_IRQ, &ntb->int_info[i].rid, flags); 877 if (ntb->int_info[i].res == NULL) { 878 amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n"); 879 return (ENOMEM); 880 } 881 882 ntb->int_info[i].tag = NULL; 883 ntb->allocated_interrupts++; 884 885 if (msi || intx) { 886 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 887 INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr, 888 ntb, &ntb->int_info[i].tag); 889 } else { 890 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 891 INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr, 892 &ntb->msix_vec[i], &ntb->int_info[i].tag); 893 } 894 895 if (rc != 0) { 896 amd_ntb_printf(0, "bus_setup_intr %d failed\n", i); 897 return (ENXIO); 898 } 899 } 900 901 return (0); 902 } 903 904 static int 905 amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors) 
906 { 907 uint8_t i; 908 909 ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB, 910 M_ZERO | M_WAITOK); 911 912 for (i = 0; i < max_vectors; i++) { 913 ntb->msix_vec[i].num = i; 914 ntb->msix_vec[i].ntb = ntb; 915 } 916 917 return (0); 918 } 919 920 static void 921 amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb) 922 { 923 if (ntb->msix_vec_count) { 924 pci_release_msi(ntb->device); 925 ntb->msix_vec_count = 0; 926 } 927 928 if (ntb->msix_vec != NULL) { 929 free(ntb->msix_vec, M_AMD_NTB); 930 ntb->msix_vec = NULL; 931 } 932 } 933 934 static int 935 amd_ntb_init_isr(struct amd_ntb_softc *ntb) 936 { 937 uint32_t supported_vectors, num_vectors; 938 bool msi = false, intx = false; 939 int rc = 0; 940 941 ntb->db_mask = ntb->db_valid_mask; 942 943 rc = amd_ntb_create_msix_vec(ntb, ntb->hw_info->msix_vector_count); 944 if (rc != 0) { 945 amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc); 946 return (ENOMEM); 947 } 948 949 /* 950 * Check the number of MSI-X message supported by the device. 951 * Minimum necessary MSI-X message count should be equal to db_count. 952 */ 953 supported_vectors = pci_msix_count(ntb->device); 954 num_vectors = MIN(supported_vectors, ntb->hw_info->db_count); 955 if (num_vectors < ntb->hw_info->db_count) { 956 amd_ntb_printf(0, "No minimum msix: supported %d db %d\n", 957 supported_vectors, ntb->hw_info->db_count); 958 msi = true; 959 goto err_msix_enable; 960 } 961 962 /* Allocate the necessary number of MSI-x messages */ 963 rc = pci_alloc_msix(ntb->device, &num_vectors); 964 if (rc != 0) { 965 amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc); 966 msi = true; 967 goto err_msix_enable; 968 } 969 970 if (num_vectors < ntb->hw_info->db_count) { 971 amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors); 972 msi = true; 973 /* 974 * Else set ntb->hw_info->db_count = ntb->msix_vec_count = 975 * num_vectors, msi=false and dont release msi. 
976 */ 977 } 978 979 err_msix_enable: 980 981 if (msi) { 982 free(ntb->msix_vec, M_AMD_NTB); 983 ntb->msix_vec = NULL; 984 pci_release_msi(ntb->device); 985 num_vectors = 1; 986 rc = pci_alloc_msi(ntb->device, &num_vectors); 987 if (rc != 0) { 988 amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc); 989 msi = false; 990 intx = true; 991 } 992 } 993 994 ntb->hw_info->db_count = ntb->msix_vec_count = num_vectors; 995 996 if (intx) { 997 num_vectors = 1; 998 ntb->hw_info->db_count = 1; 999 ntb->msix_vec_count = 0; 1000 } 1001 1002 amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n", 1003 __func__, ntb->hw_info->db_count, ntb->msix_vec_count, (int)msi, (int)intx); 1004 1005 rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx); 1006 if (rc != 0) { 1007 amd_ntb_printf(0, "Error setting up isr: %d\n", rc); 1008 amd_ntb_free_msix_vec(ntb); 1009 } 1010 1011 return (rc); 1012 } 1013 1014 static void 1015 amd_ntb_deinit_isr(struct amd_ntb_softc *ntb) 1016 { 1017 struct amd_ntb_int_info *current_int; 1018 int i; 1019 1020 /* Mask all doorbell interrupts */ 1021 ntb->db_mask = ntb->db_valid_mask; 1022 amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask); 1023 1024 for (i = 0; i < ntb->allocated_interrupts; i++) { 1025 current_int = &ntb->int_info[i]; 1026 if (current_int->tag != NULL) 1027 bus_teardown_intr(ntb->device, current_int->res, 1028 current_int->tag); 1029 1030 if (current_int->res != NULL) 1031 bus_release_resource(ntb->device, SYS_RES_IRQ, 1032 rman_get_rid(current_int->res), current_int->res); 1033 } 1034 1035 amd_ntb_free_msix_vec(ntb); 1036 } 1037 1038 static enum amd_ntb_conn_type 1039 amd_ntb_get_topo(struct amd_ntb_softc *ntb) 1040 { 1041 uint32_t info; 1042 1043 info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET); 1044 1045 if (info & AMD_SIDE_MASK) 1046 return (NTB_CONN_SEC); 1047 1048 return (NTB_CONN_PRI); 1049 } 1050 1051 static int 1052 amd_ntb_init_dev(struct amd_ntb_softc *ntb) 1053 { 1054 ntb->db_valid_mask = (1ull << ntb->hw_info->db_count) - 
1; 1055 mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN); 1056 1057 switch (ntb->conn_type) { 1058 case NTB_CONN_PRI: 1059 case NTB_CONN_SEC: 1060 ntb->spad_count >>= 1; 1061 1062 if (ntb->conn_type == NTB_CONN_PRI) { 1063 ntb->self_spad = 0; 1064 ntb->peer_spad = 0x20; 1065 } else { 1066 ntb->self_spad = 0x20; 1067 ntb->peer_spad = 0; 1068 } 1069 1070 callout_init(&ntb->hb_timer, 1); 1071 callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT, 1072 amd_link_hb, ntb); 1073 1074 break; 1075 1076 default: 1077 amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n", 1078 ntb->conn_type); 1079 return (EINVAL); 1080 } 1081 1082 ntb->int_mask = AMD_EVENT_INTMASK; 1083 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask); 1084 1085 return (0); 1086 } 1087 1088 static int 1089 amd_ntb_init(struct amd_ntb_softc *ntb) 1090 { 1091 int rc = 0; 1092 1093 ntb->conn_type = amd_ntb_get_topo(ntb); 1094 amd_ntb_printf(0, "AMD NTB Side: %s\n", 1095 (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY"); 1096 1097 rc = amd_ntb_init_dev(ntb); 1098 if (rc != 0) 1099 return (rc); 1100 1101 rc = amd_ntb_init_isr(ntb); 1102 if (rc != 0) 1103 return (rc); 1104 1105 return (0); 1106 } 1107 1108 static void 1109 print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar, 1110 const char *kind) 1111 { 1112 amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[0x%jx-0x%jx] (0x%jx bytes) (%s)\n", 1113 PCI_RID2BAR(bar->pci_resource_id), bar->vbase, 1114 (char *)bar->vbase + bar->size - 1, (uintmax_t)bar->pbase, 1115 (uintmax_t)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind); 1116 } 1117 1118 static void 1119 save_bar_parameters(struct amd_ntb_pci_bar_info *bar) 1120 { 1121 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); 1122 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); 1123 bar->pbase = rman_get_start(bar->pci_resource); 1124 bar->size = rman_get_size(bar->pci_resource); 1125 bar->vbase = rman_get_virtual(bar->pci_resource); 1126 
bar->map_mode = VM_MEMATTR_UNCACHEABLE; 1127 } 1128 1129 static int 1130 map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar) 1131 { 1132 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, 1133 &bar->pci_resource_id, RF_ACTIVE); 1134 if (bar->pci_resource == NULL) 1135 return (ENXIO); 1136 1137 save_bar_parameters(bar); 1138 print_map_success(ntb, bar, "mmr"); 1139 1140 return (0); 1141 } 1142 1143 static int 1144 amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb) 1145 { 1146 int rc = 0; 1147 1148 /* NTB Config/Control registers - BAR 0 */ 1149 ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); 1150 rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]); 1151 if (rc != 0) 1152 goto out; 1153 1154 /* Memory Window 0 BAR - BAR 1 */ 1155 ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1); 1156 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]); 1157 if (rc != 0) 1158 goto out; 1159 ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET; 1160 ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET; 1161 1162 /* Memory Window 1 BAR - BAR 2&3 */ 1163 ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2); 1164 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]); 1165 if (rc != 0) 1166 goto out; 1167 ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET; 1168 ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET; 1169 1170 /* Memory Window 2 BAR - BAR 4&5 */ 1171 ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4); 1172 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]); 1173 if (rc != 0) 1174 goto out; 1175 ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET; 1176 ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET; 1177 1178 out: 1179 if (rc != 0) 1180 amd_ntb_printf(0, "unable to allocate pci resource\n"); 1181 1182 return (rc); 1183 } 1184 1185 static void 1186 amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb) 1187 { 1188 struct amd_ntb_pci_bar_info *bar_info; 1189 int i; 1190 1191 for (i = 0; i < NTB_MAX_BARS; i++) { 1192 
		bar_info = &ntb->bar_info[i];
		if (bar_info->pci_resource != NULL)
			bus_release_resource(ntb->device, SYS_RES_MEMORY,
			    bar_info->pci_resource_id, bar_info->pci_resource);
	}
}

/*
 * Device probe: match the PCI device against the driver's device
 * table; on a hit, stash the matched hardware info and its scratchpad
 * count in the softc and set the device description.
 */
static int
amd_ntb_probe(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);
	const struct pci_device_table *tbl;

	tbl = PCI_MATCH(device, amd_ntb_devs);
	if (tbl == NULL)
		return (ENXIO);

	ntb->hw_info = (struct amd_ntb_hw_info *)tbl->driver_data;
	ntb->spad_count = ntb->hw_info->spad_count;
	device_set_desc(device, tbl->descr);

	return (BUS_PROBE_GENERIC);
}

/*
 * Device attach: enable bus mastering, map the BARs, initialize the
 * hardware and ISRs, publish side info and sysctls, and register the
 * NTB child devices.  On any failure, the partially completed attach
 * is unwound by calling amd_ntb_detach().
 */
static int
amd_ntb_attach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);
	int error;

	ntb->device = device;

	/* Enable PCI bus mastering for "device" */
	pci_enable_busmaster(ntb->device);

	error = amd_ntb_map_pci_bars(ntb);
	if (error)
		goto out;

	error = amd_ntb_init(ntb);
	if (error)
		goto out;

	amd_init_side_info(ntb);

	amd_ntb_spad_clear(ntb);

	amd_ntb_sysctl_init(ntb);

	/* Attach children to this controller */
	error = ntb_register_device(device);

out:
	if (error)
		amd_ntb_detach(device);

	return (error);
}

/*
 * Device detach: tear down what amd_ntb_attach() set up, in reverse
 * order.  NOTE(review): also reached from a failed attach, so each
 * step is presumed safe on partially initialized state -- verify when
 * changing the attach ordering.
 */
static int
amd_ntb_detach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);

	ntb_unregister_device(device);
	amd_deinit_side_info(ntb);
	callout_drain(&ntb->hb_timer);
	amd_ntb_deinit_isr(ntb);
	mtx_destroy(&ntb->db_mask_lock);
	pci_disable_busmaster(ntb->device);
	amd_ntb_unmap_pci_bars(ntb);

	return (0);
}

static device_method_t ntb_amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, amd_ntb_probe),
	DEVMETHOD(device_attach, amd_ntb_attach),
	DEVMETHOD(device_detach, amd_ntb_detach),

	/*
Bus interface */
	DEVMETHOD(bus_child_location, ntb_child_location),
	DEVMETHOD(bus_print_child, ntb_print_child),
	DEVMETHOD(bus_get_dma_tag, ntb_get_dma_tag),

	/* NTB interface */
	DEVMETHOD(ntb_port_number, amd_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count, amd_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number, amd_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx, amd_ntb_peer_port_idx),
	DEVMETHOD(ntb_link_is_up, amd_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable, amd_ntb_link_enable),
	DEVMETHOD(ntb_link_disable, amd_ntb_link_disable),
	DEVMETHOD(ntb_mw_count, amd_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range, amd_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans, amd_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans, amd_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_set_wc, amd_ntb_mw_set_wc),
	DEVMETHOD(ntb_mw_get_wc, amd_ntb_mw_get_wc),
	DEVMETHOD(ntb_db_valid_mask, amd_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count, amd_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask, amd_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_read, amd_ntb_db_read),
	DEVMETHOD(ntb_db_clear, amd_ntb_db_clear),
	DEVMETHOD(ntb_db_set_mask, amd_ntb_db_set_mask),
	DEVMETHOD(ntb_db_clear_mask, amd_ntb_db_clear_mask),
	DEVMETHOD(ntb_peer_db_set, amd_ntb_peer_db_set),
	DEVMETHOD(ntb_spad_count, amd_ntb_spad_count),
	DEVMETHOD(ntb_spad_read, amd_ntb_spad_read),
	DEVMETHOD(ntb_spad_write, amd_ntb_spad_write),
	DEVMETHOD(ntb_peer_spad_read, amd_ntb_peer_spad_read),
	DEVMETHOD(ntb_peer_spad_write, amd_ntb_peer_spad_write),
	DEVMETHOD_END
};

/* Driver class "ntb_hw", one amd_ntb_softc per device, attached under pci. */
static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
    sizeof(struct amd_ntb_softc));
DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, NULL, NULL);
/* Requires the generic ntb module (version 1). */
MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_amd, 1);
/* Export the PCI ID table for devmatch-based autoloading. */
PCI_PNP_INFO(amd_ntb_devs);