xref: /freebsd/sys/dev/ntb/ntb_hw/ntb_hw_amd.c (revision f6a3b357e9be4c6423c85eff9a847163a0d307c8)
1 /*-
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright (C) 2019 Advanced Micro Devices, Inc.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * BSD LICENSE
14  *
15  * Copyright (c) 2019 Advanced Micro Devices, Inc.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of AMD corporation nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  * Contact Information :
42  * Rajesh Kumar <rajesh1.kumar@amd.com>
43  */
44 
45 /*
46  * The Non-Transparent Bridge (NTB) is a device that allows you to connect
47  * two or more systems using a PCI-e links, providing remote memory access.
48  *
49  * This module contains a driver for NTB hardware in AMD CPUs
50  *
51  * Much of the code in this module is shared with Linux. Any patches may
52  * be picked up and redistributed in Linux with a dual GPL/BSD license.
53  */
54 
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
57 
58 #include <sys/param.h>
59 #include <sys/kernel.h>
60 #include <sys/systm.h>
61 #include <sys/bus.h>
62 #include <sys/lock.h>
63 #include <sys/malloc.h>
64 #include <sys/module.h>
65 #include <sys/mutex.h>
66 #include <sys/rman.h>
67 #include <sys/sbuf.h>
68 #include <sys/sysctl.h>
69 
70 #include <vm/vm.h>
71 #include <vm/pmap.h>
72 
73 #include <machine/bus.h>
74 
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
77 
78 #include "ntb_hw_amd.h"
79 #include "dev/ntb/ntb.h"
80 
81 MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations");
82 
/*
 * Table of supported AMD NTB device variants.  Matched via the PCI probe
 * table below; the selected entry drives BAR layout, doorbell, scratchpad
 * and MSI-X vector counts for the rest of the driver.
 */
static const struct amd_ntb_hw_info amd_ntb_hw_info_list[] = {

	{ .vendor_id = NTB_HW_AMD_VENDOR_ID,
	  .device_id = NTB_HW_AMD_DEVICE_ID1,
	  .mw_count = 3,
	  .bar_start_idx = 1,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  /* MW0 on this device sits behind a 32-bit BAR (see mw routines). */
	  .quirks = QUIRK_MW0_32BIT,
	  .desc = "AMD Non-Transparent Bridge"},

	{ .vendor_id = NTB_HW_AMD_VENDOR_ID,
	  .device_id = NTB_HW_AMD_DEVICE_ID2,
	  .mw_count = 2,
	  .bar_start_idx = 2,
	  .spad_count = 16,
	  .db_count = 16,
	  .msix_vector_count = 24,
	  .quirks = 0,
	  .desc = "AMD Non-Transparent Bridge"},
};
105 
/* PCI probe table; driver_data points at the matching hw_info entry above. */
static const struct pci_device_table amd_ntb_devs[] = {
	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID1),
	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[0],
	  PCI_DESCR("AMD Non-Transparent Bridge") },
	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID2),
	  .driver_data = (uintptr_t)&amd_ntb_hw_info_list[1],
	  PCI_DESCR("AMD Non-Transparent Bridge") }
};
114 
/* Debug verbosity; settable as a loader tunable and at runtime via sysctl. */
static unsigned g_amd_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose");

/*
 * Leveled device_printf().  NOTE: the expansion references a local
 * variable named 'ntb'; every caller must have a
 * 'struct amd_ntb_softc *ntb' in scope.
 */
#define amd_ntb_printf(lvl, ...) do {				\
        if (lvl <= g_amd_ntb_hw_debug_level)			\
                device_printf(ntb->device, __VA_ARGS__);	\
} while (0)
123 
#ifdef __i386__
/*
 * i386 lacks bus_space_read_8()/bus_space_write_8(); synthesize 64-bit
 * MMIO accesses from two 32-bit ones (low dword first).  Note these
 * composite accesses are not atomic.
 */
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif
143 
144 /*
145  * AMD NTB INTERFACE ROUTINES
146  */
147 static int
148 amd_ntb_port_number(device_t dev)
149 {
150 	struct amd_ntb_softc *ntb = device_get_softc(dev);
151 
152 	amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type);
153 
154 	switch (ntb->conn_type) {
155 	case NTB_CONN_PRI:
156 		return (NTB_PORT_PRI_USD);
157 	case NTB_CONN_SEC:
158 		return (NTB_PORT_SEC_DSD);
159 	default:
160 		break;
161 	}
162 
163 	return (-EINVAL);
164 }
165 
166 static int
167 amd_ntb_peer_port_count(device_t dev)
168 {
169 	struct amd_ntb_softc *ntb = device_get_softc(dev);
170 
171 	amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT);
172 	return (NTB_DEF_PEER_CNT);
173 }
174 
175 static int
176 amd_ntb_peer_port_number(device_t dev, int pidx)
177 {
178 	struct amd_ntb_softc *ntb = device_get_softc(dev);
179 
180 	amd_ntb_printf(1, "%s: pidx %d conn type %d\n",
181 	    __func__, pidx, ntb->conn_type);
182 
183 	if (pidx != NTB_DEF_PEER_IDX)
184 		return (-EINVAL);
185 
186 	switch (ntb->conn_type) {
187 	case NTB_CONN_PRI:
188 		return (NTB_PORT_SEC_DSD);
189 	case NTB_CONN_SEC:
190 		return (NTB_PORT_PRI_USD);
191 	default:
192 		break;
193 	}
194 
195 	return (-EINVAL);
196 }
197 
198 static int
199 amd_ntb_peer_port_idx(device_t dev, int port)
200 {
201 	struct amd_ntb_softc *ntb = device_get_softc(dev);
202 	int peer_port;
203 
204 	peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX);
205 
206 	amd_ntb_printf(1, "%s: port %d peer_port %d\n",
207 	    __func__, port, peer_port);
208 
209 	if (peer_port == -EINVAL || port != peer_port)
210 		return (-EINVAL);
211 
212 	return (0);
213 }
214 
215 /*
216  * AMD NTB INTERFACE - LINK ROUTINES
217  */
218 static inline int
219 amd_link_is_up(struct amd_ntb_softc *ntb)
220 {
221 
222 	amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n",
223 	    __func__, ntb->peer_sta, ntb->cntl_sta);
224 
225 	if (!ntb->peer_sta)
226 		return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta));
227 
228 	return (0);
229 }
230 
231 static inline enum ntb_speed
232 amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb)
233 {
234 
235 	if (!amd_link_is_up(ntb))
236 		return (NTB_SPEED_NONE);
237 
238 	return (NTB_LNK_STA_SPEED(ntb->lnk_sta));
239 }
240 
241 static inline enum ntb_width
242 amd_ntb_link_sta_width(struct amd_ntb_softc *ntb)
243 {
244 
245 	if (!amd_link_is_up(ntb))
246 		return (NTB_WIDTH_NONE);
247 
248 	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
249 }
250 
251 static bool
252 amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
253 {
254 	struct amd_ntb_softc *ntb = device_get_softc(dev);
255 
256 	if (speed != NULL)
257 		*speed = amd_ntb_link_sta_speed(ntb);
258 	if (width != NULL)
259 		*width = amd_ntb_link_sta_width(ntb);
260 
261 	return (amd_link_is_up(ntb));
262 }
263 
/*
 * Enable the NTB link: advertise this side as ready, unmask event
 * interrupts, and (primary side only) set the PMM/SMM control bits to
 * bring the link up.  The secondary side returns EINVAL after the
 * side-info/interrupt setup because it does not own the link-control
 * register.
 */
static int
amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed,
    enum ntb_width max_width)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Set the READY bit in SIDEINFO so the peer can see us. */
	amd_init_side_info(ntb);

	/* Enable event interrupt */
	ntb->int_mask &= ~AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Enabling Link.\n", __func__);

	/* Read-modify-write the control register to start link training. */
	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
292 
/*
 * Disable the NTB link: withdraw this side's READY advertisement, mask
 * event interrupts, and (primary side only) clear the PMM/SMM control
 * bits.  Mirrors amd_ntb_link_enable(); the secondary side likewise
 * returns EINVAL after the side-info/interrupt teardown.
 */
static int
amd_ntb_link_disable(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Clear the READY bit in SIDEINFO. */
	amd_deinit_side_info(ntb);

	/* Disable event interrupt */
	ntb->int_mask |= AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Disabling Link.\n", __func__);

	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
320 
321 /*
322  * AMD NTB memory window routines
323  */
324 static uint8_t
325 amd_ntb_mw_count(device_t dev)
326 {
327 	struct amd_ntb_softc *ntb = device_get_softc(dev);
328 
329 	return (ntb->hw_info->mw_count);
330 }
331 
332 static int
333 amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
334     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
335     bus_addr_t *plimit)
336 {
337 	struct amd_ntb_softc *ntb = device_get_softc(dev);
338 	struct amd_ntb_pci_bar_info *bar_info;
339 
340 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
341 		return (EINVAL);
342 
343 	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
344 
345 	if (base != NULL)
346 		*base = bar_info->pbase;
347 
348 	if (vbase != NULL)
349 		*vbase = bar_info->vbase;
350 
351 	if (align != NULL)
352 		*align = bar_info->size;
353 
354 	if (size != NULL)
355 		*size = bar_info->size;
356 
357 	if (align_size != NULL)
358 		*align_size = 1;
359 
360 	if (plimit != NULL) {
361 		/*
362 		 * For Device ID 0x145B (which has 3 memory windows),
363 		 * memory window 0 use a 32-bit bar. The remaining
364 		 * cases all use 64-bit bar.
365 		 */
366 		if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT))
367 			*plimit = BUS_SPACE_MAXADDR_32BIT;
368 		else
369 			*plimit = BUS_SPACE_MAXADDR;
370 	}
371 
372 	return (0);
373 }
374 
375 static int
376 amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size)
377 {
378 	struct amd_ntb_softc *ntb = device_get_softc(dev);
379 	struct amd_ntb_pci_bar_info *bar_info;
380 
381 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
382 		return (EINVAL);
383 
384 	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
385 
386 	/* Make sure the range fits in the usable mw size. */
387 	if (size > bar_info->size) {
388 		amd_ntb_printf(0, "%s: size 0x%jx greater than mw_size 0x%jx\n",
389 		    __func__, (uintmax_t)size, (uintmax_t)bar_info->size);
390 		return (EINVAL);
391 	}
392 
393 	amd_ntb_printf(1, "%s: mw %d mw_size 0x%jx size 0x%jx base %p\n",
394 	    __func__, mw_idx, (uintmax_t)bar_info->size,
395 	    (uintmax_t)size, (void *)bar_info->pci_bus_handle);
396 
397 	/*
398 	 * AMD NTB XLAT and Limit registers needs to be written only after
399 	 * link enable.
400 	 *
401 	 * Set and verify setting the translation address register.
402 	 */
403 	amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr);
404 	amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n",
405 	    __func__, mw_idx, bar_info->xlat_off,
406 	    amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr);
407 
408 	/*
409 	 * Set and verify setting the limit register.
410 	 *
411 	 * For Device ID 0x145B (which has 3 memory windows),
412 	 * memory window 0 use a 32-bit bar. The remaining
413 	 * cases all use 64-bit bar.
414 	 */
415 	if ((mw_idx == 0) && (ntb->hw_info->quirks & QUIRK_MW0_32BIT)) {
416 		amd_ntb_reg_write(4, bar_info->limit_off, (uint32_t)size);
417 		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n",
418 		    __func__, bar_info->limit_off,
419 		    amd_ntb_peer_reg_read(4, bar_info->limit_off),
420 		    (uint32_t)size);
421 	} else {
422 		amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size);
423 		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%jx\n",
424 		    __func__, bar_info->limit_off,
425 		    amd_ntb_peer_reg_read(8, bar_info->limit_off),
426 		    (uintmax_t)size);
427 	}
428 
429 	return (0);
430 }
431 
432 static int
433 amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
434 {
435 	struct amd_ntb_softc *ntb = device_get_softc(dev);
436 
437 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
438 
439 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
440 		return (EINVAL);
441 
442 	return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0));
443 }
444 
445 static int
446 amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode)
447 {
448 	struct amd_ntb_softc *ntb = device_get_softc(dev);
449 	struct amd_ntb_pci_bar_info *bar_info;
450 	int rc;
451 
452 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
453 		return (EINVAL);
454 
455 	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
456 	if (mode == bar_info->map_mode)
457 		return (0);
458 
459 	rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode);
460 	if (rc == 0)
461 		bar_info->map_mode = mode;
462 
463 	return (rc);
464 }
465 
466 static int
467 amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode)
468 {
469 	struct amd_ntb_softc *ntb = device_get_softc(dev);
470 	struct amd_ntb_pci_bar_info *bar_info;
471 
472 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
473 
474 	if (mw_idx < 0 || mw_idx >= ntb->hw_info->mw_count)
475 		return (EINVAL);
476 
477 	bar_info = &ntb->bar_info[ntb->hw_info->bar_start_idx + mw_idx];
478 	*mode = bar_info->map_mode;
479 
480 	return (0);
481 }
482 
483 /*
484  * AMD NTB doorbell routines
485  */
486 static int
487 amd_ntb_db_vector_count(device_t dev)
488 {
489 	struct amd_ntb_softc *ntb = device_get_softc(dev);
490 
491 	amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__,
492 	    ntb->hw_info->db_count);
493 
494 	return (ntb->hw_info->db_count);
495 }
496 
497 static uint64_t
498 amd_ntb_db_valid_mask(device_t dev)
499 {
500 	struct amd_ntb_softc *ntb = device_get_softc(dev);
501 
502 	amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n",
503 	    __func__, ntb->db_valid_mask);
504 
505 	return (ntb->db_valid_mask);
506 }
507 
508 static uint64_t
509 amd_ntb_db_vector_mask(device_t dev, uint32_t vector)
510 {
511 	struct amd_ntb_softc *ntb = device_get_softc(dev);
512 
513 	amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n",
514 	    __func__, vector, ntb->hw_info->db_count, ntb->db_valid_mask);
515 
516 	if (vector < 0 || vector >= ntb->hw_info->db_count)
517 		return (0);
518 
519 	return (ntb->db_valid_mask & (1 << vector));
520 }
521 
522 static uint64_t
523 amd_ntb_db_read(device_t dev)
524 {
525 	struct amd_ntb_softc *ntb = device_get_softc(dev);
526 	uint64_t dbstat_off;
527 
528 	dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET);
529 
530 	amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off);
531 
532 	return (dbstat_off);
533 }
534 
535 static void
536 amd_ntb_db_clear(device_t dev, uint64_t db_bits)
537 {
538 	struct amd_ntb_softc *ntb = device_get_softc(dev);
539 
540 	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
541 	amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits);
542 }
543 
/*
 * Mask (disable) the given doorbell bits.  The cached mask is updated
 * under the DB mask lock and pushed to the 16-bit hardware register.
 */
static void
amd_ntb_db_set_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	/* Serialize the read-modify-write of the cached doorbell mask. */
	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask |= db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}
557 
/*
 * Unmask (enable) the given doorbell bits; counterpart of
 * amd_ntb_db_set_mask(), with the same locking discipline.
 */
static void
amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask &= ~db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}
571 
572 static void
573 amd_ntb_peer_db_set(device_t dev, uint64_t db_bits)
574 {
575 	struct amd_ntb_softc *ntb = device_get_softc(dev);
576 
577 	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
578 	amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits);
579 }
580 
581 /*
582  * AMD NTB scratchpad routines
583  */
584 static uint8_t
585 amd_ntb_spad_count(device_t dev)
586 {
587 	struct amd_ntb_softc *ntb = device_get_softc(dev);
588 
589 	amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__,
590 	    ntb->spad_count);
591 
592 	return (ntb->spad_count);
593 }
594 
595 static int
596 amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
597 {
598 	struct amd_ntb_softc *ntb = device_get_softc(dev);
599 	uint32_t offset;
600 
601 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
602 
603 	if (idx < 0 || idx >= ntb->spad_count)
604 		return (EINVAL);
605 
606 	offset = ntb->self_spad + (idx << 2);
607 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
608 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
609 
610 	return (0);
611 }
612 
613 static int
614 amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
615 {
616 	struct amd_ntb_softc *ntb = device_get_softc(dev);
617 	uint32_t offset;
618 
619 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
620 
621 	if (idx < 0 || idx >= ntb->spad_count)
622 		return (EINVAL);
623 
624 	offset = ntb->self_spad + (idx << 2);
625 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
626 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
627 
628 	return (0);
629 }
630 
631 static void
632 amd_ntb_spad_clear(struct amd_ntb_softc *ntb)
633 {
634 	uint8_t i;
635 
636 	for (i = 0; i < ntb->spad_count; i++)
637 		amd_ntb_spad_write(ntb->device, i, 0);
638 }
639 
640 static int
641 amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
642 {
643 	struct amd_ntb_softc *ntb = device_get_softc(dev);
644 	uint32_t offset;
645 
646 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
647 
648 	if (idx < 0 || idx >= ntb->spad_count)
649 		return (EINVAL);
650 
651 	offset = ntb->peer_spad + (idx << 2);
652 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
653 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
654 
655 	return (0);
656 }
657 
658 static int
659 amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
660 {
661 	struct amd_ntb_softc *ntb = device_get_softc(dev);
662 	uint32_t offset;
663 
664 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
665 
666 	if (idx < 0 || idx >= ntb->spad_count)
667 		return (EINVAL);
668 
669 	offset = ntb->peer_spad + (idx << 2);
670 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
671 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
672 
673 	return (0);
674 }
675 
676 
677 /*
678  * AMD NTB INIT
679  */
680 static int
681 amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS)
682 {
683 	struct amd_ntb_softc* ntb = arg1;
684 	struct sbuf *sb;
685 	int rc = 0;
686 
687 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
688 	if (sb == NULL)
689 		return (sb->s_error);
690 
691 	sbuf_printf(sb, "NTB AMD Hardware info:\n\n");
692 	sbuf_printf(sb, "AMD NTB side: %s\n",
693 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
694 	sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta);
695 
696 	if (!amd_link_is_up(ntb))
697 		sbuf_printf(sb, "AMD Link Status: Down\n");
698 	else {
699 		sbuf_printf(sb, "AMD Link Status: Up\n");
700 		sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n",
701 		    NTB_LNK_STA_SPEED(ntb->lnk_sta));
702 		sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n",
703 		    NTB_LNK_STA_WIDTH(ntb->lnk_sta));
704 	}
705 
706 	sbuf_printf(sb, "AMD Memory window count: %d\n",
707 	    ntb->hw_info->mw_count);
708 	sbuf_printf(sb, "AMD Spad count: %d\n",
709 	    ntb->spad_count);
710 	sbuf_printf(sb, "AMD Doorbell count: %d\n",
711 	    ntb->hw_info->db_count);
712 	sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n",
713 	    ntb->msix_vec_count);
714 	sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n",
715 	    ntb->db_valid_mask);
716 	sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n",
717 	    amd_ntb_reg_read(4, AMD_DBMASK_OFFSET));
718 	sbuf_printf(sb, "AMD Doorbell: 0x%x\n",
719 	    amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET));
720 	sbuf_printf(sb, "AMD NTB Incoming XLAT: \n");
721 	sbuf_printf(sb, "AMD XLAT1: 0x%jx\n",
722 	    amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET));
723 	sbuf_printf(sb, "AMD XLAT23: 0x%jx\n",
724 	    amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET));
725 	sbuf_printf(sb, "AMD XLAT45: 0x%jx\n",
726 	    amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET));
727 	sbuf_printf(sb, "AMD LMT1: 0x%x\n",
728 	    amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET));
729 	sbuf_printf(sb, "AMD LMT23: 0x%jx\n",
730 	    amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET));
731 	sbuf_printf(sb, "AMD LMT45: 0x%jx\n",
732 	    amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET));
733 
734 	rc = sbuf_finish(sb);
735 	sbuf_delete(sb);
736 	return (rc);
737 }
738 
739 static void
740 amd_ntb_sysctl_init(struct amd_ntb_softc *ntb)
741 {
742 	struct sysctl_oid_list *globals;
743 	struct sysctl_ctx_list *ctx;
744 
745 	ctx = device_get_sysctl_ctx(ntb->device);
746 	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
747 
748 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info",
749 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0,
750 	    amd_ntb_hw_info_handler, "A", "AMD NTB HW Information");
751 }
752 
753 /*
754  * Polls the HW link status register(s); returns true if something has changed.
755  */
static bool
amd_ntb_poll_link(struct amd_ntb_softc *ntb)
{
	uint32_t fullreg, reg, stat;

	/* The peer's SIDEINFO register carries the link-active bit. */
	fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET);
	reg = fullreg & NTB_LIN_STA_ACTIVE_BIT;

	/* No transition since the last poll: nothing to report. */
	if (reg == ntb->cntl_sta)
		return (false);

	amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n",
	    __func__, fullreg, ntb->cntl_sta);

	ntb->cntl_sta = reg;

	/* Refresh the cached PCIe link status (speed/width) from config space. */
	stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4);

	amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n",
	    __func__, stat, ntb->lnk_sta);

	ntb->lnk_sta = stat;

	return (true);
}
781 
782 static void
783 amd_link_hb(void *arg)
784 {
785 	struct amd_ntb_softc *ntb = arg;
786 
787 	if (amd_ntb_poll_link(ntb))
788 		ntb_link_event(ntb->device);
789 
790 	if (!amd_link_is_up(ntb)) {
791 		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
792 		    amd_link_hb, ntb);
793 	} else {
794 		callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10),
795 		    amd_link_hb, ntb);
796 	}
797 }
798 
799 static void
800 amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec)
801 {
802 	if (vec < ntb->hw_info->db_count)
803 		ntb_db_event(ntb->device, vec);
804 	else
805 		amd_ntb_printf(0, "Invalid vector %d\n", vec);
806 }
807 
808 static void
809 amd_ntb_vec_isr(void *arg)
810 {
811 	struct amd_ntb_vec *nvec = arg;
812 
813 	amd_ntb_interrupt(nvec->ntb, nvec->num);
814 }
815 
/* Shared MSI/INTx handler: everything funnels through vector 0. */
static void
amd_ntb_irq_isr(void *arg)
{
	amd_ntb_interrupt(arg, 0);
}
822 
/* Set the READY bit in SIDEINFO so the peer can see this side is up. */
static void
amd_init_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
	}
	/* Final read: value unused; presumably flushes the posted write. */
	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
}
835 
/* Clear the READY bit in SIDEINFO; counterpart of amd_init_side_info(). */
static void
amd_deinit_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
		/* Read back; presumably flushes the posted write. */
		amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	}
}
848 
/*
 * Allocate and wire up 'num_vectors' IRQ resources.  With MSI or INTx
 * there is a single shared handler (amd_ntb_irq_isr); with MSI-X each
 * vector gets its own amd_ntb_vec_isr bound to a msix_vec entry.
 *
 * NOTE(review): on failure the resources acquired so far are not
 * released here; allocated_interrupts tracks them so that
 * amd_ntb_deinit_isr() can clean up -- confirm the caller's error path
 * runs it.
 */
static int
amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi,
    bool intx)
{
	uint16_t i;
	int flags = 0, rc = 0;

	flags |= RF_ACTIVE;
	if (intx)
		flags |= RF_SHAREABLE;

	for (i = 0; i < num_vectors; i++) {

		/* RID should be 0 for intx */
		if (intx)
			ntb->int_info[i].rid = i;
		else
			ntb->int_info[i].rid = i + 1;	/* MSI/MSI-X RIDs are 1-based */

		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, flags);
		if (ntb->int_info[i].res == NULL) {
			amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n");
			return (ENOMEM);
		}

		ntb->int_info[i].tag = NULL;
		ntb->allocated_interrupts++;

		if (msi || intx) {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr,
			    ntb, &ntb->int_info[i].tag);
		} else {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr,
			    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		}

		if (rc != 0) {
			amd_ntb_printf(0, "bus_setup_intr %d failed\n", i);
			return (ENXIO);
		}
	}

	return (0);
}
896 
897 static int
898 amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors)
899 {
900 	uint8_t i;
901 
902 	ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB,
903 	    M_ZERO | M_WAITOK);
904 
905 	for (i = 0; i < max_vectors; i++) {
906 		ntb->msix_vec[i].num = i;
907 		ntb->msix_vec[i].ntb = ntb;
908 	}
909 
910 	return (0);
911 }
912 
913 static void
914 amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb)
915 {
916 	if (ntb->msix_vec_count) {
917 		pci_release_msi(ntb->device);
918 		ntb->msix_vec_count = 0;
919 	}
920 
921 	if (ntb->msix_vec != NULL) {
922 		free(ntb->msix_vec, M_AMD_NTB);
923 		ntb->msix_vec = NULL;
924 	}
925 }
926 
/*
 * Interrupt setup with a three-step fallback ladder:
 * MSI-X (one vector per doorbell) -> single MSI -> shared INTx.
 * On the MSI fallback db_count collapses to 1; on INTx additionally
 * msix_vec_count is zeroed.
 */
static int
amd_ntb_init_isr(struct amd_ntb_softc *ntb)
{
	uint32_t supported_vectors, num_vectors;
	bool msi = false, intx = false;
	int rc = 0;

	/* Start with all doorbells masked. */
	ntb->db_mask = ntb->db_valid_mask;

	rc = amd_ntb_create_msix_vec(ntb, ntb->hw_info->msix_vector_count);
	if (rc != 0) {
		amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc);
		return (ENOMEM);
	}

	/*
	 * Check the number of MSI-X message supported by the device.
	 * Minimum necessary MSI-X message count should be equal to db_count.
	 */
	supported_vectors = pci_msix_count(ntb->device);
	num_vectors = MIN(supported_vectors, ntb->hw_info->db_count);
	if (num_vectors < ntb->hw_info->db_count) {
		amd_ntb_printf(0, "No minimum msix: supported %d db %d\n",
		    supported_vectors, ntb->hw_info->db_count);
		msi = true;
		goto err_msix_enable;
	}

	/* Allocate the necessary number of MSI-x messages */
	rc = pci_alloc_msix(ntb->device, &num_vectors);
	if (rc != 0) {
		amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
		msi = true;
		goto err_msix_enable;
	}

	if (num_vectors < ntb->hw_info->db_count) {
		amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors);
		msi = true;
		/*
		 * Else set ntb->hw_info->db_count = ntb->msix_vec_count =
		 * num_vectors, msi=false and dont release msi.
		 */
	}

err_msix_enable:

	if (msi) {
		/* MSI-X failed: drop its bookkeeping and try a single MSI. */
		free(ntb->msix_vec, M_AMD_NTB);
		ntb->msix_vec = NULL;
		pci_release_msi(ntb->device);
		num_vectors = 1;
		rc = pci_alloc_msi(ntb->device, &num_vectors);
		if (rc != 0) {
			/* MSI also failed: fall back to legacy INTx. */
			amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
			msi = false;
			intx = true;
		}
	}

	ntb->hw_info->db_count = ntb->msix_vec_count = num_vectors;

	if (intx) {
		num_vectors = 1;
		ntb->hw_info->db_count = 1;
		ntb->msix_vec_count = 0;
	}

	amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n",
	    __func__, ntb->hw_info->db_count, ntb->msix_vec_count, (int)msi, (int)intx);

	rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx);
	if (rc != 0) {
		amd_ntb_printf(0, "Error setting up isr: %d\n", rc);
		amd_ntb_free_msix_vec(ntb);
	}

	return (rc);
}
1006 
/*
 * Tear down interrupt state: mask all doorbells in hardware, detach and
 * release every IRQ resource recorded by amd_ntb_setup_isr(), then free
 * the MSI/MSI-X allocation.
 */
static void
amd_ntb_deinit_isr(struct amd_ntb_softc *ntb)
{
	struct amd_ntb_int_info *current_int;
	int i;

	/* Mask all doorbell interrupts */
	ntb->db_mask = ntb->db_valid_mask;
	amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask);

	/* allocated_interrupts covers partially-completed setups, too. */
	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	amd_ntb_free_msix_vec(ntb);
}
1030 
1031 static enum amd_ntb_conn_type
1032 amd_ntb_get_topo(struct amd_ntb_softc *ntb)
1033 {
1034 	uint32_t info;
1035 
1036 	info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
1037 
1038 	if (info & AMD_SIDE_MASK)
1039 		return (NTB_CONN_SEC);
1040 
1041 	return (NTB_CONN_PRI);
1042 }
1043 
/*
 * Per-device initialization: build the doorbell valid mask, split the
 * scratchpad space between the two sides, start the link heartbeat, and
 * mask event interrupts until the link is explicitly enabled.
 */
static int
amd_ntb_init_dev(struct amd_ntb_softc *ntb)
{
	ntb->db_valid_mask	 = (1ull << ntb->hw_info->db_count) - 1;
	mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
	case NTB_CONN_SEC:
		/* Each side owns half of the scratchpad registers. */
		ntb->spad_count >>= 1;

		/* 0x20 = 8 spads * 4 bytes: offset of the other side's half. */
		if (ntb->conn_type == NTB_CONN_PRI) {
			ntb->self_spad = 0;
			ntb->peer_spad = 0x20;
		} else {
			ntb->self_spad = 0x20;
			ntb->peer_spad = 0;
		}

		/* Kick off periodic link polling (see amd_link_hb()). */
		callout_init(&ntb->hb_timer, 1);
		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
		    amd_link_hb, ntb);

		break;

	default:
		amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n",
		    ntb->conn_type);
		return (EINVAL);
	}

	/* Event interrupts stay masked until amd_ntb_link_enable(). */
	ntb->int_mask = AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	return (0);
}
1080 
1081 static int
1082 amd_ntb_init(struct amd_ntb_softc *ntb)
1083 {
1084 	int rc = 0;
1085 
1086 	ntb->conn_type = amd_ntb_get_topo(ntb);
1087 	amd_ntb_printf(0, "AMD NTB Side: %s\n",
1088 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
1089 
1090 	rc = amd_ntb_init_dev(ntb);
1091 	if (rc != 0)
1092 		return (rc);
1093 
1094 	rc = amd_ntb_init_isr(ntb);
1095 	if (rc != 0)
1096 		return (rc);
1097 
1098 	return (0);
1099 }
1100 
/* Log a mapped BAR's virtual and physical extents at debug level 0. */
static void
print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar,
    const char *kind)
{
	amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1, (void *)bar->pbase,
	    (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind);
}
1110 
1111 static void
1112 save_bar_parameters(struct amd_ntb_pci_bar_info *bar)
1113 {
1114 	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
1115 	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
1116 	bar->pbase = rman_get_start(bar->pci_resource);
1117 	bar->size = rman_get_size(bar->pci_resource);
1118 	bar->vbase = rman_get_virtual(bar->pci_resource);
1119 	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
1120 }
1121 
1122 static int
1123 map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar)
1124 {
1125 	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
1126 	    &bar->pci_resource_id, RF_ACTIVE);
1127 	if (bar->pci_resource == NULL)
1128 		return (ENXIO);
1129 
1130 	save_bar_parameters(bar);
1131 	print_map_success(ntb, bar, "mmr");
1132 
1133 	return (0);
1134 }
1135 
1136 static int
1137 amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb)
1138 {
1139 	int rc = 0;
1140 
1141 	/* NTB Config/Control registers - BAR 0 */
1142 	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
1143 	rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
1144 	if (rc != 0)
1145 		goto out;
1146 
1147 	/* Memory Window 0 BAR - BAR 1 */
1148 	ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1);
1149 	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]);
1150 	if (rc != 0)
1151 		goto out;
1152 	ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET;
1153 	ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET;
1154 
1155 	/* Memory Window 1 BAR - BAR 2&3 */
1156 	ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2);
1157 	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]);
1158 	if (rc != 0)
1159 		goto out;
1160 	ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET;
1161 	ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET;
1162 
1163 	/* Memory Window 2 BAR - BAR 4&5 */
1164 	ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4);
1165 	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]);
1166 	if (rc != 0)
1167 		goto out;
1168 	ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET;
1169 	ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET;
1170 
1171 out:
1172 	if (rc != 0)
1173 		amd_ntb_printf(0, "unable to allocate pci resource\n");
1174 
1175 	return (rc);
1176 }
1177 
1178 static void
1179 amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb)
1180 {
1181 	struct amd_ntb_pci_bar_info *bar_info;
1182 	int i;
1183 
1184 	for (i = 0; i < NTB_MAX_BARS; i++) {
1185 		bar_info = &ntb->bar_info[i];
1186 		if (bar_info->pci_resource != NULL)
1187 			bus_release_resource(ntb->device, SYS_RES_MEMORY,
1188 			    bar_info->pci_resource_id, bar_info->pci_resource);
1189 	}
1190 }
1191 
1192 static int
1193 amd_ntb_probe(device_t device)
1194 {
1195 	struct amd_ntb_softc *ntb = device_get_softc(device);
1196 	const struct pci_device_table *tbl;
1197 
1198 	tbl = PCI_MATCH(device, amd_ntb_devs);
1199 	if (tbl == NULL)
1200 		return (ENXIO);
1201 
1202 	ntb->hw_info = (struct amd_ntb_hw_info *)tbl->driver_data;
1203 	ntb->spad_count = ntb->hw_info->spad_count;
1204 	device_set_desc(device, tbl->descr);
1205 
1206 	return (BUS_PROBE_GENERIC);
1207 }
1208 
1209 static int
1210 amd_ntb_attach(device_t device)
1211 {
1212 	struct amd_ntb_softc *ntb = device_get_softc(device);
1213 	int error;
1214 
1215 	ntb->device = device;
1216 
1217 	/* Enable PCI bus mastering for "device" */
1218 	pci_enable_busmaster(ntb->device);
1219 
1220 	error = amd_ntb_map_pci_bars(ntb);
1221 	if (error)
1222 		goto out;
1223 
1224 	error = amd_ntb_init(ntb);
1225 	if (error)
1226 		goto out;
1227 
1228 	amd_init_side_info(ntb);
1229 
1230 	amd_ntb_spad_clear(ntb);
1231 
1232 	amd_ntb_sysctl_init(ntb);
1233 
1234 	/* Attach children to this controller */
1235 	error = ntb_register_device(device);
1236 
1237 out:
1238 	if (error)
1239 		amd_ntb_detach(device);
1240 
1241 	return (error);
1242 }
1243 
1244 static int
1245 amd_ntb_detach(device_t device)
1246 {
1247 	struct amd_ntb_softc *ntb = device_get_softc(device);
1248 
1249 	ntb_unregister_device(device);
1250 	amd_deinit_side_info(ntb);
1251 	callout_drain(&ntb->hb_timer);
1252 	amd_ntb_deinit_isr(ntb);
1253 	mtx_destroy(&ntb->db_mask_lock);
1254 	pci_disable_busmaster(ntb->device);
1255 	amd_ntb_unmap_pci_bars(ntb);
1256 
1257 	return (0);
1258 }
1259 
/*
 * Method dispatch table wiring this driver into newbus and the generic
 * NTB KPI.  Each entry pairs an interface method with its local
 * implementation; DEVMETHOD_END terminates the table.
 */
static device_method_t ntb_amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_ntb_probe),
	DEVMETHOD(device_attach,	amd_ntb_attach),
	DEVMETHOD(device_detach,	amd_ntb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_location_str, ntb_child_location_str),
	DEVMETHOD(bus_print_child,	ntb_print_child),

	/* NTB interface */
	DEVMETHOD(ntb_port_number,	amd_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count,	amd_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number,	amd_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx, 	amd_ntb_peer_port_idx),
	/* Link management */
	DEVMETHOD(ntb_link_is_up,	amd_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable,	amd_ntb_link_enable),
	DEVMETHOD(ntb_link_disable,	amd_ntb_link_disable),
	/* Memory windows */
	DEVMETHOD(ntb_mw_count,		amd_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range,	amd_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans,	amd_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans,	amd_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_set_wc,	amd_ntb_mw_set_wc),
	DEVMETHOD(ntb_mw_get_wc,	amd_ntb_mw_get_wc),
	/* Doorbells */
	DEVMETHOD(ntb_db_valid_mask,	amd_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count,	amd_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask,	amd_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_read,		amd_ntb_db_read),
	DEVMETHOD(ntb_db_clear,		amd_ntb_db_clear),
	DEVMETHOD(ntb_db_set_mask,	amd_ntb_db_set_mask),
	DEVMETHOD(ntb_db_clear_mask,	amd_ntb_db_clear_mask),
	DEVMETHOD(ntb_peer_db_set,	amd_ntb_peer_db_set),
	/* Scratchpads */
	DEVMETHOD(ntb_spad_count,	amd_ntb_spad_count),
	DEVMETHOD(ntb_spad_read,	amd_ntb_spad_read),
	DEVMETHOD(ntb_spad_write,	amd_ntb_spad_write),
	DEVMETHOD(ntb_peer_spad_read,	amd_ntb_peer_spad_read),
	DEVMETHOD(ntb_peer_spad_write,	amd_ntb_peer_spad_write),
	DEVMETHOD_END
};
1299 
/* Declare the driver class and register it on the pci bus. */
static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
    sizeof(struct amd_ntb_softc));
DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, ntb_hw_devclass, NULL, NULL);
/* Requires the generic ntb module (min/preferred/max version 1). */
MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_amd, 1);
/* Export the PNP table so the loader can auto-load on device match. */
PCI_PNP_INFO(amd_ntb_devs);
1306