xref: /freebsd/sys/dev/ntb/ntb_hw/ntb_hw_amd.c (revision 6683132d54bd6d589889e43dabdc53d35e38a028)
1 /*-
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright (C) 2019 Advanced Micro Devices, Inc.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * BSD LICENSE
14  *
15  * Copyright (c) 2019 Advanced Micro Devices, Inc.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of AMD corporation nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  * Contact Information :
42  * Rajesh Kumar <rajesh1.kumar@amd.com>
43  */
44 
45 /*
46  * The Non-Transparent Bridge (NTB) is a device that allows you to connect
47  * two or more systems using PCI-e links, providing remote memory access.
48  *
49  * This module contains a driver for NTB hardware in AMD CPUs
50  *
51  * Much of the code in this module is shared with Linux. Any patches may
52  * be picked up and redistributed in Linux with a dual GPL/BSD license.
53  */
54 
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
57 
58 #include <sys/param.h>
59 #include <sys/kernel.h>
60 #include <sys/systm.h>
61 #include <sys/bus.h>
62 #include <sys/malloc.h>
63 #include <sys/module.h>
64 #include <sys/mutex.h>
65 #include <sys/rman.h>
66 #include <sys/sbuf.h>
67 #include <sys/sysctl.h>
68 
69 #include <vm/vm.h>
70 #include <vm/pmap.h>
71 
72 #include <machine/bus.h>
73 
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 
77 #include "ntb_hw_amd.h"
78 #include "dev/ntb/ntb.h"
79 
/* Malloc type for this driver's allocations (e.g. the MSI-X vector array). */
MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations");

/* PCI vendor/device IDs this driver attaches to; matched in amd_ntb_probe(). */
struct pci_device_table amd_ntb_devs[] = {
	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID),
	  PCI_DESCR("AMD Non-Transparent Bridge") }
};

/* Log verbosity, settable/tunable via hw.ntb.debug_level. */
static unsigned g_amd_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose");

/*
 * Debug printf gated on the sysctl above.  NOTE: the macro body references
 * the identifier "ntb" from the caller's scope, so every caller must have
 * its softc pointer named "ntb".
 */
#define amd_ntb_printf(lvl, ...) do {				\
        if (lvl <= g_amd_ntb_hw_debug_level)			\
                device_printf(ntb->device, __VA_ARGS__);	\
} while (0)
95 
96 /*
97  * AMD NTB INTERFACE ROUTINES
98  */
99 static int
100 amd_ntb_port_number(device_t dev)
101 {
102 	struct amd_ntb_softc *ntb = device_get_softc(dev);
103 
104 	amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type);
105 
106 	switch (ntb->conn_type) {
107 	case NTB_CONN_PRI:
108 		return (NTB_PORT_PRI_USD);
109 	case NTB_CONN_SEC:
110 		return (NTB_PORT_SEC_DSD);
111 	default:
112 		break;
113 	}
114 
115 	return (-EINVAL);
116 }
117 
118 static int
119 amd_ntb_peer_port_count(device_t dev)
120 {
121 	struct amd_ntb_softc *ntb = device_get_softc(dev);
122 
123 	amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT);
124 	return (NTB_DEF_PEER_CNT);
125 }
126 
127 static int
128 amd_ntb_peer_port_number(device_t dev, int pidx)
129 {
130 	struct amd_ntb_softc *ntb = device_get_softc(dev);
131 
132 	amd_ntb_printf(1, "%s: pidx %d conn type %d\n",
133 	    __func__, pidx, ntb->conn_type);
134 
135 	if (pidx != NTB_DEF_PEER_IDX)
136 		return (-EINVAL);
137 
138 	switch (ntb->conn_type) {
139 	case NTB_CONN_PRI:
140 		return (NTB_PORT_SEC_DSD);
141 	case NTB_CONN_SEC:
142 		return (NTB_PORT_PRI_USD);
143 	default:
144 		break;
145 	}
146 
147 	return (-EINVAL);
148 }
149 
150 static int
151 amd_ntb_peer_port_idx(device_t dev, int port)
152 {
153 	struct amd_ntb_softc *ntb = device_get_softc(dev);
154 	int peer_port;
155 
156 	peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX);
157 
158 	amd_ntb_printf(1, "%s: port %d peer_port %d\n",
159 	    __func__, port, peer_port);
160 
161 	if (peer_port == -EINVAL || port != peer_port)
162 		return (-EINVAL);
163 
164 	return (0);
165 }
166 
167 /*
168  * AMD NTB INTERFACE - LINK ROUTINES
169  */
170 static inline int
171 amd_link_is_up(struct amd_ntb_softc *ntb)
172 {
173 
174 	amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n",
175 	    __func__, ntb->peer_sta, ntb->cntl_sta);
176 
177 	if (!ntb->peer_sta)
178 		return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta));
179 
180 	return (0);
181 }
182 
183 static inline enum ntb_speed
184 amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb)
185 {
186 
187 	if (!amd_link_is_up(ntb))
188 		return (NTB_SPEED_NONE);
189 
190 	return (NTB_LNK_STA_SPEED(ntb->lnk_sta));
191 }
192 
193 static inline enum ntb_width
194 amd_ntb_link_sta_width(struct amd_ntb_softc *ntb)
195 {
196 
197 	if (!amd_link_is_up(ntb))
198 		return (NTB_WIDTH_NONE);
199 
200 	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
201 }
202 
203 static bool
204 amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
205 {
206 	struct amd_ntb_softc *ntb = device_get_softc(dev);
207 
208 	if (speed != NULL)
209 		*speed = amd_ntb_link_sta_speed(ntb);
210 	if (width != NULL)
211 		*width = amd_ntb_link_sta_width(ntb);
212 
213 	return (amd_link_is_up(ntb));
214 }
215 
/*
 * NTB interface link-enable: advertise this side as ready, unmask event
 * interrupts, and (on the primary side only) set the PMM/SMM control bits
 * to bring the link up.  The requested max speed/width are not used by
 * this hardware.
 */
static int
amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed,
    enum ntb_width max_width)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Mark this side READY in SIDEINFO before enabling interrupts. */
	amd_init_side_info(ntb);

	/* Enable event interrupt */
	ntb->int_mask &= ~AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	/* Only the primary side drives the link control bits below. */
	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Enabling Link.\n", __func__);

	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
244 
/*
 * NTB interface link-disable: mirror of amd_ntb_link_enable() — withdraw
 * the READY advertisement, mask event interrupts, and (primary side only)
 * clear the PMM/SMM control bits to take the link down.
 */
static int
amd_ntb_link_disable(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Clear this side's READY bit in SIDEINFO. */
	amd_deinit_side_info(ntb);

	/* Disable event interrupt */
	ntb->int_mask |= AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	/* Only the primary side drives the link control bits below. */
	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Disabling Link.\n", __func__);

	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
272 
273 /*
274  * AMD NTB memory window routines
275  */
276 static uint8_t
277 amd_ntb_mw_count(device_t dev)
278 {
279 	struct amd_ntb_softc *ntb = device_get_softc(dev);
280 
281 	return (ntb->mw_count);
282 }
283 
284 static int
285 amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
286     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
287     bus_addr_t *plimit)
288 {
289 	struct amd_ntb_softc *ntb = device_get_softc(dev);
290 	struct amd_ntb_pci_bar_info *bar_info;
291 
292 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
293 		return (EINVAL);
294 
295 	bar_info = &ntb->bar_info[mw_idx+1];
296 
297 	if (base != NULL)
298 		*base = bar_info->pbase;
299 
300 	if (vbase != NULL)
301 		*vbase = bar_info->vbase;
302 
303 	if (align != NULL)
304 		*align = bar_info->size;
305 
306 	if (size != NULL)
307 		*size = bar_info->size;
308 
309 	if (align_size != NULL)
310 		*align_size = 1;
311 
312 	if (plimit != NULL) {
313 		if (mw_idx != 0)
314 			*plimit = BUS_SPACE_MAXADDR;
315 		else
316 			*plimit = BUS_SPACE_MAXADDR_32BIT;
317 	}
318 
319 	amd_ntb_printf(1, "%s: mw %d padd %p vadd %p psize 0x%lx "
320 	    "align 0x%lx asize 0x%lx alimit %p\n", __func__, mw_idx,
321 	    (void *)*base, (void *)*vbase, (uint64_t)*size, (uint64_t)*align,
322 	    (uint64_t)*align_size, (void *)*plimit);
323 
324 	return (0);
325 }
326 
/*
 * NTB interface: program the translation (XLAT) and limit registers for
 * memory window mw_idx so that peer accesses land at bus address "addr"
 * for up to "size" bytes.  Returns EINVAL for a bad index or a size larger
 * than the BAR backing the window.
 */
static int
amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	struct amd_ntb_pci_bar_info *bar_info;

	/* NB: mw_idx is unsigned, so the "< 0" arm can never fire. */
	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
		return (EINVAL);

	/* Window n is backed by BAR n+1; BAR 0 holds the config registers. */
	bar_info = &ntb->bar_info[mw_idx+1];

	/* make sure the range fits in the usable mw size */
	if (size > bar_info->size) {
		amd_ntb_printf(0, "%s: size 0x%x greater than mw_size 0x%x\n",
		    __func__, (uint32_t)size, (uint32_t)bar_info->size);
		return (EINVAL);
	}

	amd_ntb_printf(1, "%s: mw %d mw_size 0x%x size 0x%x base %p\n",
	    __func__, mw_idx, (uint32_t)bar_info->size,
	    (uint32_t)size, (void *)bar_info->pci_bus_handle);

	/*
	 * AMD NTB XLAT and Limit registers needs to be written only after
	 * link enable
	 *
	 * set and verify setting the translation address
	 */
	amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr);
	amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%lx addr %p\n",
	    __func__, mw_idx, bar_info->xlat_off,
	    amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr);

	/*
	 * set and verify setting the limit; window 0's limit register is
	 * 32 bits wide, the others are 64 bits.
	 *
	 * NOTE(review): the limit is written with amd_ntb_reg_write() but
	 * read back with amd_ntb_peer_reg_read() — confirm against the
	 * register map that both accessors reach the same register.
	 */
	if (mw_idx != 0) {
		amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size);
		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%lx limit 0x%x\n",
		    __func__, bar_info->limit_off,
		    amd_ntb_peer_reg_read(8, bar_info->limit_off), (uint32_t)size);
	} else {
		amd_ntb_reg_write(4, bar_info->limit_off, (uint64_t)size);
		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n",
		    __func__, bar_info->limit_off,
		    amd_ntb_peer_reg_read(4, bar_info->limit_off), (uint32_t)size);
	}

	return (0);
}
375 
376 static int
377 amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
378 {
379 	struct amd_ntb_softc *ntb = device_get_softc(dev);
380 
381 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
382 
383 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
384 		return (EINVAL);
385 
386 	return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0));
387 }
388 
389 static int
390 amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode)
391 {
392 	struct amd_ntb_softc *ntb = device_get_softc(dev);
393 	struct amd_ntb_pci_bar_info *bar_info;
394 	int rc;
395 
396 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
397 		return (EINVAL);
398 
399 	bar_info = &ntb->bar_info[mw_idx+1];
400 	if (mode == bar_info->map_mode)
401 		return (0);
402 
403 	rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode);
404 	if (rc == 0)
405 		bar_info->map_mode = mode;
406 
407 	return (rc);
408 }
409 
410 static int
411 amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode)
412 {
413 	struct amd_ntb_softc *ntb = device_get_softc(dev);
414 	struct amd_ntb_pci_bar_info *bar_info;
415 
416 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
417 
418 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
419 		return (EINVAL);
420 
421 	bar_info = &ntb->bar_info[mw_idx+1];
422 	*mode = bar_info->map_mode;
423 
424 	return (0);
425 }
426 
427 /*
428  * AMD NTB doorbell routines
429  */
430 static int
431 amd_ntb_db_vector_count(device_t dev)
432 {
433 	struct amd_ntb_softc *ntb = device_get_softc(dev);
434 
435 	amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__, ntb->db_count);
436 
437 	return (ntb->db_count);
438 }
439 
440 static uint64_t
441 amd_ntb_db_valid_mask(device_t dev)
442 {
443 	struct amd_ntb_softc *ntb = device_get_softc(dev);
444 
445 	amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n",
446 	    __func__, ntb->db_valid_mask);
447 
448 	return (ntb->db_valid_mask);
449 }
450 
451 static uint64_t
452 amd_ntb_db_vector_mask(device_t dev, uint32_t vector)
453 {
454 	struct amd_ntb_softc *ntb = device_get_softc(dev);
455 
456 	amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n",
457 	    __func__, vector, ntb->db_count, ntb->db_valid_mask);
458 
459 	if (vector < 0 || vector >= ntb->db_count)
460 		return (0);
461 
462 	return (ntb->db_valid_mask & (1 << vector));
463 }
464 
465 static uint64_t
466 amd_ntb_db_read(device_t dev)
467 {
468 	struct amd_ntb_softc *ntb = device_get_softc(dev);
469 	uint64_t dbstat_off;
470 
471 	dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET);
472 
473 	amd_ntb_printf(1, "%s: dbstat_off 0x%lx\n", __func__, dbstat_off);
474 
475 	return (dbstat_off);
476 }
477 
478 static void
479 amd_ntb_db_clear(device_t dev, uint64_t db_bits)
480 {
481 	struct amd_ntb_softc *ntb = device_get_softc(dev);
482 
483 	amd_ntb_printf(1, "%s: db_bits 0x%lx\n", __func__, db_bits);
484 	amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits);
485 }
486 
/*
 * NTB interface: mask (disable) the given doorbell bits.  The cached
 * db_mask and the hardware register are updated together under the
 * doorbell spin lock so concurrent mask/unmask calls don't lose bits.
 */
static void
amd_ntb_db_set_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%lx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask |= db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}
500 
/*
 * NTB interface: unmask (enable) the given doorbell bits — inverse of
 * amd_ntb_db_set_mask(), with the same locking discipline.
 */
static void
amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);

	DB_MASK_LOCK(ntb);
	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%lx\n",
	    __func__, ntb->db_mask, db_bits);

	ntb->db_mask &= ~db_bits;
	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}
514 
515 static void
516 amd_ntb_peer_db_set(device_t dev, uint64_t db_bits)
517 {
518 	struct amd_ntb_softc *ntb = device_get_softc(dev);
519 
520 	amd_ntb_printf(1, "%s: db_bits 0x%lx\n", __func__, db_bits);
521 	amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits);
522 }
523 
524 /*
525  * AMD NTB scratchpad routines
526  */
527 static uint8_t
528 amd_ntb_spad_count(device_t dev)
529 {
530 	struct amd_ntb_softc *ntb = device_get_softc(dev);
531 
532 	amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__, ntb->spad_count);
533 
534 	return (ntb->spad_count);
535 }
536 
537 static int
538 amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
539 {
540 	struct amd_ntb_softc *ntb = device_get_softc(dev);
541 	uint32_t offset;
542 
543 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
544 
545 	if (idx < 0 || idx >= ntb->spad_count)
546 		return (EINVAL);
547 
548 	offset = ntb->self_spad + (idx << 2);
549 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
550 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
551 
552 	return (0);
553 }
554 
555 static int
556 amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
557 {
558 	struct amd_ntb_softc *ntb = device_get_softc(dev);
559 	uint32_t offset;
560 
561 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
562 
563 	if (idx < 0 || idx >= ntb->spad_count)
564 		return (EINVAL);
565 
566 	offset = ntb->self_spad + (idx << 2);
567 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
568 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
569 
570 	return (0);
571 }
572 
573 static void
574 amd_ntb_spad_clear(struct amd_ntb_softc *ntb)
575 {
576 	uint8_t i;
577 
578 	for (i = 0; i < ntb->spad_count; i++)
579 		amd_ntb_spad_write(ntb->device, i, 0);
580 }
581 
582 static int
583 amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
584 {
585 	struct amd_ntb_softc *ntb = device_get_softc(dev);
586 	uint32_t offset;
587 
588 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
589 
590 	if (idx < 0 || idx >= ntb->spad_count)
591 		return (EINVAL);
592 
593 	offset = ntb->peer_spad + (idx << 2);
594 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
595 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
596 
597 	return (0);
598 }
599 
600 static int
601 amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
602 {
603 	struct amd_ntb_softc *ntb = device_get_softc(dev);
604 	uint32_t offset;
605 
606 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
607 
608 	if (idx < 0 || idx >= ntb->spad_count)
609 		return (EINVAL);
610 
611 	offset = ntb->peer_spad + (idx << 2);
612 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
613 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
614 
615 	return (0);
616 }
617 
618 
619 /*
620  * AMD NTB INIT
621  */
622 static int
623 amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS)
624 {
625 	struct amd_ntb_softc* ntb = arg1;
626 	struct sbuf *sb;
627 	int rc = 0;
628 
629 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
630 	if (sb == NULL)
631 		return (sb->s_error);
632 
633 	sbuf_printf(sb, "NTB AMD Hardware info:\n\n");
634 	sbuf_printf(sb, "AMD NTB side: %s\n",
635 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
636 	sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta);
637 
638 	if (!amd_link_is_up(ntb))
639 		sbuf_printf(sb, "AMD Link Status: Down\n");
640 	else {
641 		sbuf_printf(sb, "AMD Link Status: Up\n");
642 		sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n",
643 		    NTB_LNK_STA_SPEED(ntb->lnk_sta));
644 		sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n",
645 		    NTB_LNK_STA_WIDTH(ntb->lnk_sta));
646 	}
647 
648 	sbuf_printf(sb, "AMD Memory window count: %d\n",
649 	    ntb->mw_count);
650 	sbuf_printf(sb, "AMD Spad count: %d\n",
651 	    ntb->spad_count);
652 	sbuf_printf(sb, "AMD Doorbell count: %d\n",
653 	    ntb->db_count);
654 	sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n",
655 	    ntb->msix_vec_count);
656 	sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n",
657 	    ntb->db_valid_mask);
658 	sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n",
659 	    amd_ntb_reg_read(4, AMD_DBMASK_OFFSET));
660 	sbuf_printf(sb, "AMD Doorbell: 0x%x\n",
661 	    amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET));
662 	sbuf_printf(sb, "AMD NTB Incoming XLAT: \n");
663 	sbuf_printf(sb, "AMD XLAT1: 0x%lx\n",
664 	    amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET));
665 	sbuf_printf(sb, "AMD XLAT23: 0x%lx\n",
666 	    amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET));
667 	sbuf_printf(sb, "AMD XLAT45: 0x%lx\n",
668 	    amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET));
669 	sbuf_printf(sb, "AMD LMT1: 0x%x\n",
670 	    amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET));
671 	sbuf_printf(sb, "AMD LMT23: 0x%lx\n",
672 	    amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET));
673 	sbuf_printf(sb, "AMD LMT45: 0x%lx\n",
674 	    amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET));
675 
676 	rc = sbuf_finish(sb);
677 	sbuf_delete(sb);
678 	return (rc);
679 }
680 
681 static void
682 amd_ntb_sysctl_init(struct amd_ntb_softc *ntb)
683 {
684 	struct sysctl_oid_list *globals;
685 	struct sysctl_ctx_list *ctx;
686 
687 	ctx = device_get_sysctl_ctx(ntb->device);
688 	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
689 
690 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info",
691 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0,
692 	    amd_ntb_hw_info_handler, "A", "AMD NTB HW Information");
693 }
694 
695 /*
696  * Polls the HW link status register(s); returns true if something has changed.
697  */
/*
 * Polls the HW link status register(s); returns true if something has changed.
 *
 * Caches the SIDEINFO active bit in cntl_sta and, on a change, refreshes
 * lnk_sta from PCI config space so speed/width decoding stays current.
 */
static bool
amd_ntb_poll_link(struct amd_ntb_softc *ntb)
{
	uint32_t fullreg, reg, stat;

	/* Only the active bit matters for "did the link state change". */
	fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET);
	reg = fullreg & NTB_LIN_STA_ACTIVE_BIT;

	if (reg == ntb->cntl_sta)
		return (false);

	amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n",
	    __func__, fullreg, ntb->cntl_sta);

	ntb->cntl_sta = reg;

	/* Refresh the cached PCIe link status from config space. */
	stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4);

	amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n",
	    __func__, stat, ntb->lnk_sta);

	ntb->lnk_sta = stat;

	return (true);
}
723 
724 static void
725 amd_link_hb(void *arg)
726 {
727 	struct amd_ntb_softc *ntb = arg;
728 
729 	if (amd_ntb_poll_link(ntb))
730 		ntb_link_event(ntb->device);
731 
732 	if (!amd_link_is_up(ntb)) {
733 		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
734 		    amd_link_hb, ntb);
735 	} else {
736 		callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10),
737 		    amd_link_hb, ntb);
738 	}
739 }
740 
741 static void
742 amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec)
743 {
744 	if (vec < AMD_DB_CNT)
745 		ntb_db_event(ntb->device, vec);
746 	else
747 		amd_ntb_printf(0, "Invalid vector %d\n", vec);
748 }
749 
750 static void
751 amd_ntb_vec_isr(void *arg)
752 {
753 	struct amd_ntb_vec *nvec = arg;
754 
755 	amd_ntb_interrupt(nvec->ntb, nvec->num);
756 }
757 
/* Single-vector (MSI/INTx) interrupt handler. */
static void
amd_ntb_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	amd_ntb_interrupt(arg, 0);
}
764 
/*
 * Set this side's READY bit in the SIDEINFO register if not already set.
 */
static void
amd_init_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
	}
	/* Read back after the write — presumably to flush the posted write;
	 * the value is intentionally discarded.  TODO(review): confirm. */
	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
}
777 
/*
 * Clear this side's READY bit in the SIDEINFO register if it is set.
 */
static void
amd_deinit_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
		/* Read back — presumably flushes the posted write. */
		amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	}
}
790 
791 static int
792 amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi,
793     bool intx)
794 {
795 	uint16_t i;
796 	int flags = 0, rc = 0;
797 
798 	flags |= RF_ACTIVE;
799 	if (intx)
800 		flags |= RF_SHAREABLE;
801 
802 	for (i = 0; i < num_vectors; i++) {
803 
804 		/* RID should be 0 for intx */
805 		if (intx)
806 			ntb->int_info[i].rid = i;
807 		else
808 			ntb->int_info[i].rid = i + 1;
809 
810 		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
811 		    SYS_RES_IRQ, &ntb->int_info[i].rid, flags);
812 		if (ntb->int_info[i].res == NULL) {
813 			amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n");
814 			return (ENOMEM);
815 		}
816 
817 		ntb->int_info[i].tag = NULL;
818 		ntb->allocated_interrupts++;
819 
820 		if (msi || intx) {
821 			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
822 			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr,
823 			    ntb, &ntb->int_info[i].tag);
824 		} else {
825 			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
826 			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr,
827 			    &ntb->msix_vec[i], &ntb->int_info[i].tag);
828 		}
829 
830 		if (rc != 0) {
831 			amd_ntb_printf(0, "bus_setup_intr %d failed\n", i);
832 			return (ENXIO);
833 		}
834 	}
835 
836 	return (0);
837 }
838 
839 static int
840 amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors)
841 {
842 	uint8_t i;
843 
844 	ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB,
845 	    M_ZERO | M_WAITOK);
846 
847 	for (i = 0; i < max_vectors; i++) {
848 		ntb->msix_vec[i].num = i;
849 		ntb->msix_vec[i].ntb = ntb;
850 	}
851 
852 	return (0);
853 }
854 
855 static void
856 amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb)
857 {
858 	if (ntb->msix_vec_count) {
859 		pci_release_msi(ntb->device);
860 		ntb->msix_vec_count = 0;
861 	}
862 
863 	if (ntb->msix_vec != NULL) {
864 		free(ntb->msix_vec, M_AMD_NTB);
865 		ntb->msix_vec = NULL;
866 	}
867 }
868 
/*
 * Interrupt setup with graceful degradation: try one MSI-X message per
 * doorbell; if that is not possible fall back to a single MSI message,
 * and failing that to legacy INTx.  db_count/msix_vec_count are adjusted
 * to reflect whichever mode was achieved.
 */
static int
amd_ntb_init_isr(struct amd_ntb_softc *ntb)
{
	uint32_t supported_vectors, num_vectors;
	bool msi = false, intx = false;
	int rc = 0;

	/* Start with every doorbell masked. */
	ntb->db_mask = ntb->db_valid_mask;

	rc = amd_ntb_create_msix_vec(ntb, AMD_MSIX_VECTOR_CNT);
	if (rc != 0) {
		amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc);
		return (ENOMEM);
	}

	/*
	 * Check the number of MSI-X message supported by the device.
	 * Minimum necessary MSI-X message count should be equal to db_count
	 */
	supported_vectors = pci_msix_count(ntb->device);
	num_vectors = MIN(supported_vectors, ntb->db_count);
	if (num_vectors < ntb->db_count) {
		amd_ntb_printf(0, "No minimum msix: supported %d db %d\n",
		    supported_vectors, ntb->db_count);
		msi = true;
		goto err_msix_enable;
	}

	/* Allocate the necessary number of MSI-x messages */
	rc = pci_alloc_msix(ntb->device, &num_vectors);
	if (rc != 0) {
		amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
		msi = true;
		goto err_msix_enable;
	}

	if (num_vectors < ntb->db_count) {
		amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors);
		msi = true;
		/*
		 * Else set ntb->db_count = ntb->msix_vec_count = num_vectors,
		 * msi=false and don't release msi
		 */
	}

err_msix_enable:

	/* MSI fallback: drop the MSI-X state and request a single message. */
	if (msi) {
		free(ntb->msix_vec, M_AMD_NTB);
		ntb->msix_vec = NULL;
		pci_release_msi(ntb->device);
		num_vectors = 1;
		rc = pci_alloc_msi(ntb->device, &num_vectors);
		if (rc != 0) {
			/* INTx fallback: MSI allocation failed too. */
			amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
			msi = false;
			intx = true;
		}
	}

	ntb->db_count = ntb->msix_vec_count = num_vectors;

	if (intx) {
		num_vectors = 1;
		ntb->db_count = 1;
		ntb->msix_vec_count = 0;
	}

	amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n",
	    __func__, ntb->db_count, ntb->msix_vec_count, (int)msi, (int)intx);

	rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx);
	if (rc != 0) {
		amd_ntb_printf(0, "Error setting up isr: %d\n", rc);
		amd_ntb_free_msix_vec(ntb);
	}

	return (rc);
}
948 
/*
 * Tear down interrupts: mask every doorbell, then release each IRQ's
 * handler and resource before freeing the MSI-X state.  Safe against
 * partially-completed amd_ntb_setup_isr() thanks to the NULL checks.
 */
static void
amd_ntb_deinit_isr(struct amd_ntb_softc *ntb)
{
	struct amd_ntb_int_info *current_int;
	int i;

	/* Mask all doorbell interrupts */
	ntb->db_mask = ntb->db_valid_mask;
	amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask);

	for (i = 0; i < ntb->allocated_interrupts; i++) {
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	amd_ntb_free_msix_vec(ntb);
}
972 
973 static enum amd_ntb_conn_type
974 amd_ntb_get_topo(struct amd_ntb_softc *ntb)
975 {
976 	uint32_t info;
977 
978 	info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
979 
980 	if (info & AMD_SIDE_MASK)
981 		return (NTB_CONN_SEC);
982 
983 	return (NTB_CONN_PRI);
984 }
985 
/*
 * Per-device initialization: fixed window/spad/doorbell counts, split of
 * the scratchpad space between the two sides, heartbeat callout start,
 * and initial masking of event interrupts.  Returns EINVAL for an
 * unrecognized topology.
 */
static int
amd_ntb_init_dev(struct amd_ntb_softc *ntb)
{
	ntb->mw_count		 = AMD_MW_CNT;
	ntb->spad_count		 = AMD_SPADS_CNT;
	ntb->db_count		 = AMD_DB_CNT;
	ntb->db_valid_mask	 = (1ull << ntb->db_count) - 1;
	/* Spin mutex: the doorbell mask is touched from interrupt context. */
	mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
	case NTB_CONN_SEC:
		/* Each side gets half of the scratchpad registers. */
		ntb->spad_count >>= 1;

		/* Primary uses the low half, secondary the high half. */
		if (ntb->conn_type == NTB_CONN_PRI) {
			ntb->self_spad = 0;
			ntb->peer_spad = 0x20;
		} else {
			ntb->self_spad = 0x20;
			ntb->peer_spad = 0;
		}

		/* Start the link heartbeat poller (MPSAFE callout). */
		callout_init(&ntb->hb_timer, 1);
		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
		    amd_link_hb, ntb);

		break;

	default:
		amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n",
		    ntb->conn_type);
		return (EINVAL);
	}

	/* Mask event interrupts until the link is explicitly enabled. */
	ntb->int_mask = AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	return (0);
}
1025 
1026 static int
1027 amd_ntb_init(struct amd_ntb_softc *ntb)
1028 {
1029 	int rc = 0;
1030 
1031 	ntb->conn_type = amd_ntb_get_topo(ntb);
1032 	amd_ntb_printf(0, "AMD NTB Side: %s\n",
1033 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
1034 
1035 	rc = amd_ntb_init_dev(ntb);
1036 	if (rc != 0)
1037 		return (rc);
1038 
1039 	rc = amd_ntb_init_isr(ntb);
1040 	if (rc != 0)
1041 		return (rc);
1042 
1043 	return (0);
1044 }
1045 
1046 static void
1047 print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar,
1048     const char *kind)
1049 {
1050 	amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
1051 	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
1052 	    (char *)bar->vbase + bar->size - 1, (void *)bar->pbase,
1053 	    (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind);
1054 }
1055 
1056 static void
1057 save_bar_parameters(struct amd_ntb_pci_bar_info *bar)
1058 {
1059 	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
1060 	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
1061 	bar->pbase = rman_get_start(bar->pci_resource);
1062 	bar->size = rman_get_size(bar->pci_resource);
1063 	bar->vbase = rman_get_virtual(bar->pci_resource);
1064 	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
1065 }
1066 
1067 static int
1068 map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar)
1069 {
1070 	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
1071 	    &bar->pci_resource_id, RF_ACTIVE);
1072 	if (bar->pci_resource == NULL)
1073 		return (ENXIO);
1074 
1075 	save_bar_parameters(bar);
1076 	print_map_success(ntb, bar, "mmr");
1077 
1078 	return (0);
1079 }
1080 
/*
 * Map all PCI BARs: BAR 0 carries the NTB config/control registers; BARs
 * 1, 2/3, and 4/5 back memory windows 0-2 and get their XLAT/LMT register
 * offsets recorded.  On any failure everything mapped so far is left for
 * amd_ntb_unmap_pci_bars() to release.
 */
static int
amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb)
{
	int rc = 0;

	/* NTB Config/Control registers - BAR 0 */
	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	/* Memory Window 0 BAR - BAR 1*/
	ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET;

	/* Memory Window 1 BAR - BAR 2&3 */
	ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET;

	/* Memory Window 2 BAR - BAR 4&5 */
	ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET;

out:
	if (rc != 0)
		amd_ntb_printf(0, "unable to allocate pci resource\n");

	return (rc);
}
1122 
1123 static void
1124 amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb)
1125 {
1126 	struct amd_ntb_pci_bar_info *bar_info;
1127 	int i;
1128 
1129 	for (i = 0; i < NTB_MAX_BARS; i++) {
1130 		bar_info = &ntb->bar_info[i];
1131 		if (bar_info->pci_resource != NULL)
1132 			bus_release_resource(ntb->device, SYS_RES_MEMORY,
1133 			    bar_info->pci_resource_id, bar_info->pci_resource);
1134 	}
1135 }
1136 
1137 static int
1138 amd_ntb_probe(device_t device)
1139 {
1140 	const struct pci_device_table *tbl;
1141 
1142 	tbl = PCI_MATCH(device, amd_ntb_devs);
1143 	if (tbl == NULL)
1144 		return (ENXIO);
1145 
1146 	device_set_desc(device, tbl->descr);
1147 
1148 	return (BUS_PROBE_GENERIC);
1149 }
1150 
1151 static int
1152 amd_ntb_attach(device_t device)
1153 {
1154 	struct amd_ntb_softc *ntb = device_get_softc(device);
1155 	int error;
1156 
1157 	ntb->device = device;
1158 
1159 	/* Enable PCI bus mastering for "device" */
1160 	pci_enable_busmaster(ntb->device);
1161 
1162 	error = amd_ntb_map_pci_bars(ntb);
1163 	if (error)
1164 		goto out;
1165 
1166 	error = amd_ntb_init(ntb);
1167 	if (error)
1168 		goto out;
1169 
1170 	amd_init_side_info(ntb);
1171 
1172 	amd_ntb_spad_clear(ntb);
1173 
1174 	amd_ntb_sysctl_init(ntb);
1175 
1176 	/* Attach children to this controller */
1177 	error = ntb_register_device(device);
1178 
1179 out:
1180 	if (error)
1181 		amd_ntb_detach(device);
1182 
1183 	return (error);
1184 }
1185 
/*
 * newbus detach: tear down in reverse order of attach — detach the NTB
 * child devices, run peer/side-info deinit, drain the hb_timer callout,
 * release interrupt resources, destroy the doorbell-mask lock, disable
 * bus mastering, and finally release the PCI BAR resources.
 *
 * NOTE(review): also invoked from the amd_ntb_attach() error path, so
 * each step presumably tolerates a partially initialized softc (e.g.
 * callout_drain()/mtx_destroy() on never-initialized objects) — confirm.
 */
static int
amd_ntb_detach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);

	ntb_unregister_device(device);
	amd_deinit_side_info(ntb);
	callout_drain(&ntb->hb_timer);
	amd_ntb_deinit_isr(ntb);
	mtx_destroy(&ntb->db_mask_lock);
	pci_disable_busmaster(ntb->device);
	amd_ntb_unmap_pci_bars(ntb);

	return (0);
}
1201 
/*
 * Method table for the AMD NTB driver: newbus device lifecycle, the bus
 * interface used for the NTB child devices, and the full ntb_if KPI
 * (ports, link, memory windows, doorbells, scratchpads).
 */
static device_method_t ntb_amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_ntb_probe),
	DEVMETHOD(device_attach,	amd_ntb_attach),
	DEVMETHOD(device_detach,	amd_ntb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_location_str, ntb_child_location_str),
	DEVMETHOD(bus_print_child,	ntb_print_child),

	/* NTB interface */
	DEVMETHOD(ntb_port_number,	amd_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count,	amd_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number,	amd_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx, 	amd_ntb_peer_port_idx),
	DEVMETHOD(ntb_link_is_up,	amd_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable,	amd_ntb_link_enable),
	DEVMETHOD(ntb_link_disable,	amd_ntb_link_disable),
	DEVMETHOD(ntb_mw_count,		amd_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range,	amd_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans,	amd_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans,	amd_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_set_wc,	amd_ntb_mw_set_wc),
	DEVMETHOD(ntb_mw_get_wc,	amd_ntb_mw_get_wc),
	DEVMETHOD(ntb_db_valid_mask,	amd_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count,	amd_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask,	amd_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_read,		amd_ntb_db_read),
	DEVMETHOD(ntb_db_clear,		amd_ntb_db_clear),
	DEVMETHOD(ntb_db_set_mask,	amd_ntb_db_set_mask),
	DEVMETHOD(ntb_db_clear_mask,	amd_ntb_db_clear_mask),
	DEVMETHOD(ntb_peer_db_set,	amd_ntb_peer_db_set),
	DEVMETHOD(ntb_spad_count,	amd_ntb_spad_count),
	DEVMETHOD(ntb_spad_read,	amd_ntb_spad_read),
	DEVMETHOD(ntb_spad_write,	amd_ntb_spad_write),
	DEVMETHOD(ntb_peer_spad_read,	amd_ntb_peer_spad_read),
	DEVMETHOD(ntb_peer_spad_write,	amd_ntb_peer_spad_write),
	DEVMETHOD_END
};
1241 
/* Driver class "ntb_hw" with per-device amd_ntb_softc storage. */
static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
    sizeof(struct amd_ntb_softc));
/* Register the driver on the pci bus; depends on the ntb module. */
DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, ntb_hw_devclass, NULL, NULL);
MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_amd, 1);
/* Export the PCI ID table so the loader can auto-load this module. */
PCI_PNP_INFO(amd_ntb_devs);
1248