xref: /freebsd/sys/dev/ntb/ntb_hw/ntb_hw_amd.c (revision 0bf48626aaa33768078f5872b922b1487b3a9296)
1 /*-
2  * This file is provided under a dual BSD/GPLv2 license.  When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright (C) 2019 Advanced Micro Devices, Inc.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * BSD LICENSE
14  *
15  * Copyright (c) 2019 Advanced Micro Devices, Inc.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  * 1. Redistributions of source code must retain the above copyright
21  *    notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *    notice, this list of conditions and the following disclaimer in the
24  *    documentation and/or other materials provided with the distribution.
25  * 3. Neither the name of AMD corporation nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  *
41  * Contact Information :
42  * Rajesh Kumar <rajesh1.kumar@amd.com>
43  */
44 
45 /*
46  * The Non-Transparent Bridge (NTB) is a device that allows you to connect
47  * two or more systems using a PCI-e links, providing remote memory access.
48  *
49  * This module contains a driver for NTB hardware in AMD CPUs
50  *
51  * Much of the code in this module is shared with Linux. Any patches may
52  * be picked up and redistributed in Linux with a dual GPL/BSD license.
53  */
54 
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
57 
58 #include <sys/param.h>
59 #include <sys/kernel.h>
60 #include <sys/systm.h>
61 #include <sys/bus.h>
62 #include <sys/lock.h>
63 #include <sys/malloc.h>
64 #include <sys/module.h>
65 #include <sys/mutex.h>
66 #include <sys/rman.h>
67 #include <sys/sbuf.h>
68 #include <sys/sysctl.h>
69 
70 #include <vm/vm.h>
71 #include <vm/pmap.h>
72 
73 #include <machine/bus.h>
74 
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
77 
78 #include "ntb_hw_amd.h"
79 #include "dev/ntb/ntb.h"
80 
/* Allocation tag for this driver's dynamic memory (e.g. the MSI-X vector array). */
MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations");

/* PCI vendor/device IDs this driver claims; matched in amd_ntb_probe(). */
struct pci_device_table amd_ntb_devs[] = {
	{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID),
	  PCI_DESCR("AMD Non-Transparent Bridge") }
};

/* Tunable log verbosity: 0 = quiet, larger values enable more chatter. */
static unsigned g_amd_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose");

/*
 * Conditional logging helper.  NOTE: relies on a variable named 'ntb'
 * (struct amd_ntb_softc *) being in scope at every call site.
 */
#define amd_ntb_printf(lvl, ...) do {				\
        if (lvl <= g_amd_ntb_hw_debug_level)			\
                device_printf(ntb->device, __VA_ARGS__);	\
} while (0)
96 
#ifdef __i386__
/*
 * i386 has no native 8-byte bus-space accessors; synthesize them from two
 * 4-byte accesses (low dword first, little-endian layout).
 * NOTE(review): the two halves are not accessed atomically, so a concurrent
 * hardware update of a 64-bit register can be observed torn.
 */
static __inline uint64_t
bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{

	return (bus_space_read_4(tag, handle, offset) |
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
}

static __inline void
bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset, uint64_t val)
{

	bus_space_write_4(tag, handle, offset, val);
	bus_space_write_4(tag, handle, offset + 4, val >> 32);
}
#endif
116 
117 /*
118  * AMD NTB INTERFACE ROUTINES
119  */
120 static int
121 amd_ntb_port_number(device_t dev)
122 {
123 	struct amd_ntb_softc *ntb = device_get_softc(dev);
124 
125 	amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type);
126 
127 	switch (ntb->conn_type) {
128 	case NTB_CONN_PRI:
129 		return (NTB_PORT_PRI_USD);
130 	case NTB_CONN_SEC:
131 		return (NTB_PORT_SEC_DSD);
132 	default:
133 		break;
134 	}
135 
136 	return (-EINVAL);
137 }
138 
139 static int
140 amd_ntb_peer_port_count(device_t dev)
141 {
142 	struct amd_ntb_softc *ntb = device_get_softc(dev);
143 
144 	amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT);
145 	return (NTB_DEF_PEER_CNT);
146 }
147 
148 static int
149 amd_ntb_peer_port_number(device_t dev, int pidx)
150 {
151 	struct amd_ntb_softc *ntb = device_get_softc(dev);
152 
153 	amd_ntb_printf(1, "%s: pidx %d conn type %d\n",
154 	    __func__, pidx, ntb->conn_type);
155 
156 	if (pidx != NTB_DEF_PEER_IDX)
157 		return (-EINVAL);
158 
159 	switch (ntb->conn_type) {
160 	case NTB_CONN_PRI:
161 		return (NTB_PORT_SEC_DSD);
162 	case NTB_CONN_SEC:
163 		return (NTB_PORT_PRI_USD);
164 	default:
165 		break;
166 	}
167 
168 	return (-EINVAL);
169 }
170 
171 static int
172 amd_ntb_peer_port_idx(device_t dev, int port)
173 {
174 	struct amd_ntb_softc *ntb = device_get_softc(dev);
175 	int peer_port;
176 
177 	peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX);
178 
179 	amd_ntb_printf(1, "%s: port %d peer_port %d\n",
180 	    __func__, port, peer_port);
181 
182 	if (peer_port == -EINVAL || port != peer_port)
183 		return (-EINVAL);
184 
185 	return (0);
186 }
187 
188 /*
189  * AMD NTB INTERFACE - LINK ROUTINES
190  */
191 static inline int
192 amd_link_is_up(struct amd_ntb_softc *ntb)
193 {
194 
195 	amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n",
196 	    __func__, ntb->peer_sta, ntb->cntl_sta);
197 
198 	if (!ntb->peer_sta)
199 		return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta));
200 
201 	return (0);
202 }
203 
204 static inline enum ntb_speed
205 amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb)
206 {
207 
208 	if (!amd_link_is_up(ntb))
209 		return (NTB_SPEED_NONE);
210 
211 	return (NTB_LNK_STA_SPEED(ntb->lnk_sta));
212 }
213 
214 static inline enum ntb_width
215 amd_ntb_link_sta_width(struct amd_ntb_softc *ntb)
216 {
217 
218 	if (!amd_link_is_up(ntb))
219 		return (NTB_WIDTH_NONE);
220 
221 	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
222 }
223 
224 static bool
225 amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
226 {
227 	struct amd_ntb_softc *ntb = device_get_softc(dev);
228 
229 	if (speed != NULL)
230 		*speed = amd_ntb_link_sta_speed(ntb);
231 	if (width != NULL)
232 		*width = amd_ntb_link_sta_width(ntb);
233 
234 	return (amd_link_is_up(ntb));
235 }
236 
/*
 * ntb(4) link-enable entry point.  Marks this side ready, unmasks event
 * interrupts, and (on the primary side only) sets the link-control bits.
 * max_speed/max_width are accepted for interface compatibility but unused.
 * Returns 0, or EINVAL on the secondary side (which cannot drive the link).
 */
static int
amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed,
    enum ntb_width max_width)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Advertise readiness to the peer via SIDEINFO before unmasking. */
	amd_init_side_info(ntb);

	/* Enable event interrupt */
	ntb->int_mask &= ~AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	/* Only the primary side owns the link-control register. */
	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Enabling Link.\n", __func__);

	/* Read-modify-write of the control register: set both control bits. */
	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
265 
/*
 * ntb(4) link-disable entry point: the mirror image of amd_ntb_link_enable.
 * Withdraws readiness, masks event interrupts, and (primary side only)
 * clears the link-control bits.  Returns 0, or EINVAL on the secondary side.
 */
static int
amd_ntb_link_disable(device_t dev)
{
	struct amd_ntb_softc *ntb = device_get_softc(dev);
	uint32_t ntb_ctl;

	amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
	    __func__, ntb->int_mask, ntb->conn_type);

	/* Clear the SIDEINFO ready bit so the peer sees us go away. */
	amd_deinit_side_info(ntb);

	/* Disable event interrupt */
	ntb->int_mask |= AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	/* Only the primary side owns the link-control register. */
	if (ntb->conn_type == NTB_CONN_SEC)
		return (EINVAL);

	amd_ntb_printf(0, "%s: Disabling Link.\n", __func__);

	/* Read-modify-write: clear both control bits. */
	ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
	amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);

	return (0);
}
293 
294 /*
295  * AMD NTB memory window routines
296  */
297 static uint8_t
298 amd_ntb_mw_count(device_t dev)
299 {
300 	struct amd_ntb_softc *ntb = device_get_softc(dev);
301 
302 	return (ntb->mw_count);
303 }
304 
305 static int
306 amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
307     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
308     bus_addr_t *plimit)
309 {
310 	struct amd_ntb_softc *ntb = device_get_softc(dev);
311 	struct amd_ntb_pci_bar_info *bar_info;
312 
313 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
314 		return (EINVAL);
315 
316 	bar_info = &ntb->bar_info[mw_idx+1];
317 
318 	if (base != NULL)
319 		*base = bar_info->pbase;
320 
321 	if (vbase != NULL)
322 		*vbase = bar_info->vbase;
323 
324 	if (align != NULL)
325 		*align = bar_info->size;
326 
327 	if (size != NULL)
328 		*size = bar_info->size;
329 
330 	if (align_size != NULL)
331 		*align_size = 1;
332 
333 	if (plimit != NULL) {
334 		if (mw_idx != 0)
335 			*plimit = BUS_SPACE_MAXADDR;
336 		else
337 			*plimit = BUS_SPACE_MAXADDR_32BIT;
338 	}
339 
340 	return (0);
341 }
342 
343 static int
344 amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size)
345 {
346 	struct amd_ntb_softc *ntb = device_get_softc(dev);
347 	struct amd_ntb_pci_bar_info *bar_info;
348 
349 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
350 		return (EINVAL);
351 
352 	bar_info = &ntb->bar_info[mw_idx+1];
353 
354 	/* make sure the range fits in the usable mw size */
355 	if (size > bar_info->size) {
356 		amd_ntb_printf(0, "%s: size 0x%x greater than mw_size 0x%x\n",
357 		    __func__, (uint32_t)size, (uint32_t)bar_info->size);
358 		return (EINVAL);
359 	}
360 
361 	amd_ntb_printf(1, "%s: mw %d mw_size 0x%x size 0x%x base %p\n",
362 	    __func__, mw_idx, (uint32_t)bar_info->size,
363 	    (uint32_t)size, (void *)bar_info->pci_bus_handle);
364 
365 	/*
366 	 * AMD NTB XLAT and Limit registers needs to be written only after
367 	 * link enable
368 	 *
369 	 * set and verify setting the translation address
370 	 */
371 	amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr);
372 	amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n",
373 	    __func__, mw_idx, bar_info->xlat_off,
374 	    amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr);
375 
376 	/* set and verify setting the limit */
377 	if (mw_idx != 0) {
378 		amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size);
379 		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%x\n",
380 		    __func__, bar_info->limit_off,
381 		    amd_ntb_peer_reg_read(8, bar_info->limit_off), (uint32_t)size);
382 	} else {
383 		amd_ntb_reg_write(4, bar_info->limit_off, (uint64_t)size);
384 		amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n",
385 		    __func__, bar_info->limit_off,
386 		    amd_ntb_peer_reg_read(4, bar_info->limit_off), (uint32_t)size);
387 	}
388 
389 	return (0);
390 }
391 
392 static int
393 amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
394 {
395 	struct amd_ntb_softc *ntb = device_get_softc(dev);
396 
397 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
398 
399 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
400 		return (EINVAL);
401 
402 	return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0));
403 }
404 
405 static int
406 amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode)
407 {
408 	struct amd_ntb_softc *ntb = device_get_softc(dev);
409 	struct amd_ntb_pci_bar_info *bar_info;
410 	int rc;
411 
412 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
413 		return (EINVAL);
414 
415 	bar_info = &ntb->bar_info[mw_idx+1];
416 	if (mode == bar_info->map_mode)
417 		return (0);
418 
419 	rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode);
420 	if (rc == 0)
421 		bar_info->map_mode = mode;
422 
423 	return (rc);
424 }
425 
426 static int
427 amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode)
428 {
429 	struct amd_ntb_softc *ntb = device_get_softc(dev);
430 	struct amd_ntb_pci_bar_info *bar_info;
431 
432 	amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
433 
434 	if (mw_idx < 0 || mw_idx >= ntb->mw_count)
435 		return (EINVAL);
436 
437 	bar_info = &ntb->bar_info[mw_idx+1];
438 	*mode = bar_info->map_mode;
439 
440 	return (0);
441 }
442 
443 /*
444  * AMD NTB doorbell routines
445  */
446 static int
447 amd_ntb_db_vector_count(device_t dev)
448 {
449 	struct amd_ntb_softc *ntb = device_get_softc(dev);
450 
451 	amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__, ntb->db_count);
452 
453 	return (ntb->db_count);
454 }
455 
456 static uint64_t
457 amd_ntb_db_valid_mask(device_t dev)
458 {
459 	struct amd_ntb_softc *ntb = device_get_softc(dev);
460 
461 	amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n",
462 	    __func__, ntb->db_valid_mask);
463 
464 	return (ntb->db_valid_mask);
465 }
466 
467 static uint64_t
468 amd_ntb_db_vector_mask(device_t dev, uint32_t vector)
469 {
470 	struct amd_ntb_softc *ntb = device_get_softc(dev);
471 
472 	amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n",
473 	    __func__, vector, ntb->db_count, ntb->db_valid_mask);
474 
475 	if (vector < 0 || vector >= ntb->db_count)
476 		return (0);
477 
478 	return (ntb->db_valid_mask & (1 << vector));
479 }
480 
481 static uint64_t
482 amd_ntb_db_read(device_t dev)
483 {
484 	struct amd_ntb_softc *ntb = device_get_softc(dev);
485 	uint64_t dbstat_off;
486 
487 	dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET);
488 
489 	amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off);
490 
491 	return (dbstat_off);
492 }
493 
494 static void
495 amd_ntb_db_clear(device_t dev, uint64_t db_bits)
496 {
497 	struct amd_ntb_softc *ntb = device_get_softc(dev);
498 
499 	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
500 	amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits);
501 }
502 
503 static void
504 amd_ntb_db_set_mask(device_t dev, uint64_t db_bits)
505 {
506 	struct amd_ntb_softc *ntb = device_get_softc(dev);
507 
508 	DB_MASK_LOCK(ntb);
509 	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
510 	    __func__, ntb->db_mask, db_bits);
511 
512 	ntb->db_mask |= db_bits;
513 	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
514 	DB_MASK_UNLOCK(ntb);
515 }
516 
517 static void
518 amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits)
519 {
520 	struct amd_ntb_softc *ntb = device_get_softc(dev);
521 
522 	DB_MASK_LOCK(ntb);
523 	amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
524 	    __func__, ntb->db_mask, db_bits);
525 
526 	ntb->db_mask &= ~db_bits;
527 	amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
528 	DB_MASK_UNLOCK(ntb);
529 }
530 
531 static void
532 amd_ntb_peer_db_set(device_t dev, uint64_t db_bits)
533 {
534 	struct amd_ntb_softc *ntb = device_get_softc(dev);
535 
536 	amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
537 	amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits);
538 }
539 
540 /*
541  * AMD NTB scratchpad routines
542  */
543 static uint8_t
544 amd_ntb_spad_count(device_t dev)
545 {
546 	struct amd_ntb_softc *ntb = device_get_softc(dev);
547 
548 	amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__, ntb->spad_count);
549 
550 	return (ntb->spad_count);
551 }
552 
553 static int
554 amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
555 {
556 	struct amd_ntb_softc *ntb = device_get_softc(dev);
557 	uint32_t offset;
558 
559 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
560 
561 	if (idx < 0 || idx >= ntb->spad_count)
562 		return (EINVAL);
563 
564 	offset = ntb->self_spad + (idx << 2);
565 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
566 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
567 
568 	return (0);
569 }
570 
571 static int
572 amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
573 {
574 	struct amd_ntb_softc *ntb = device_get_softc(dev);
575 	uint32_t offset;
576 
577 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
578 
579 	if (idx < 0 || idx >= ntb->spad_count)
580 		return (EINVAL);
581 
582 	offset = ntb->self_spad + (idx << 2);
583 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
584 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
585 
586 	return (0);
587 }
588 
589 static void
590 amd_ntb_spad_clear(struct amd_ntb_softc *ntb)
591 {
592 	uint8_t i;
593 
594 	for (i = 0; i < ntb->spad_count; i++)
595 		amd_ntb_spad_write(ntb->device, i, 0);
596 }
597 
598 static int
599 amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
600 {
601 	struct amd_ntb_softc *ntb = device_get_softc(dev);
602 	uint32_t offset;
603 
604 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
605 
606 	if (idx < 0 || idx >= ntb->spad_count)
607 		return (EINVAL);
608 
609 	offset = ntb->peer_spad + (idx << 2);
610 	*val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
611 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
612 
613 	return (0);
614 }
615 
616 static int
617 amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
618 {
619 	struct amd_ntb_softc *ntb = device_get_softc(dev);
620 	uint32_t offset;
621 
622 	amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
623 
624 	if (idx < 0 || idx >= ntb->spad_count)
625 		return (EINVAL);
626 
627 	offset = ntb->peer_spad + (idx << 2);
628 	amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
629 	amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
630 
631 	return (0);
632 }
633 
634 
635 /*
636  * AMD NTB INIT
637  */
638 static int
639 amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS)
640 {
641 	struct amd_ntb_softc* ntb = arg1;
642 	struct sbuf *sb;
643 	int rc = 0;
644 
645 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
646 	if (sb == NULL)
647 		return (sb->s_error);
648 
649 	sbuf_printf(sb, "NTB AMD Hardware info:\n\n");
650 	sbuf_printf(sb, "AMD NTB side: %s\n",
651 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
652 	sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta);
653 
654 	if (!amd_link_is_up(ntb))
655 		sbuf_printf(sb, "AMD Link Status: Down\n");
656 	else {
657 		sbuf_printf(sb, "AMD Link Status: Up\n");
658 		sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n",
659 		    NTB_LNK_STA_SPEED(ntb->lnk_sta));
660 		sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n",
661 		    NTB_LNK_STA_WIDTH(ntb->lnk_sta));
662 	}
663 
664 	sbuf_printf(sb, "AMD Memory window count: %d\n",
665 	    ntb->mw_count);
666 	sbuf_printf(sb, "AMD Spad count: %d\n",
667 	    ntb->spad_count);
668 	sbuf_printf(sb, "AMD Doorbell count: %d\n",
669 	    ntb->db_count);
670 	sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n",
671 	    ntb->msix_vec_count);
672 	sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n",
673 	    ntb->db_valid_mask);
674 	sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n",
675 	    amd_ntb_reg_read(4, AMD_DBMASK_OFFSET));
676 	sbuf_printf(sb, "AMD Doorbell: 0x%x\n",
677 	    amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET));
678 	sbuf_printf(sb, "AMD NTB Incoming XLAT: \n");
679 	sbuf_printf(sb, "AMD XLAT1: 0x%jx\n",
680 	    amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET));
681 	sbuf_printf(sb, "AMD XLAT23: 0x%jx\n",
682 	    amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET));
683 	sbuf_printf(sb, "AMD XLAT45: 0x%jx\n",
684 	    amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET));
685 	sbuf_printf(sb, "AMD LMT1: 0x%x\n",
686 	    amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET));
687 	sbuf_printf(sb, "AMD LMT23: 0x%jx\n",
688 	    amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET));
689 	sbuf_printf(sb, "AMD LMT45: 0x%jx\n",
690 	    amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET));
691 
692 	rc = sbuf_finish(sb);
693 	sbuf_delete(sb);
694 	return (rc);
695 }
696 
697 static void
698 amd_ntb_sysctl_init(struct amd_ntb_softc *ntb)
699 {
700 	struct sysctl_oid_list *globals;
701 	struct sysctl_ctx_list *ctx;
702 
703 	ctx = device_get_sysctl_ctx(ntb->device);
704 	globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
705 
706 	SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info",
707 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0,
708 	    amd_ntb_hw_info_handler, "A", "AMD NTB HW Information");
709 }
710 
711 /*
712  * Polls the HW link status register(s); returns true if something has changed.
713  */
static bool
amd_ntb_poll_link(struct amd_ntb_softc *ntb)
{
	uint32_t fullreg, reg, stat;

	/* Link-active state comes from the peer side's SIDEINFO register. */
	fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET);
	reg = fullreg & NTB_LIN_STA_ACTIVE_BIT;

	/* No transition since the last poll: nothing to report. */
	if (reg == ntb->cntl_sta)
		return (false);

	amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n",
	    __func__, fullreg, ntb->cntl_sta);

	/* Cache the new active state for the next poll. */
	ntb->cntl_sta = reg;

	/* Refresh the cached PCIe link status (speed/width) from config space. */
	stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4);

	amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n",
	    __func__, stat, ntb->lnk_sta);

	ntb->lnk_sta = stat;

	return (true);
}
739 
740 static void
741 amd_link_hb(void *arg)
742 {
743 	struct amd_ntb_softc *ntb = arg;
744 
745 	if (amd_ntb_poll_link(ntb))
746 		ntb_link_event(ntb->device);
747 
748 	if (!amd_link_is_up(ntb)) {
749 		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
750 		    amd_link_hb, ntb);
751 	} else {
752 		callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10),
753 		    amd_link_hb, ntb);
754 	}
755 }
756 
757 static void
758 amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec)
759 {
760 	if (vec < AMD_DB_CNT)
761 		ntb_db_event(ntb->device, vec);
762 	else
763 		amd_ntb_printf(0, "Invalid vector %d\n", vec);
764 }
765 
/*
 * Per-vector MSI-X handler: forward to the common dispatcher with the
 * vector number this handler was registered for.
 */
static void
amd_ntb_vec_isr(void *arg)
{
	struct amd_ntb_vec *nvec = arg;

	amd_ntb_interrupt(nvec->ntb, nvec->num);
}
773 
/* Shared MSI/INTx handler: everything funnels through vector 0. */
static void
amd_ntb_irq_isr(void *arg)
{
	/* If we couldn't set up MSI-X, we only have the one vector. */
	amd_ntb_interrupt(arg, 0);
}
780 
/*
 * Advertise local readiness to the peer by setting AMD_SIDE_READY in the
 * SIDEINFO register (if not already set).
 */
static void
amd_init_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (!(reg & AMD_SIDE_READY)) {
		reg |= AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
	}
	/* NOTE(review): value discarded -- looks like a posted-write flush. */
	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
}
793 
/*
 * Withdraw local readiness: clear AMD_SIDE_READY in the SIDEINFO register
 * (if currently set).
 */
static void
amd_deinit_side_info(struct amd_ntb_softc *ntb)
{
	unsigned int reg;

	reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	if (reg & AMD_SIDE_READY) {
		reg &= ~AMD_SIDE_READY;
		amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
		/* NOTE(review): value discarded -- looks like a posted-write flush. */
		amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
	}
}
806 
/*
 * Allocate and wire up 'num_vectors' IRQ resources.  With MSI or INTx a
 * single shared handler (amd_ntb_irq_isr) services vector 0; with MSI-X
 * each vector gets amd_ntb_vec_isr and a per-vector argument.
 * NOTE(review): on failure, resources allocated so far are NOT released
 * here; ntb->allocated_interrupts tracks them so a later
 * amd_ntb_deinit_isr() can clean up -- confirm the attach error path does so.
 * Returns 0, ENOMEM, or ENXIO.
 */
static int
amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi,
    bool intx)
{
	uint16_t i;
	int flags = 0, rc = 0;

	flags |= RF_ACTIVE;
	if (intx)
		flags |= RF_SHAREABLE;	/* legacy INTx lines may be shared */

	for (i = 0; i < num_vectors; i++) {

		/* RID should be 0 for intx */
		if (intx)
			ntb->int_info[i].rid = i;
		else
			ntb->int_info[i].rid = i + 1;

		ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
		    SYS_RES_IRQ, &ntb->int_info[i].rid, flags);
		if (ntb->int_info[i].res == NULL) {
			amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n");
			return (ENOMEM);
		}

		ntb->int_info[i].tag = NULL;
		/* Count every allocated resource for later teardown. */
		ntb->allocated_interrupts++;

		if (msi || intx) {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr,
			    ntb, &ntb->int_info[i].tag);
		} else {
			rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
			    INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr,
			    &ntb->msix_vec[i], &ntb->int_info[i].tag);
		}

		if (rc != 0) {
			amd_ntb_printf(0, "bus_setup_intr %d failed\n", i);
			return (ENXIO);
		}
	}

	return (0);
}
854 
855 static int
856 amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors)
857 {
858 	uint8_t i;
859 
860 	ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB,
861 	    M_ZERO | M_WAITOK);
862 
863 	for (i = 0; i < max_vectors; i++) {
864 		ntb->msix_vec[i].num = i;
865 		ntb->msix_vec[i].ntb = ntb;
866 	}
867 
868 	return (0);
869 }
870 
871 static void
872 amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb)
873 {
874 	if (ntb->msix_vec_count) {
875 		pci_release_msi(ntb->device);
876 		ntb->msix_vec_count = 0;
877 	}
878 
879 	if (ntb->msix_vec != NULL) {
880 		free(ntb->msix_vec, M_AMD_NTB);
881 		ntb->msix_vec = NULL;
882 	}
883 }
884 
/*
 * Interrupt setup with a three-stage fallback: try one MSI-X vector per
 * doorbell; if the device or OS cannot provide that many, fall back to a
 * single MSI message; if even that fails, fall back to legacy INTx.
 * Adjusts ntb->db_count / ntb->msix_vec_count to match what was obtained.
 * Returns 0 or an errno.
 */
static int
amd_ntb_init_isr(struct amd_ntb_softc *ntb)
{
	uint32_t supported_vectors, num_vectors;
	bool msi = false, intx = false;
	int rc = 0;

	/* Start with all doorbells masked. */
	ntb->db_mask = ntb->db_valid_mask;

	rc = amd_ntb_create_msix_vec(ntb, AMD_MSIX_VECTOR_CNT);
	if (rc != 0) {
		amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc);
		return (ENOMEM);
	}

	/*
	 * Check the number of MSI-X message supported by the device.
	 * Minimum necessary MSI-X message count should be equal to db_count
	 */
	supported_vectors = pci_msix_count(ntb->device);
	num_vectors = MIN(supported_vectors, ntb->db_count);
	if (num_vectors < ntb->db_count) {
		amd_ntb_printf(0, "No minimum msix: supported %d db %d\n",
		    supported_vectors, ntb->db_count);
		msi = true;
		goto err_msix_enable;
	}

	/* Allocate the necessary number of MSI-x messages */
	rc = pci_alloc_msix(ntb->device, &num_vectors);
	if (rc != 0) {
		amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
		msi = true;
		goto err_msix_enable;
	}

	/* Partial MSI-X grant: fall through to the MSI path below. */
	if (num_vectors < ntb->db_count) {
		amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors);
		msi = true;
		/*
		 * Else set ntb->db_count = ntb->msix_vec_count = num_vectors,
		 * msi=false and dont release msi
		 */
	}

err_msix_enable:

	if (msi) {
		/* Undo any MSI-X state, then try a single MSI message. */
		free(ntb->msix_vec, M_AMD_NTB);
		ntb->msix_vec = NULL;
		pci_release_msi(ntb->device);
		num_vectors = 1;
		rc = pci_alloc_msi(ntb->device, &num_vectors);
		if (rc != 0) {
			amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
			msi = false;
			intx = true;	/* last resort: legacy INTx */
		}
	}

	ntb->db_count = ntb->msix_vec_count = num_vectors;

	if (intx) {
		num_vectors = 1;
		ntb->db_count = 1;
		ntb->msix_vec_count = 0;
	}

	amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n",
	    __func__, ntb->db_count, ntb->msix_vec_count, (int)msi, (int)intx);

	rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx);
	if (rc != 0) {
		amd_ntb_printf(0, "Error setting up isr: %d\n", rc);
		amd_ntb_free_msix_vec(ntb);
	}

	return (rc);
}
964 
965 static void
966 amd_ntb_deinit_isr(struct amd_ntb_softc *ntb)
967 {
968 	struct amd_ntb_int_info *current_int;
969 	int i;
970 
971 	/* Mask all doorbell interrupts */
972 	ntb->db_mask = ntb->db_valid_mask;
973 	amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask);
974 
975 	for (i = 0; i < ntb->allocated_interrupts; i++) {
976 		current_int = &ntb->int_info[i];
977 		if (current_int->tag != NULL)
978 			bus_teardown_intr(ntb->device, current_int->res,
979 			    current_int->tag);
980 
981 		if (current_int->res != NULL)
982 			bus_release_resource(ntb->device, SYS_RES_IRQ,
983 			    rman_get_rid(current_int->res), current_int->res);
984 	}
985 
986 	amd_ntb_free_msix_vec(ntb);
987 }
988 
989 static enum amd_ntb_conn_type
990 amd_ntb_get_topo(struct amd_ntb_softc *ntb)
991 {
992 	uint32_t info;
993 
994 	info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
995 
996 	if (info & AMD_SIDE_MASK)
997 		return (NTB_CONN_SEC);
998 
999 	return (NTB_CONN_PRI);
1000 }
1001 
/*
 * Per-device initialization: fixed resource counts, the doorbell-mask
 * spin lock, scratchpad partitioning between the two sides, and the link
 * heartbeat callout.  Finishes with all event interrupts masked.
 * Returns 0, or EINVAL for an unsupported topology.
 */
static int
amd_ntb_init_dev(struct amd_ntb_softc *ntb)
{
	ntb->mw_count		 = AMD_MW_CNT;
	ntb->spad_count		 = AMD_SPADS_CNT;
	ntb->db_count		 = AMD_DB_CNT;
	ntb->db_valid_mask	 = (1ull << ntb->db_count) - 1;
	/* Spin lock: the mask is touched from interrupt context. */
	mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN);

	switch (ntb->conn_type) {
	case NTB_CONN_PRI:
	case NTB_CONN_SEC:
		/* The scratchpad bank is split evenly between the two sides. */
		ntb->spad_count >>= 1;

		/* Primary uses the low half locally, secondary the high half. */
		if (ntb->conn_type == NTB_CONN_PRI) {
			ntb->self_spad = 0;
			ntb->peer_spad = 0x20;
		} else {
			ntb->self_spad = 0x20;
			ntb->peer_spad = 0;
		}

		/* Start the link-status polling heartbeat. */
		callout_init(&ntb->hb_timer, 1);
		callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
		    amd_link_hb, ntb);

		break;

	default:
		amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n",
		    ntb->conn_type);
		return (EINVAL);
	}

	/* Leave all event interrupts masked until link enable. */
	ntb->int_mask = AMD_EVENT_INTMASK;
	amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);

	return (0);
}
1041 
1042 static int
1043 amd_ntb_init(struct amd_ntb_softc *ntb)
1044 {
1045 	int rc = 0;
1046 
1047 	ntb->conn_type = amd_ntb_get_topo(ntb);
1048 	amd_ntb_printf(0, "AMD NTB Side: %s\n",
1049 	    (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
1050 
1051 	rc = amd_ntb_init_dev(ntb);
1052 	if (rc != 0)
1053 		return (rc);
1054 
1055 	rc = amd_ntb_init_isr(ntb);
1056 	if (rc != 0)
1057 		return (rc);
1058 
1059 	return (0);
1060 }
1061 
/* Log a successfully mapped BAR: virtual and physical extents plus size. */
static void
print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar,
    const char *kind)
{
	amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
	    PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
	    (char *)bar->vbase + bar->size - 1, (void *)bar->pbase,
	    (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind);
}
1071 
1072 static void
1073 save_bar_parameters(struct amd_ntb_pci_bar_info *bar)
1074 {
1075 	bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
1076 	bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
1077 	bar->pbase = rman_get_start(bar->pci_resource);
1078 	bar->size = rman_get_size(bar->pci_resource);
1079 	bar->vbase = rman_get_virtual(bar->pci_resource);
1080 	bar->map_mode = VM_MEMATTR_UNCACHEABLE;
1081 }
1082 
1083 static int
1084 map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar)
1085 {
1086 	bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
1087 	    &bar->pci_resource_id, RF_ACTIVE);
1088 	if (bar->pci_resource == NULL)
1089 		return (ENXIO);
1090 
1091 	save_bar_parameters(bar);
1092 	print_map_success(ntb, bar, "mmr");
1093 
1094 	return (0);
1095 }
1096 
/*
 * Map every BAR this device exposes: BAR 0 carries the NTB control
 * registers; BARs 1, 2/3, and 4/5 back the three memory windows, each
 * paired with its translation and limit register offsets.
 * Returns 0 or the first map_bar() errno; partially mapped BARs are left
 * for amd_ntb_unmap_pci_bars() to release.
 */
static int
amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb)
{
	int rc = 0;

	/* NTB Config/Control registers - BAR 0 */
	ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
	rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
	if (rc != 0)
		goto out;

	/* Memory Window 0 BAR - BAR 1*/
	ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET;

	/* Memory Window 1 BAR - BAR 2&3 */
	ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET;

	/* Memory Window 2 BAR - BAR 4&5 */
	ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4);
	rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]);
	if (rc != 0)
		goto out;
	ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET;
	ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET;

out:
	if (rc != 0)
		amd_ntb_printf(0, "unable to allocate pci resource\n");

	return (rc);
}
1138 
1139 static void
1140 amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb)
1141 {
1142 	struct amd_ntb_pci_bar_info *bar_info;
1143 	int i;
1144 
1145 	for (i = 0; i < NTB_MAX_BARS; i++) {
1146 		bar_info = &ntb->bar_info[i];
1147 		if (bar_info->pci_resource != NULL)
1148 			bus_release_resource(ntb->device, SYS_RES_MEMORY,
1149 			    bar_info->pci_resource_id, bar_info->pci_resource);
1150 	}
1151 }
1152 
1153 static int
1154 amd_ntb_probe(device_t device)
1155 {
1156 	const struct pci_device_table *tbl;
1157 
1158 	tbl = PCI_MATCH(device, amd_ntb_devs);
1159 	if (tbl == NULL)
1160 		return (ENXIO);
1161 
1162 	device_set_desc(device, tbl->descr);
1163 
1164 	return (BUS_PROBE_GENERIC);
1165 }
1166 
1167 static int
1168 amd_ntb_attach(device_t device)
1169 {
1170 	struct amd_ntb_softc *ntb = device_get_softc(device);
1171 	int error;
1172 
1173 	ntb->device = device;
1174 
1175 	/* Enable PCI bus mastering for "device" */
1176 	pci_enable_busmaster(ntb->device);
1177 
1178 	error = amd_ntb_map_pci_bars(ntb);
1179 	if (error)
1180 		goto out;
1181 
1182 	error = amd_ntb_init(ntb);
1183 	if (error)
1184 		goto out;
1185 
1186 	amd_init_side_info(ntb);
1187 
1188 	amd_ntb_spad_clear(ntb);
1189 
1190 	amd_ntb_sysctl_init(ntb);
1191 
1192 	/* Attach children to this controller */
1193 	error = ntb_register_device(device);
1194 
1195 out:
1196 	if (error)
1197 		amd_ntb_detach(device);
1198 
1199 	return (error);
1200 }
1201 
static int
amd_ntb_detach(device_t device)
{
	struct amd_ntb_softc *ntb = device_get_softc(device);

	/*
	 * Tear down in roughly the reverse order of amd_ntb_attach().
	 * NOTE(review): this is also invoked from the amd_ntb_attach()
	 * failure path, so it can run against a partially initialized
	 * softc; presumably each of these routines tolerates state that
	 * was never set up — confirm before reordering anything here.
	 */
	ntb_unregister_device(device);
	amd_deinit_side_info(ntb);
	/* Wait for any in-flight hb_timer callout to finish. */
	callout_drain(&ntb->hb_timer);
	amd_ntb_deinit_isr(ntb);
	mtx_destroy(&ntb->db_mask_lock);
	pci_disable_busmaster(ntb->device);
	amd_ntb_unmap_pci_bars(ntb);

	return (0);
}
1217 
/*
 * Method table wiring the AMD-specific implementations into the newbus
 * device interface, the NTB bus glue, and the ntb(4) KPI.
 */
static device_method_t ntb_amd_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		amd_ntb_probe),
	DEVMETHOD(device_attach,	amd_ntb_attach),
	DEVMETHOD(device_detach,	amd_ntb_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_location_str, ntb_child_location_str),
	DEVMETHOD(bus_print_child,	ntb_print_child),

	/* NTB interface */
	DEVMETHOD(ntb_port_number,	amd_ntb_port_number),
	DEVMETHOD(ntb_peer_port_count,	amd_ntb_peer_port_count),
	DEVMETHOD(ntb_peer_port_number,	amd_ntb_peer_port_number),
	DEVMETHOD(ntb_peer_port_idx, 	amd_ntb_peer_port_idx),
	DEVMETHOD(ntb_link_is_up,	amd_ntb_link_is_up),
	DEVMETHOD(ntb_link_enable,	amd_ntb_link_enable),
	DEVMETHOD(ntb_link_disable,	amd_ntb_link_disable),
	DEVMETHOD(ntb_mw_count,		amd_ntb_mw_count),
	DEVMETHOD(ntb_mw_get_range,	amd_ntb_mw_get_range),
	DEVMETHOD(ntb_mw_set_trans,	amd_ntb_mw_set_trans),
	DEVMETHOD(ntb_mw_clear_trans,	amd_ntb_mw_clear_trans),
	DEVMETHOD(ntb_mw_set_wc,	amd_ntb_mw_set_wc),
	DEVMETHOD(ntb_mw_get_wc,	amd_ntb_mw_get_wc),
	DEVMETHOD(ntb_db_valid_mask,	amd_ntb_db_valid_mask),
	DEVMETHOD(ntb_db_vector_count,	amd_ntb_db_vector_count),
	DEVMETHOD(ntb_db_vector_mask,	amd_ntb_db_vector_mask),
	DEVMETHOD(ntb_db_read,		amd_ntb_db_read),
	DEVMETHOD(ntb_db_clear,		amd_ntb_db_clear),
	DEVMETHOD(ntb_db_set_mask,	amd_ntb_db_set_mask),
	DEVMETHOD(ntb_db_clear_mask,	amd_ntb_db_clear_mask),
	DEVMETHOD(ntb_peer_db_set,	amd_ntb_peer_db_set),
	DEVMETHOD(ntb_spad_count,	amd_ntb_spad_count),
	DEVMETHOD(ntb_spad_read,	amd_ntb_spad_read),
	DEVMETHOD(ntb_spad_write,	amd_ntb_spad_write),
	DEVMETHOD(ntb_peer_spad_read,	amd_ntb_peer_spad_read),
	DEVMETHOD(ntb_peer_spad_write,	amd_ntb_peer_spad_write),
	DEVMETHOD_END
};
1257 
/* Declare the driver class and register it on the PCI bus. */
static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
    sizeof(struct amd_ntb_softc));
DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, ntb_hw_devclass, NULL, NULL);
/* Depends on the generic ntb(4) framework for child registration. */
MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
MODULE_VERSION(ntb_hw_amd, 1);
/* Export the PCI id table so the loader can auto-load this module. */
PCI_PNP_INFO(amd_ntb_devs);
1264