xref: /freebsd/sys/dev/ntb/ntb_hw/ntb_hw_plx.c (revision 1d386b48a555f61cb7325543adbbb5c3f3407a66)
1 /*-
2  * Copyright (c) 2017-2019 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 
27 /*
28  * The Non-Transparent Bridge (NTB) is a device that allows you to connect
29  * two or more systems using PCIe links, providing remote memory access.
30  *
31  * This module contains a driver for NTBs in PLX/Avago/Broadcom PCIe bridges.
32  */
33 
34 #include <sys/cdefs.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/interrupt.h>
40 #include <sys/module.h>
41 #include <sys/rman.h>
42 #include <sys/sysctl.h>
43 #include <sys/taskqueue.h>
44 #include <sys/tree.h>
45 #include <vm/vm.h>
46 #include <vm/pmap.h>
47 #include <machine/bus.h>
48 #include <machine/intr_machdep.h>
49 #include <machine/resource.h>
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pcivar.h>
52 #include <dev/iommu/iommu.h>
53 
54 #include "../ntb.h"
55 
56 #define PLX_MAX_BARS		4	/* There are at most 4 data BARs. */
57 #define PLX_NUM_SPAD		8	/* There are 8 scratchpads. */
58 #define PLX_NUM_SPAD_PATT	4	/* Use test pattern as 4 more. */
59 #define PLX_NUM_DB		16	/* There are 16 doorbells. */
60 #define PLX_MAX_SPLIT	128	/* Allow at most 128 splits. */
61 
62 struct ntb_plx_mw_info {
63 	int			 mw_bar;
64 	int			 mw_64bit;
65 	int			 mw_rid;
66 	struct resource		*mw_res;
67 	vm_paddr_t		 mw_pbase;
68 	caddr_t			 mw_vbase;
69 	vm_size_t		 mw_size;
70 	struct {
71 		vm_memattr_t	 mw_map_mode;
72 		bus_addr_t	 mw_xlat_addr;
73 		bus_size_t	 mw_xlat_size;
74 	} splits[PLX_MAX_SPLIT];
75 };
76 
77 struct ntb_plx_softc {
78 	/* ntb.c context. Do not move! Must go first! */
79 	void			*ntb_store;
80 
81 	device_t		 dev;
82 	struct resource		*conf_res;
83 	int			 conf_rid;
84 	u_int			 ntx;		/* NTx number within chip. */
85 	u_int			 link;		/* Link vs. Virtual side. */
86 	u_int			 port;		/* Port number within chip. */
87 	u_int			 alut;		/* A-LUT is enabled for NTx */
88 	u_int			 split;		/* split BAR2 into 2^x parts */
89 
90 	int			 int_rid;
91 	struct resource		*int_res;
92 	void			*int_tag;
93 
94 	struct ntb_plx_mw_info	 mw_info[PLX_MAX_BARS];
95 	int			 mw_count;	/* Number of memory windows. */
96 
97 	int			 spad_count1;	/* Number of standard spads. */
98 	int			 spad_count2;	/* Number of extra spads. */
99 	uint32_t		 spad_off1;	/* Offset of our spads. */
100 	uint32_t		 spad_off2;	/* Offset of our extra spads. */
101 	uint32_t		 spad_offp1;	/* Offset of peer spads. */
102 	uint32_t		 spad_offp2;	/* Offset of peer extra spads. */
103 
104 	/* Parameters of window shared with peer config access in B2B mode. */
105 	int			 b2b_mw;	/* Shared window number. */
106 	uint64_t		 b2b_off;	/* Offset in shared window. */
107 };
108 
109 #define	PLX_NT0_BASE		0x3E000
110 #define	PLX_NT1_BASE		0x3C000
111 #define	PLX_NTX_BASE(sc)	((sc)->ntx ? PLX_NT1_BASE : PLX_NT0_BASE)
112 #define	PLX_NTX_LINK_OFFSET	0x01000
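/*
 * The NT0/NT1 register blocks live at fixed offsets within the 256KB chip
 * configuration space mapped by BAR0.  Each block holds two copies of the
 * NTx interface registers: the Virtual-side copy at the block base and the
 * Link-side copy 0x1000 above it, which is what the OUR/PEER macros below
 * select between.
 */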
113 
114 /* Bases of NTx our/peer interface registers */
115 #define	PLX_NTX_OUR_BASE(sc)				\
116     (PLX_NTX_BASE(sc) + ((sc)->link ? PLX_NTX_LINK_OFFSET : 0))
117 #define	PLX_NTX_PEER_BASE(sc)				\
118     (PLX_NTX_BASE(sc) + ((sc)->link ? 0 : PLX_NTX_LINK_OFFSET))
119 
120 /* Read/write NTx our interface registers */
121 #define	NTX_READ(sc, reg)				\
122     bus_read_4((sc)->conf_res, PLX_NTX_OUR_BASE(sc) + (reg))
123 #define	NTX_WRITE(sc, reg, val)				\
124     bus_write_4((sc)->conf_res, PLX_NTX_OUR_BASE(sc) + (reg), (val))
125 
126 /* Read/write NTx peer interface registers */
127 #define	PNTX_READ(sc, reg)				\
128     bus_read_4((sc)->conf_res, PLX_NTX_PEER_BASE(sc) + (reg))
129 #define	PNTX_WRITE(sc, reg, val)			\
130     bus_write_4((sc)->conf_res, PLX_NTX_PEER_BASE(sc) + (reg), (val))
131 
132 /* Read/write B2B NTx registers */
133 #define	BNTX_READ(sc, reg)				\
134     bus_read_4((sc)->mw_info[(sc)->b2b_mw].mw_res,	\
135     PLX_NTX_BASE(sc) + (reg))
136 #define	BNTX_WRITE(sc, reg, val)			\
137     bus_write_4((sc)->mw_info[(sc)->b2b_mw].mw_res,	\
138     PLX_NTX_BASE(sc) + (reg), (val))
139 
140 #define	PLX_PORT_BASE(p)		((p) << 12)
141 #define	PLX_STATION_PORT_BASE(sc)	PLX_PORT_BASE((sc)->port & ~7)
142 
143 #define	PLX_PORT_CONTROL(sc)		(PLX_STATION_PORT_BASE(sc) + 0x208)
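/*
 * Each port's registers occupy a 4KB window in configuration space (hence
 * port << 12).  Ports are grouped into stations of 8; the Port Control
 * register lives at offset 0x208 of the station's first port and carries
 * one link-disable bit per port within the station.
 */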
144 
145 static int ntb_plx_init(device_t dev);
146 static int ntb_plx_detach(device_t dev);
147 static int ntb_plx_mw_set_trans_internal(device_t dev, unsigned mw_idx);
148 
149 static int
150 ntb_plx_probe(device_t dev)
151 {
152 
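	/*
	 * pci_get_devid() returns the device ID in the upper 16 bits and the
	 * vendor ID in the lower 16 bits; 0x10b5 is the PLX Technology vendor
	 * ID, so these match the PLX 87xx NT Link and Virtual functions.
	 */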
153 	switch (pci_get_devid(dev)) {
154 	case 0x87a010b5:
155 		device_set_desc(dev, "PLX Non-Transparent Bridge NT0 Link");
156 		return (BUS_PROBE_DEFAULT);
157 	case 0x87a110b5:
158 		device_set_desc(dev, "PLX Non-Transparent Bridge NT1 Link");
159 		return (BUS_PROBE_DEFAULT);
160 	case 0x87b010b5:
161 		device_set_desc(dev, "PLX Non-Transparent Bridge NT0 Virtual");
162 		return (BUS_PROBE_DEFAULT);
163 	case 0x87b110b5:
164 		device_set_desc(dev, "PLX Non-Transparent Bridge NT1 Virtual");
165 		return (BUS_PROBE_DEFAULT);
166 	}
167 	return (ENXIO);
168 }
169 
170 static int
171 ntb_plx_init(device_t dev)
172 {
173 	struct ntb_plx_softc *sc = device_get_softc(dev);
174 	struct ntb_plx_mw_info *mw;
175 	uint64_t val64;
176 	int i;
177 	uint32_t val;
178 
179 	if (sc->b2b_mw >= 0) {
180 		/* Set peer BAR0/1 size and address for B2B NTx access. */
181 		mw = &sc->mw_info[sc->b2b_mw];
182 		if (mw->mw_64bit) {
183 			PNTX_WRITE(sc, 0xe4, 0x3);	/* 64-bit */
184 			val64 = 0x2000000000000000 * mw->mw_bar | 0x4;
185 			PNTX_WRITE(sc, PCIR_BAR(0), val64);
186 			PNTX_WRITE(sc, PCIR_BAR(0) + 4, val64 >> 32);
187 		} else {
188 			PNTX_WRITE(sc, 0xe4, 0x2);	/* 32-bit */
189 			val = 0x20000000 * mw->mw_bar;
190 			PNTX_WRITE(sc, PCIR_BAR(0), val);
191 		}
192 
193 		/* Set Virtual to Link address translation for B2B. */
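		/*
		 * These translations point at the synthetic addresses
		 * (0x2000000000000000 or 0x20000000 times the BAR number)
		 * assigned to the peer's BARs here and in
		 * ntb_plx_mw_set_trans_internal(), so accesses through our
		 * windows reach the matching peer BAR.
		 */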
194 		for (i = 0; i < sc->mw_count; i++) {
195 			mw = &sc->mw_info[i];
196 			if (mw->mw_64bit) {
197 				val64 = 0x2000000000000000 * mw->mw_bar;
198 				NTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, val64);
199 				NTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4 + 4, val64 >> 32);
200 			} else {
201 				val = 0x20000000 * mw->mw_bar;
202 				NTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, val);
203 			}
204 		}
205 
206 		/* Make sure Virtual to Link A-LUT is disabled. */
207 		if (sc->alut)
208 			PNTX_WRITE(sc, 0xc94, 0);
209 
210 		/* Enable all Link Interface LUT entries for peer. */
211 		for (i = 0; i < 32; i += 2) {
212 			PNTX_WRITE(sc, 0xdb4 + i * 2,
213 			    0x00010001 | ((i + 1) << 19) | (i << 3));
214 		}
215 	}
216 
217 	/*
218 	 * Enable Virtual Interface LUT entry 0 for 0:0.*,
219 	 * entry 1 for our Requester ID reported by the chip, and
220 	 * entries 2-5 for 0/64/128/192:4.* of I/OAT DMA engines.
221 	 * XXX: It's a hack; we can't know all DMA engines, but this covers the
222 	 * I/OAT of all Xeon E5/E7 I have seen, from Sandy Bridge through Skylake.
223 	 */
224 	val = (NTX_READ(sc, 0xc90) << 16) | 0x00010001;
225 	NTX_WRITE(sc, sc->link ? 0xdb4 : 0xd94, val);
226 	NTX_WRITE(sc, sc->link ? 0xdb8 : 0xd98, 0x40210021);
227 	NTX_WRITE(sc, sc->link ? 0xdbc : 0xd9c, 0xc0218021);
228 
229 	/* Set Link to Virtual address translation. */
230 	for (i = 0; i < sc->mw_count; i++)
231 		ntb_plx_mw_set_trans_internal(dev, i);
232 
233 	pci_enable_busmaster(dev);
234 	if (sc->b2b_mw >= 0)
235 		PNTX_WRITE(sc, PCIR_COMMAND, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
236 
237 	return (0);
238 }
239 
240 static void
241 ntb_plx_isr(void *arg)
242 {
243 	device_t dev = arg;
244 	struct ntb_plx_softc *sc = device_get_softc(dev);
245 	uint32_t val;
246 
247 	ntb_db_event(dev, 0);
248 
249 	if (sc->link)	/* Link Interface has no Link Error registers. */
250 		return;
251 
252 	val = NTX_READ(sc, 0xfe0);
253 	if (val == 0)
254 		return;
255 	NTX_WRITE(sc, 0xfe0, val);
256 	if (val & 1)
257 		device_printf(dev, "Correctable Error\n");
258 	if (val & 2)
259 		device_printf(dev, "Uncorrectable Error\n");
260 	if (val & 4) {
261 		/* DL_Down resets the link-side registers; we have to reinit. */
262 		ntb_plx_init(dev);
263 		ntb_link_event(dev);
264 	}
265 	if (val & 8)
266 		device_printf(dev, "Uncorrectable Error Message Drop\n");
267 }
268 
269 static int
270 ntb_plx_setup_intr(device_t dev)
271 {
272 	struct ntb_plx_softc *sc = device_get_softc(dev);
273 	int error;
274 
275 	/*
276 	 * XXX: This hardware supports MSI, but I found it unusable.
277 	 * It generates a new MSI only when the doorbell register goes from
278 	 * zero, but not when another bit is set or on a partial clear.
279 	 * That makes operation very racy and unreliable.
280 	 * The data book mentions some mask-juggling magic to work around
281 	 * that, but I failed to make it work.
282 	 */
283 	sc->int_rid = 0;
284 	sc->int_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
285 	    &sc->int_rid, RF_SHAREABLE|RF_ACTIVE);
286 	if (sc->int_res == NULL) {
287 		device_printf(dev, "bus_alloc_resource failed\n");
288 		return (ENOMEM);
289 	}
290 	error = bus_setup_intr(dev, sc->int_res, INTR_MPSAFE | INTR_TYPE_MISC,
291 	    NULL, ntb_plx_isr, dev, &sc->int_tag);
292 	if (error != 0) {
293 		device_printf(dev, "bus_setup_intr failed: %d\n", error);
294 		return (error);
295 	}
296 
297 	if (!sc->link) { /* Link Interface has no Link Error registers. */
298 		NTX_WRITE(sc, 0xfe0, 0xf);	/* Clear link interrupts. */
299 		NTX_WRITE(sc, 0xfe4, 0x0);	/* Unmask link interrupts. */
300 	}
301 	return (0);
302 }
303 
304 static void
305 ntb_plx_teardown_intr(device_t dev)
306 {
307 	struct ntb_plx_softc *sc = device_get_softc(dev);
308 
309 	if (!sc->link)	/* Link Interface has no Link Error registers. */
310 		NTX_WRITE(sc, 0xfe4, 0xf);	/* Mask link interrupts. */
311 
312 	if (sc->int_res) {
313 		bus_teardown_intr(dev, sc->int_res, sc->int_tag);
314 		bus_release_resource(dev, SYS_RES_IRQ, sc->int_rid,
315 		    sc->int_res);
316 	}
317 }
318 
319 static int
320 ntb_plx_attach(device_t dev)
321 {
322 	struct ntb_plx_softc *sc = device_get_softc(dev);
323 	struct ntb_plx_mw_info *mw;
324 	int error = 0, i, j;
325 	uint32_t val;
326 	char buf[32];
327 
328 	/* Identify what we are (what side of what NTx). */
329 	sc->dev = dev;
330 	val = pci_read_config(dev, 0xc8c, 4);
331 	sc->ntx = (val & 1) != 0;
332 	sc->link = (val & 0x80000000) != 0;
333 
334 	/* Get access to whole 256KB of chip configuration space via BAR0/1. */
335 	sc->conf_rid = PCIR_BAR(0);
336 	sc->conf_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
337 	    &sc->conf_rid, RF_ACTIVE);
338 	if (sc->conf_res == NULL) {
339 		device_printf(dev, "Can't allocate configuration BAR.\n");
340 		return (ENXIO);
341 	}
342 
343 	/*
344 	 * The device occupies the whole bus.  In translated TLPs the slot field
345 	 * keeps the LUT index (original bus/slot); the function is passed through.
346 	 */
347 	bus_dma_iommu_set_buswide(dev);
348 
349 	/* Identify chip port we are connected to. */
350 	val = bus_read_4(sc->conf_res, 0x360);
351 	sc->port = (val >> ((sc->ntx == 0) ? 8 : 16)) & 0x1f;
352 
353 	/* Detect A-LUT enable and size. */
354 	val >>= 30;
355 	sc->alut = (val == 0x3) ? 1 : ((val & (1 << sc->ntx)) ? 2 : 0);
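	/*
	 * sc->alut encodes the A-LUT size available to this NTx: 1 when the
	 * A-LUT is enabled for both NT0 and NT1 (128 entries each), 2 when it
	 * is enabled for this NTx only (256 entries), 0 when it is disabled.
	 */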
356 	if (sc->alut)
357 		device_printf(dev, "%u A-LUT entries\n", 128 * sc->alut);
358 
359 	/* Find configured memory windows at BAR2-5. */
360 	sc->mw_count = 0;
361 	for (i = 2; i <= 5; i++) {
362 		mw = &sc->mw_info[sc->mw_count];
363 		mw->mw_bar = i;
364 		mw->mw_rid = PCIR_BAR(mw->mw_bar);
365 		mw->mw_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
366 		    &mw->mw_rid, RF_ACTIVE);
367 		if (mw->mw_res == NULL)
368 			continue;
369 		mw->mw_pbase = rman_get_start(mw->mw_res);
370 		mw->mw_size = rman_get_size(mw->mw_res);
371 		mw->mw_vbase = rman_get_virtual(mw->mw_res);
372 		for (j = 0; j < PLX_MAX_SPLIT; j++)
373 			mw->splits[j].mw_map_mode = VM_MEMATTR_UNCACHEABLE;
374 		sc->mw_count++;
375 
376 		/* Skip over adjacent BAR for 64-bit BARs. */
377 		val = pci_read_config(dev, PCIR_BAR(mw->mw_bar), 4);
378 		if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64) {
379 			mw->mw_64bit = 1;
380 			i++;
381 		}
382 	}
383 
384 	/* Try to identify B2B mode. */
385 	i = 1;
386 	snprintf(buf, sizeof(buf), "hint.%s.%d.b2b", device_get_name(dev),
387 	    device_get_unit(dev));
388 	TUNABLE_INT_FETCH(buf, &i);
389 	if (sc->link) {
390 		device_printf(dev, "NTB-to-Root Port mode (Link Interface)\n");
391 		sc->b2b_mw = -1;
392 	} else if (i == 0) {
393 		device_printf(dev, "NTB-to-Root Port mode (Virtual Interface)\n");
394 		sc->b2b_mw = -1;
395 	} else {
396 		device_printf(dev, "NTB-to-NTB (back-to-back) mode\n");
397 
398 		/* We need at least one memory window for B2B peer access. */
399 		if (sc->mw_count == 0) {
400 			device_printf(dev, "No memory window BARs enabled.\n");
401 			error = ENXIO;
402 			goto out;
403 		}
404 		sc->b2b_mw = sc->mw_count - 1;
405 
406 		/* Use half of the window for B2B, but no less than 1MB. */
407 		mw = &sc->mw_info[sc->b2b_mw];
408 		if (mw->mw_size >= 2 * 1024 * 1024)
409 			sc->b2b_off = mw->mw_size / 2;
410 		else
411 			sc->b2b_off = 0;
412 	}
413 
414 	snprintf(buf, sizeof(buf), "hint.%s.%d.split", device_get_name(dev),
415 	    device_get_unit(dev));
416 	TUNABLE_INT_FETCH(buf, &sc->split);
417 	if (sc->split > 7) {
418 		device_printf(dev, "Split value is too high (%u)\n", sc->split);
419 		sc->split = 0;
420 	} else if (sc->split > 0 && sc->alut == 0) {
421 		device_printf(dev, "Can't split with disabled A-LUT\n");
422 		sc->split = 0;
423 	} else if (sc->split > 0 && (sc->mw_count == 0 || sc->mw_info[0].mw_bar != 2)) {
424 		device_printf(dev, "Can't split disabled BAR2\n");
425 		sc->split = 0;
426 	} else if (sc->split > 0 && (sc->b2b_mw == 0 && sc->b2b_off == 0)) {
427 		device_printf(dev, "Can't split BAR2 consumed by B2B\n");
428 		sc->split = 0;
429 	} else if (sc->split > 0) {
430 		device_printf(dev, "Splitting BAR2 into %d memory windows\n",
431 		    1 << sc->split);
432 	}
433 
434 	/*
435 	 * Use the Physical Layer User Test Pattern registers as additional
436 	 * scratchpads.  Make sure they are present and enabled by writing to them.
437 	 * XXX: It's a hack, but the standard 8 registers are not enough.
438 	 */
439 	sc->spad_offp1 = sc->spad_off1 = PLX_NTX_OUR_BASE(sc) + 0xc6c;
440 	sc->spad_offp2 = sc->spad_off2 = PLX_PORT_BASE(sc->ntx * 8) + 0x20c;
441 	if (sc->b2b_mw >= 0) {
442 		/* In NTB-to-NTB mode each side has its own scratchpads. */
443 		sc->spad_count1 = PLX_NUM_SPAD;
444 		bus_write_4(sc->conf_res, sc->spad_off2, 0x12345678);
445 		if (bus_read_4(sc->conf_res, sc->spad_off2) == 0x12345678)
446 			sc->spad_count2 = PLX_NUM_SPAD_PATT;
447 	} else {
448 		/* Otherwise we have to share scratchpads with the peer. */
449 		if (sc->link) {
450 			sc->spad_off1 += PLX_NUM_SPAD / 2 * 4;
451 			sc->spad_off2 += PLX_NUM_SPAD_PATT / 2 * 4;
452 		} else {
453 			sc->spad_offp1 += PLX_NUM_SPAD / 2 * 4;
454 			sc->spad_offp2 += PLX_NUM_SPAD_PATT / 2 * 4;
455 		}
456 		sc->spad_count1 = PLX_NUM_SPAD / 2;
457 		bus_write_4(sc->conf_res, sc->spad_off2, 0x12345678);
458 		if (bus_read_4(sc->conf_res, sc->spad_off2) == 0x12345678)
459 			sc->spad_count2 = PLX_NUM_SPAD_PATT / 2;
460 	}
461 
462 	/* Apply static part of NTB configuration. */
463 	ntb_plx_init(dev);
464 
465 	/* Allocate and setup interrupts. */
466 	error = ntb_plx_setup_intr(dev);
467 	if (error)
468 		goto out;
469 
470 	/* Attach children to this controller */
471 	error = ntb_register_device(dev);
472 
473 out:
474 	if (error != 0)
475 		ntb_plx_detach(dev);
476 	return (error);
477 }
478 
479 static int
480 ntb_plx_detach(device_t dev)
481 {
482 	struct ntb_plx_softc *sc = device_get_softc(dev);
483 	struct ntb_plx_mw_info *mw;
484 	int i;
485 
486 	/* Detach & delete all children */
487 	ntb_unregister_device(dev);
488 
489 	/* Disable and free interrupts. */
490 	ntb_plx_teardown_intr(dev);
491 
492 	/* Free memory resources. */
493 	for (i = 0; i < sc->mw_count; i++) {
494 		mw = &sc->mw_info[i];
495 		bus_release_resource(dev, SYS_RES_MEMORY, mw->mw_rid,
496 		    mw->mw_res);
497 	}
498 	bus_release_resource(dev, SYS_RES_MEMORY, sc->conf_rid, sc->conf_res);
499 	return (0);
500 }
501 
502 static int
503 ntb_plx_port_number(device_t dev)
504 {
505 	struct ntb_plx_softc *sc = device_get_softc(dev);
506 
507 	return (sc->link ? 1 : 0);
508 }
509 
510 static int
511 ntb_plx_peer_port_count(device_t dev)
512 {
513 
514 	return (1);
515 }
516 
517 static int
518 ntb_plx_peer_port_number(device_t dev, int pidx)
519 {
520 	struct ntb_plx_softc *sc = device_get_softc(dev);
521 
522 	if (pidx != 0)
523 		return (-EINVAL);
524 
525 	return (sc->link ? 0 : 1);
526 }
527 
528 static int
529 ntb_plx_peer_port_idx(device_t dev, int port)
530 {
531 	int peer_port;
532 
533 	peer_port = ntb_plx_peer_port_number(dev, 0);
534 	if (peer_port == -EINVAL || port != peer_port)
535 		return (-EINVAL);
536 
537 	return (0);
538 }
539 
540 static bool
541 ntb_plx_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
542 {
543 	uint16_t link;
544 
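	/*
	 * PCIe Link Status: the speed field is in the low bits and the width
	 * field in bits 9:4; a nonzero negotiated width is taken to mean the
	 * link is up.
	 */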
545 	link = pcie_read_config(dev, PCIER_LINK_STA, 2);
546 	if (speed != NULL)
547 		*speed = (link & PCIEM_LINK_STA_SPEED);
548 	if (width != NULL)
549 		*width = (link & PCIEM_LINK_STA_WIDTH) >> 4;
550 	return ((link & PCIEM_LINK_STA_WIDTH) != 0);
551 }
552 
553 static int
554 ntb_plx_link_enable(device_t dev, enum ntb_speed speed __unused,
555     enum ntb_width width __unused)
556 {
557 	struct ntb_plx_softc *sc = device_get_softc(dev);
558 	uint32_t reg, val;
559 
560 	/* The fact that we see the Link Interface means the link is enabled. */
561 	if (sc->link) {
562 		ntb_link_event(dev);
563 		return (0);
564 	}
565 
566 	reg = PLX_PORT_CONTROL(sc);
567 	val = bus_read_4(sc->conf_res, reg);
568 	if ((val & (1 << (sc->port & 7))) == 0) {
569 		/* If already enabled, generate fake link event and exit. */
570 		ntb_link_event(dev);
571 		return (0);
572 	}
573 	val &= ~(1 << (sc->port & 7));
574 	bus_write_4(sc->conf_res, reg, val);
575 	return (0);
576 }
577 
578 static int
579 ntb_plx_link_disable(device_t dev)
580 {
581 	struct ntb_plx_softc *sc = device_get_softc(dev);
582 	uint32_t reg, val;
583 
584 	/* Link disable for Link Interface would be suicidal. */
585 	if (sc->link)
586 		return (0);
587 
588 	reg = PLX_PORT_CONTROL(sc);
589 	val = bus_read_4(sc->conf_res, reg);
590 	val |= (1 << (sc->port & 7));
591 	bus_write_4(sc->conf_res, reg, val);
592 	return (0);
593 }
594 
595 static bool
596 ntb_plx_link_enabled(device_t dev)
597 {
598 	struct ntb_plx_softc *sc = device_get_softc(dev);
599 	uint32_t reg, val;
600 
601 	/* The fact that we see the Link Interface means the link is enabled. */
602 	if (sc->link)
603 		return (TRUE);
604 
605 	reg = PLX_PORT_CONTROL(sc);
606 	val = bus_read_4(sc->conf_res, reg);
607 	return ((val & (1 << (sc->port & 7))) == 0);
608 }
609 
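/*
 * Number of memory windows as seen by the user: each internal window counts
 * once, splitting BAR2 adds (2^split - 1) more, and one window is hidden if
 * it is entirely consumed by B2B peer config access.
 */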
610 static uint8_t
611 ntb_plx_mw_count(device_t dev)
612 {
613 	struct ntb_plx_softc *sc = device_get_softc(dev);
614 	uint8_t res;
615 
616 	res = sc->mw_count;
617 	res += (1 << sc->split) - 1;
618 	if (sc->b2b_mw >= 0 && sc->b2b_off == 0)
619 		res--; /* B2B consumed whole window. */
620 	return (res);
621 }
622 
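/*
 * Map a user-visible memory window index to the internal window index and
 * split (sub-window) number.  When BAR2 is split into 2^split parts, those
 * parts are presented to the user as the first 2^split windows; the
 * remaining internal windows follow with a split number of 0.
 */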
623 static unsigned
624 ntb_plx_user_mw_to_idx(struct ntb_plx_softc *sc, unsigned uidx, unsigned *sp)
625 {
626 	unsigned t;
627 
628 	t = 1 << sc->split;
629 	if (uidx < t) {
630 		*sp = uidx;
631 		return (0);
632 	}
633 	*sp = 0;
634 	return (uidx - (t - 1));
635 }
636 
637 static int
638 ntb_plx_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
639     caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
640     bus_addr_t *plimit)
641 {
642 	struct ntb_plx_softc *sc = device_get_softc(dev);
643 	struct ntb_plx_mw_info *mw;
644 	size_t off, ss;
645 	unsigned sp, split;
646 
647 	mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp);
648 	if (mw_idx >= sc->mw_count)
649 		return (EINVAL);
650 	off = 0;
651 	if (mw_idx == sc->b2b_mw) {
652 		KASSERT(sc->b2b_off != 0,
653 		    ("user shouldn't get non-shared b2b mw"));
654 		off = sc->b2b_off;
655 	}
656 	mw = &sc->mw_info[mw_idx];
657 	split = (mw->mw_bar == 2) ? sc->split : 0;
658 	ss = (mw->mw_size - off) >> split;
659 
660 	/* Local to remote memory window parameters. */
661 	if (base != NULL)
662 		*base = mw->mw_pbase + off + ss * sp;
663 	if (vbase != NULL)
664 		*vbase = mw->mw_vbase + off + ss * sp;
665 	if (size != NULL)
666 		*size = ss;
667 
668 	/*
669 	 * Remote to local memory window translation address alignment.
670 	 * The translation address has to be aligned to the BAR size, but A-LUT
671 	 * entries remap addresses that can be aligned to 1/128 or 1/256 of it.
672 	 * XXX: In B2B mode we can change the BAR size (and so the alignment)
673 	 * live, but there is no way to report that here, so report a safe value.
674 	 */
675 	if (align != NULL) {
676 		if (sc->alut && mw->mw_bar == 2)
677 			*align = (mw->mw_size - off) / 128 / sc->alut;
678 		else
679 			*align = mw->mw_size - off;
680 	}
681 
682 	/*
683 	 * Remote to local memory window size alignment.
684 	 * The chip has no limit registers, but A-LUT, when available, allows
685 	 * access control with granularity of 1/128 or 1/256 of the BAR size.
686 	 * XXX: In the B2B case we can change the BAR size live, but there is
687 	 * no way to report it, so report half of the BAR size; that should be safe.
688 	 * In the non-B2B case there is no control at all, so report the BAR size.
689 	 */
690 	if (align_size != NULL) {
691 		if (sc->alut && mw->mw_bar == 2)
692 			*align_size = (mw->mw_size - off) / 128 / sc->alut;
693 		else if (sc->b2b_mw >= 0)
694 			*align_size = (mw->mw_size - off) / 2;
695 		else
696 			*align_size = mw->mw_size - off;
697 	}
698 
699 	/* Remote to local memory window translation address upper limit. */
700 	if (plimit != NULL)
701 		*plimit = mw->mw_64bit ? BUS_SPACE_MAXADDR :
702 		    BUS_SPACE_MAXADDR_32BIT;
703 	return (0);
704 }
705 
706 static int
707 ntb_plx_mw_set_trans_internal(device_t dev, unsigned mw_idx)
708 {
709 	struct ntb_plx_softc *sc = device_get_softc(dev);
710 	struct ntb_plx_mw_info *mw;
711 	uint64_t addr, eaddr, off, size, bsize, esize, val64;
712 	uint32_t val;
713 	unsigned i, sp, split;
714 
715 	mw = &sc->mw_info[mw_idx];
716 	off = (mw_idx == sc->b2b_mw) ? sc->b2b_off : 0;
717 	split = (mw->mw_bar == 2) ? sc->split : 0;
718 
719 	/* Get the BAR size.  In case of split or B2RP (NTB-to-Root Port) we can't change it. */
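	/*
	 * In the resizable case (B2B mode, no split) the BAR below is sized to
	 * the smallest power of two covering the requested translation size,
	 * with a 1MB minimum.
	 */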
720 	if (split || sc->b2b_mw < 0) {
721 		bsize = mw->mw_size - off;
722 	} else {
723 		bsize = mw->splits[0].mw_xlat_size;
724 		if (!powerof2(bsize))
725 			bsize = 1LL << flsll(bsize);
726 		if (bsize > 0 && bsize < 1024 * 1024)
727 			bsize = 1024 * 1024;
728 	}
729 
730 	/*
731 	 * While for B2B we can set any BAR size on the link side, for a shared
732 	 * window we can't go above the preconfigured size due to BAR address
733 	 * alignment requirements.
734 	 */
735 	if ((off & (bsize - 1)) != 0)
736 		return (EINVAL);
737 
738 	/* In B2B mode set Link Interface BAR size/address. */
739 	if (sc->b2b_mw >= 0 && mw->mw_64bit) {
740 		val64 = 0;
741 		if (bsize > 0)
742 			val64 = (~(bsize - 1) & ~0xfffff);
743 		val64 |= 0xc;
744 		PNTX_WRITE(sc, 0xe8 + (mw->mw_bar - 2) * 4, val64);
745 		PNTX_WRITE(sc, 0xe8 + (mw->mw_bar - 2) * 4 + 4, val64 >> 32);
746 
747 		val64 = 0x2000000000000000 * mw->mw_bar + off;
748 		PNTX_WRITE(sc, PCIR_BAR(mw->mw_bar), val64);
749 		PNTX_WRITE(sc, PCIR_BAR(mw->mw_bar) + 4, val64 >> 32);
750 	} else if (sc->b2b_mw >= 0) {
751 		val = 0;
752 		if (bsize > 0)
753 			val = (~(bsize - 1) & ~0xfffff);
754 		PNTX_WRITE(sc, 0xe8 + (mw->mw_bar - 2) * 4, val);
755 
756 		val64 = 0x20000000 * mw->mw_bar + off;
757 		PNTX_WRITE(sc, PCIR_BAR(mw->mw_bar), val64);
758 	}
759 
760 	/* Set the BAR address translation. */
761 	addr = split ? UINT64_MAX : mw->splits[0].mw_xlat_addr;
762 	if (mw->mw_64bit) {
763 		PNTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, addr);
764 		PNTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4 + 4, addr >> 32);
765 	} else {
766 		PNTX_WRITE(sc, 0xc3c + (mw->mw_bar - 2) * 4, addr);
767 	}
768 
769 	/* Configure and enable A-LUT if we need it. */
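	/*
	 * Each A-LUT covers BAR2 with 128 * sc->alut entries, each translating
	 * an equal slice of the BAR.  For every entry we program the low and
	 * high halves of the translated address plus a control word that
	 * enables the entry (0x3) or leaves it disabled (0); the three writes
	 * below land at offsets +0x0, +0x400 and +0x800 of the A-LUT arrays.
	 */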
770 	size = split ? 0 : mw->splits[0].mw_xlat_size;
771 	if (sc->alut && mw->mw_bar == 2 && (sc->split > 0 ||
772 	    ((addr & (bsize - 1)) != 0 || size != bsize))) {
773 		esize = bsize / (128 * sc->alut);
774 		for (i = sp = 0; i < 128 * sc->alut; i++) {
775 			if (i % (128 * sc->alut >> sc->split) == 0) {
776 				eaddr = addr = mw->splits[sp].mw_xlat_addr;
777 				size = mw->splits[sp++].mw_xlat_size;
778 			}
779 			val = sc->link ? 0 : 1;
780 			if (sc->alut == 1)
781 				val += 2 * sc->ntx;
782 			val *= 0x1000 * sc->alut;
783 			val += 0x38000 + i * 4 + (i >= 128 ? 0x0e00 : 0);
784 			bus_write_4(sc->conf_res, val, eaddr);
785 			bus_write_4(sc->conf_res, val + 0x400, eaddr >> 32);
786 			bus_write_4(sc->conf_res, val + 0x800,
787 			    (eaddr < addr + size) ? 0x3 : 0);
788 			eaddr += esize;
789 		}
790 		NTX_WRITE(sc, 0xc94, 0x10000000);
791 	} else if (sc->alut && mw->mw_bar == 2)
792 		NTX_WRITE(sc, 0xc94, 0);
793 
794 	return (0);
795 }
796 
797 static int
798 ntb_plx_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size)
799 {
800 	struct ntb_plx_softc *sc = device_get_softc(dev);
801 	struct ntb_plx_mw_info *mw;
802 	unsigned sp;
803 
804 	mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp);
805 	if (mw_idx >= sc->mw_count)
806 		return (EINVAL);
807 	mw = &sc->mw_info[mw_idx];
808 	if (!mw->mw_64bit &&
809 	    ((addr & UINT32_MAX) != addr ||
810 	     ((addr + size) & UINT32_MAX) != (addr + size)))
811 		return (ERANGE);
812 	mw->splits[sp].mw_xlat_addr = addr;
813 	mw->splits[sp].mw_xlat_size = size;
814 	return (ntb_plx_mw_set_trans_internal(dev, mw_idx));
815 }
816 
817 static int
818 ntb_plx_mw_clear_trans(device_t dev, unsigned mw_idx)
819 {
820 
821 	return (ntb_plx_mw_set_trans(dev, mw_idx, 0, 0));
822 }
823 
824 static int
825 ntb_plx_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode)
826 {
827 	struct ntb_plx_softc *sc = device_get_softc(dev);
828 	struct ntb_plx_mw_info *mw;
829 	unsigned sp;
830 
831 	mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp);
832 	if (mw_idx >= sc->mw_count)
833 		return (EINVAL);
834 	mw = &sc->mw_info[mw_idx];
835 	*mode = mw->splits[sp].mw_map_mode;
836 	return (0);
837 }
838 
839 static int
840 ntb_plx_mw_set_wc(device_t dev, unsigned mw_idx, vm_memattr_t mode)
841 {
842 	struct ntb_plx_softc *sc = device_get_softc(dev);
843 	struct ntb_plx_mw_info *mw;
844 	uint64_t off, ss;
845 	int rc;
846 	unsigned sp, split;
847 
848 	mw_idx = ntb_plx_user_mw_to_idx(sc, mw_idx, &sp);
849 	if (mw_idx >= sc->mw_count)
850 		return (EINVAL);
851 	mw = &sc->mw_info[mw_idx];
852 	if (mw->splits[sp].mw_map_mode == mode)
853 		return (0);
854 
855 	off = 0;
856 	if (mw_idx == sc->b2b_mw) {
857 		KASSERT(sc->b2b_off != 0,
858 		    ("user shouldn't get non-shared b2b mw"));
859 		off = sc->b2b_off;
860 	}
861 
862 	split = (mw->mw_bar == 2) ? sc->split : 0;
863 	ss = (mw->mw_size - off) >> split;
864 	rc = pmap_change_attr((vm_offset_t)mw->mw_vbase + off + ss * sp,
865 	    ss, mode);
866 	if (rc == 0)
867 		mw->splits[sp].mw_map_mode = mode;
868 	return (rc);
869 }
870 
871 static uint8_t
872 ntb_plx_spad_count(device_t dev)
873 {
874 	struct ntb_plx_softc *sc = device_get_softc(dev);
875 
876 	return (sc->spad_count1 + sc->spad_count2);
877 }
878 
879 static int
880 ntb_plx_spad_write(device_t dev, unsigned int idx, uint32_t val)
881 {
882 	struct ntb_plx_softc *sc = device_get_softc(dev);
883 	u_int off;
884 
885 	if (idx >= sc->spad_count1 + sc->spad_count2)
886 		return (EINVAL);
887 
888 	if (idx < sc->spad_count1)
889 		off = sc->spad_off1 + idx * 4;
890 	else
891 		off = sc->spad_off2 + (idx - sc->spad_count1) * 4;
892 	bus_write_4(sc->conf_res, off, val);
893 	return (0);
894 }
895 
896 static void
897 ntb_plx_spad_clear(device_t dev)
898 {
899 	struct ntb_plx_softc *sc = device_get_softc(dev);
900 	int i;
901 
902 	for (i = 0; i < sc->spad_count1 + sc->spad_count2; i++)
903 		ntb_plx_spad_write(dev, i, 0);
904 }
905 
906 static int
907 ntb_plx_spad_read(device_t dev, unsigned int idx, uint32_t *val)
908 {
909 	struct ntb_plx_softc *sc = device_get_softc(dev);
910 	u_int off;
911 
912 	if (idx >= sc->spad_count1 + sc->spad_count2)
913 		return (EINVAL);
914 
915 	if (idx < sc->spad_count1)
916 		off = sc->spad_off1 + idx * 4;
917 	else
918 		off = sc->spad_off2 + (idx - sc->spad_count1) * 4;
919 	*val = bus_read_4(sc->conf_res, off);
920 	return (0);
921 }
922 
923 static int
924 ntb_plx_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
925 {
926 	struct ntb_plx_softc *sc = device_get_softc(dev);
927 	u_int off;
928 
929 	if (idx >= sc->spad_count1 + sc->spad_count2)
930 		return (EINVAL);
931 
932 	if (idx < sc->spad_count1)
933 		off = sc->spad_offp1 + idx * 4;
934 	else
935 		off = sc->spad_offp2 + (idx - sc->spad_count1) * 4;
936 	if (sc->b2b_mw >= 0)
937 		bus_write_4(sc->mw_info[sc->b2b_mw].mw_res, off, val);
938 	else
939 		bus_write_4(sc->conf_res, off, val);
940 	return (0);
941 }
942 
943 static int
944 ntb_plx_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
945 {
946 	struct ntb_plx_softc *sc = device_get_softc(dev);
947 	u_int off;
948 
949 	if (idx >= sc->spad_count1 + sc->spad_count2)
950 		return (EINVAL);
951 
952 	if (idx < sc->spad_count1)
953 		off = sc->spad_offp1 + idx * 4;
954 	else
955 		off = sc->spad_offp2 + (idx - sc->spad_count1) * 4;
956 	if (sc->b2b_mw >= 0)
957 		*val = bus_read_4(sc->mw_info[sc->b2b_mw].mw_res, off);
958 	else
959 		*val = bus_read_4(sc->conf_res, off);
960 	return (0);
961 }
962 
963 static uint64_t
964 ntb_plx_db_valid_mask(device_t dev)
965 {
966 
967 	return ((1LL << PLX_NUM_DB) - 1);
968 }
969 
970 static int
971 ntb_plx_db_vector_count(device_t dev)
972 {
973 
974 	return (1);
975 }
976 
977 static uint64_t
978 ntb_plx_db_vector_mask(device_t dev, uint32_t vector)
979 {
980 
981 	if (vector > 0)
982 		return (0);
983 	return ((1LL << PLX_NUM_DB) - 1);
984 }
985 
986 static void
987 ntb_plx_db_clear(device_t dev, uint64_t bits)
988 {
989 	struct ntb_plx_softc *sc = device_get_softc(dev);
990 
991 	NTX_WRITE(sc, sc->link ? 0xc60 : 0xc50, bits);
992 }
993 
994 static void
995 ntb_plx_db_clear_mask(device_t dev, uint64_t bits)
996 {
997 	struct ntb_plx_softc *sc = device_get_softc(dev);
998 
999 	NTX_WRITE(sc, sc->link ? 0xc68 : 0xc58, bits);
1000 }
1001 
1002 static uint64_t
1003 ntb_plx_db_read(device_t dev)
1004 {
1005 	struct ntb_plx_softc *sc = device_get_softc(dev);
1006 
1007 	return (NTX_READ(sc, sc->link ? 0xc5c : 0xc4c));
1008 }
1009 
1010 static void
1011 ntb_plx_db_set_mask(device_t dev, uint64_t bits)
1012 {
1013 	struct ntb_plx_softc *sc = device_get_softc(dev);
1014 
1015 	NTX_WRITE(sc, sc->link ? 0xc64 : 0xc54, bits);
1016 }
1017 
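/*
 * Physical address that can be written (for example by a DMA engine) to ring
 * the peer's doorbell: in B2B mode the register is reached through the shared
 * memory window, otherwise through our own chip configuration space.
 */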
1018 static int
1019 ntb_plx_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
1020 {
1021 	struct ntb_plx_softc *sc = device_get_softc(dev);
1022 	struct ntb_plx_mw_info *mw;
1023 
1024 	KASSERT((db_addr != NULL && db_size != NULL), ("must be non-NULL"));
1025 
1026 	if (sc->b2b_mw >= 0) {
1027 		mw = &sc->mw_info[sc->b2b_mw];
1028 		*db_addr = (uint64_t)mw->mw_pbase + PLX_NTX_BASE(sc) + 0xc4c;
1029 	} else {
1030 		*db_addr = rman_get_start(sc->conf_res) + PLX_NTX_BASE(sc);
1031 		*db_addr += sc->link ? 0xc4c : 0xc5c;
1032 	}
1033 	*db_size = 4;
1034 	return (0);
1035 }
1036 
1037 static void
1038 ntb_plx_peer_db_set(device_t dev, uint64_t bit)
1039 {
1040 	struct ntb_plx_softc *sc = device_get_softc(dev);
1041 
1042 	if (sc->b2b_mw >= 0)
1043 		BNTX_WRITE(sc, 0xc4c, bit);
1044 	else
1045 		NTX_WRITE(sc, sc->link ? 0xc4c : 0xc5c, bit);
1046 }
1047 
1048 static device_method_t ntb_plx_methods[] = {
1049 	/* Device interface */
1050 	DEVMETHOD(device_probe,		ntb_plx_probe),
1051 	DEVMETHOD(device_attach,	ntb_plx_attach),
1052 	DEVMETHOD(device_detach,	ntb_plx_detach),
1053 	/* Bus interface */
1054 	DEVMETHOD(bus_child_location,	ntb_child_location),
1055 	DEVMETHOD(bus_print_child,	ntb_print_child),
1056 	DEVMETHOD(bus_get_dma_tag,	ntb_get_dma_tag),
1057 	/* NTB interface */
1058 	DEVMETHOD(ntb_port_number,	ntb_plx_port_number),
1059 	DEVMETHOD(ntb_peer_port_count,	ntb_plx_peer_port_count),
1060 	DEVMETHOD(ntb_peer_port_number,	ntb_plx_peer_port_number),
1061 	DEVMETHOD(ntb_peer_port_idx, 	ntb_plx_peer_port_idx),
1062 	DEVMETHOD(ntb_link_is_up,	ntb_plx_link_is_up),
1063 	DEVMETHOD(ntb_link_enable,	ntb_plx_link_enable),
1064 	DEVMETHOD(ntb_link_disable,	ntb_plx_link_disable),
1065 	DEVMETHOD(ntb_link_enabled,	ntb_plx_link_enabled),
1066 	DEVMETHOD(ntb_mw_count,		ntb_plx_mw_count),
1067 	DEVMETHOD(ntb_mw_get_range,	ntb_plx_mw_get_range),
1068 	DEVMETHOD(ntb_mw_set_trans,	ntb_plx_mw_set_trans),
1069 	DEVMETHOD(ntb_mw_clear_trans,	ntb_plx_mw_clear_trans),
1070 	DEVMETHOD(ntb_mw_get_wc,	ntb_plx_mw_get_wc),
1071 	DEVMETHOD(ntb_mw_set_wc,	ntb_plx_mw_set_wc),
1072 	DEVMETHOD(ntb_spad_count,	ntb_plx_spad_count),
1073 	DEVMETHOD(ntb_spad_clear,	ntb_plx_spad_clear),
1074 	DEVMETHOD(ntb_spad_write,	ntb_plx_spad_write),
1075 	DEVMETHOD(ntb_spad_read,	ntb_plx_spad_read),
1076 	DEVMETHOD(ntb_peer_spad_write,	ntb_plx_peer_spad_write),
1077 	DEVMETHOD(ntb_peer_spad_read,	ntb_plx_peer_spad_read),
1078 	DEVMETHOD(ntb_db_valid_mask,	ntb_plx_db_valid_mask),
1079 	DEVMETHOD(ntb_db_vector_count,	ntb_plx_db_vector_count),
1080 	DEVMETHOD(ntb_db_vector_mask,	ntb_plx_db_vector_mask),
1081 	DEVMETHOD(ntb_db_clear,		ntb_plx_db_clear),
1082 	DEVMETHOD(ntb_db_clear_mask,	ntb_plx_db_clear_mask),
1083 	DEVMETHOD(ntb_db_read,		ntb_plx_db_read),
1084 	DEVMETHOD(ntb_db_set_mask,	ntb_plx_db_set_mask),
1085 	DEVMETHOD(ntb_peer_db_addr,	ntb_plx_peer_db_addr),
1086 	DEVMETHOD(ntb_peer_db_set,	ntb_plx_peer_db_set),
1087 	DEVMETHOD_END
1088 };
1089 
1090 static DEFINE_CLASS_0(ntb_hw, ntb_plx_driver, ntb_plx_methods,
1091     sizeof(struct ntb_plx_softc));
1092 DRIVER_MODULE(ntb_hw_plx, pci, ntb_plx_driver, NULL, NULL);
1093 MODULE_DEPEND(ntb_hw_plx, ntb, 1, 1, 1);
1094 MODULE_VERSION(ntb_hw_plx, 1);
1095