/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Scott Long
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_thunderbolt.h"

/* PCIe interface for the Thunderbolt Native Host Interface */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <dev/thunderbolt/tb_reg.h>
#include <dev/thunderbolt/nhi_reg.h>
#include <dev/thunderbolt/nhi_var.h>
#include <dev/thunderbolt/tbcfg_reg.h>
#include <dev/thunderbolt/router_var.h>
#include <dev/thunderbolt/tb_debug.h>
#include "tb_if.h"
static int	nhi_pci_probe(device_t);
static int	nhi_pci_attach(device_t);
static int	nhi_pci_detach(device_t);
static int	nhi_pci_suspend(device_t);
static int	nhi_pci_resume(device_t);
static void	nhi_pci_free(struct nhi_softc *);
static int	nhi_pci_allocate_interrupts(struct nhi_softc *);
static void	nhi_pci_free_resources(struct nhi_softc *);
static int	nhi_pci_icl_poweron(struct nhi_softc *);

static device_method_t nhi_methods[] = {
	DEVMETHOD(device_probe, nhi_pci_probe),
	DEVMETHOD(device_attach, nhi_pci_attach),
	DEVMETHOD(device_detach, nhi_pci_detach),
	DEVMETHOD(device_suspend, nhi_pci_suspend),
	DEVMETHOD(device_resume, nhi_pci_resume),

	DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
	DEVMETHOD(tb_get_debug, tb_generic_get_debug),

	DEVMETHOD_END
};

static driver_t nhi_pci_driver = {
	"nhi",
	nhi_methods,
	sizeof(struct nhi_softc)
};

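/*
 * Table of supported controllers.  A subvendor/subdevice of 0xffff is
 * treated as a wildcard by nhi_find_ident() below.
 */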
struct nhi_ident {
	uint16_t	vendor;
	uint16_t	device;
	uint16_t	subvendor;
	uint16_t	subdevice;
	uint32_t	flags;
	const char	*desc;
} nhi_identifiers[] = {
	{ VENDOR_INTEL, DEVICE_AR_2C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge 2C)" },
	{ VENDOR_INTEL, DEVICE_AR_DP_B_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge 4C Rev B)" },
	{ VENDOR_INTEL, DEVICE_AR_DP_C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge 4C Rev C)" },
	{ VENDOR_INTEL, DEVICE_AR_LP_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge LP 2C)" },
	{ VENDOR_INTEL, DEVICE_ICL_NHI_0, 0xffff, 0xffff, NHI_TYPE_ICL,
	    "Thunderbolt 3 NHI Port 0 (IceLake)" },
	{ VENDOR_INTEL, DEVICE_ICL_NHI_1, 0xffff, 0xffff, NHI_TYPE_ICL,
	    "Thunderbolt 3 NHI Port 1 (IceLake)" },
	{ VENDOR_AMD, DEVICE_PINK_SARDINE_0, 0xffff, 0xffff, NHI_TYPE_USB4,
	    "USB4 NHI Port 0 (Pink Sardine)" },
	{ VENDOR_AMD, DEVICE_PINK_SARDINE_1, 0xffff, 0xffff, NHI_TYPE_USB4,
	    "USB4 NHI Port 1 (Pink Sardine)" },
	{ 0, 0, 0, 0, 0, NULL }
};

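/*
 * The PNP info string below describes the layout of struct nhi_ident so
 * that devmatch(8) can auto-load the module; the all-zero terminator entry
 * is excluded from the table count.
 */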
DRIVER_MODULE_ORDERED(nhi, pci, nhi_pci_driver, NULL, NULL,
    SI_ORDER_ANY);
MODULE_PNP_INFO("U16:vendor;U16:device;V16:subvendor;V16:subdevice;U32:#;D:#",
    pci, nhi, nhi_identifiers, nitems(nhi_identifiers) - 1);

static struct nhi_ident *
nhi_find_ident(device_t dev)
{
	struct nhi_ident *n;

	for (n = nhi_identifiers; n->vendor != 0; n++) {
		if (n->vendor != pci_get_vendor(dev))
			continue;
		if (n->device != pci_get_device(dev))
			continue;
		if ((n->subvendor != 0xffff) &&
		    (n->subvendor != pci_get_subvendor(dev)))
			continue;
		if ((n->subdevice != 0xffff) &&
		    (n->subdevice != pci_get_subdevice(dev)))
			continue;
		return (n);
	}

	return (NULL);
}

static int
nhi_pci_probe(device_t dev)
{
	struct nhi_ident *n;

	if (resource_disabled("tb", 0))
		return (ENXIO);
	if ((n = nhi_find_ident(dev)) != NULL) {
		device_set_desc(dev, n->desc);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

static int
nhi_pci_attach(device_t dev)
{
	devclass_t dc;
	bus_dma_template_t t;
	struct nhi_softc *sc;
	struct nhi_ident *n;
	int error = 0;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->dev = dev;
	n = nhi_find_ident(dev);
	sc->hwflags = n->flags;
	nhi_get_tunables(sc);

	tb_debug(sc, DBG_INIT|DBG_FULL, "busmaster status was %s\n",
	    (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
	    ? "enabled" : "disabled");
	pci_enable_busmaster(dev);

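	/*
	 * Find the Upstream Facing Port.  The TB_FIND_UFP() method is tried
	 * first; if it produces nothing, fall back to the "tbolt" device
	 * that shares this NHI's unit number.
	 */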
	sc->ufp = NULL;
	if ((TB_FIND_UFP(dev, &sc->ufp) != 0) || (sc->ufp == NULL)) {
		dc = devclass_find("tbolt");
		if (dc != NULL)
			sc->ufp = devclass_get_device(dc,
			    device_get_unit(dev));
	}
	if (sc->ufp == NULL)
		tb_printf(sc, "Cannot find Upstream Facing Port\n");
	else
		tb_printf(sc, "Upstream Facing Port is %s\n",
		    device_get_nameunit(sc->ufp));

	if (NHI_IS_ICL(sc)) {
		if ((error = nhi_pci_icl_poweron(sc)) != 0)
			return (error);
	}

	/* Map the BAR0 register space */
	sc->regs_rid = PCIR_BAR(0);
	if ((sc->regs_resource = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) {
		tb_printf(sc, "Cannot allocate PCI registers\n");
		return (ENXIO);
	}
	sc->regs_btag = rman_get_bustag(sc->regs_resource);
	sc->regs_bhandle = rman_get_bushandle(sc->regs_resource);

	/* Allocate parent DMA tag */
	bus_dma_template_init(&t, bus_get_dma_tag(dev));
	if (bus_dma_template_tag(&t, &sc->parent_dmat) != 0) {
		tb_printf(sc, "Cannot allocate parent DMA tag\n");
		nhi_pci_free(sc);
		return (ENOMEM);
	}

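	/*
	 * Only the MSI-X message allocation happens here; the individual
	 * vectors are wired up later via nhi_pci_configure_interrupts().
	 */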
	error = nhi_pci_allocate_interrupts(sc);
	if (error == 0)
		error = nhi_attach(sc);
	if (error != 0)
		nhi_pci_detach(sc->dev);
	return (error);
}

static int
nhi_pci_detach(device_t dev)
{
	struct nhi_softc *sc;

	sc = device_get_softc(dev);

	nhi_detach(sc);
	nhi_pci_free(sc);

	return (0);
}

static int
nhi_pci_suspend(device_t dev)
{

	return (0);
}

static int
nhi_pci_resume(device_t dev)
{

	return (0);
}

static void
nhi_pci_free(struct nhi_softc *sc)
{

	nhi_pci_free_resources(sc);

	if (sc->parent_dmat != NULL) {
		bus_dma_tag_destroy(sc->parent_dmat);
		sc->parent_dmat = NULL;
	}

	if (sc->regs_resource != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->regs_rid, sc->regs_resource);
		sc->regs_resource = NULL;
	}

	return;
}

static int
nhi_pci_allocate_interrupts(struct nhi_softc *sc)
{
	int msgs, error = 0;

	/* Map the Pending Bit Array and Vector Table BARs for MSI-X */
	sc->irq_pba_rid = pci_msix_pba_bar(sc->dev);
	sc->irq_table_rid = pci_msix_table_bar(sc->dev);

	if (sc->irq_pba_rid != -1)
		sc->irq_pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		    &sc->irq_pba_rid, RF_ACTIVE);
	if (sc->irq_table_rid != -1)
		sc->irq_table = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		    &sc->irq_table_rid, RF_ACTIVE);

	msgs = pci_msix_count(sc->dev);
	tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
	    "Counted %d MSI-X messages\n", msgs);
	msgs = min(msgs, NHI_MSIX_MAX);
	msgs = max(msgs, 1);
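	/*
	 * The request is clamped to [1, NHI_MSIX_MAX]: never ask for more
	 * vectors than the driver supports, and always ask for at least one
	 * so that a missing MSI-X capability shows up below as a
	 * pci_alloc_msix() failure.
	 */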
	if (msgs != 0) {
		tb_debug(sc, DBG_INIT|DBG_INTR, "Attempting to allocate %d "
		    "MSI-X interrupts\n", msgs);
		error = pci_alloc_msix(sc->dev, &msgs);
		tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
		    "pci_alloc_msix returned msgs= %d, error= %d\n",
		    msgs, error);
	}

	if ((error != 0) || (msgs <= 0)) {
		tb_printf(sc, "Failed to allocate any interrupts\n");
		msgs = 0;
	}

	sc->msix_count = msgs;
	return (error);
}

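/*
 * Tear down the per-vector interrupt handlers and release the MSI-X
 * allocation; this is the inverse of nhi_pci_configure_interrupts().
 */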
void
nhi_pci_free_interrupts(struct nhi_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_count; i++) {
		bus_teardown_intr(sc->dev, sc->irqs[i], sc->intrhand[i]);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid[i],
		    sc->irqs[i]);
	}

	pci_release_msi(sc->dev);
}

static void
nhi_pci_free_resources(struct nhi_softc *sc)
{
	if (sc->irq_table != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->irq_table_rid, sc->irq_table);
		sc->irq_table = NULL;
	}

	if (sc->irq_pba != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->irq_pba_rid, sc->irq_pba);
		sc->irq_pba = NULL;
	}

	if (sc->intr_trackers != NULL)
		free(sc->intr_trackers, M_NHI);
	return;
}

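/*
 * Allocate and wire up one interrupt handler per MSI-X vector.  Each vector
 * gets an nhi_intr_tracker so the shared nhi_intr() handler can tell which
 * vector fired; the tracker's ring pointer is presumably filled in later
 * when the core driver binds rings to vectors.
 */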
int
nhi_pci_configure_interrupts(struct nhi_softc *sc)
{
	struct nhi_intr_tracker *trkr;
	int rid, i, error = 0;

	nhi_pci_disable_interrupts(sc);

	sc->intr_trackers = malloc(sizeof(struct nhi_intr_tracker) *
	    sc->msix_count, M_NHI, M_ZERO | M_NOWAIT);
	if (sc->intr_trackers == NULL) {
		tb_debug(sc, DBG_INIT, "Cannot allocate intr trackers\n");
		return (ENOMEM);
	}

	for (i = 0; i < sc->msix_count; i++) {
		rid = i + 1;
		trkr = &sc->intr_trackers[i];
		trkr->sc = sc;
		trkr->ring = NULL;
		trkr->vector = i;

		sc->irq_rid[i] = rid;
		sc->irqs[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &sc->irq_rid[i], RF_ACTIVE);
		if (sc->irqs[i] == NULL) {
			tb_debug(sc, DBG_INIT,
			    "Cannot allocate interrupt RID %d\n",
			    sc->irq_rid[i]);
			error = ENXIO;
			break;
		}
		error = bus_setup_intr(sc->dev, sc->irqs[i], INTR_TYPE_BIO |
		    INTR_MPSAFE, NULL, nhi_intr, trkr, &sc->intrhand[i]);
		if (error) {
			tb_debug(sc, DBG_INIT,
			    "Cannot set up interrupt RID %d\n",
			    sc->irq_rid[i]);
			break;
		}
	}

	tb_debug(sc, DBG_INIT, "Set up %d interrupts\n", sc->msix_count);

	/* Set the interrupt throttle rate to 128us */
	for (i = 0; i < 16; i++)
		nhi_write_reg(sc, NHI_ITR0 + i * 4, 0x1f4);

	return (error);
}

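/*
 * Helper for updating one field in the IVR/IMR register images, which are
 * held in the caller's local "ivr" array of 32-bit words.  "offset" is a
 * bit offset into the whole array; it is reduced in place to a word index
 * (stored in the caller's "reg") plus a bit offset within that word, and
 * the field covered by "mask" is then replaced with "val".
 */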
#define NHI_SET_INTERRUPT(offset, mask, val)	\
do {						\
	reg = offset / 32;			\
	offset %= 32;				\
	ivr[reg] &= ~(mask << offset);		\
	ivr[reg] |= (val << offset);		\
} while (0)

void
nhi_pci_enable_interrupt(struct nhi_ring_pair *r)
{
	struct nhi_softc *sc = r->sc;
	uint32_t ivr[5];
	u_int offset, reg;

	tb_debug(sc, DBG_INIT|DBG_INTR, "Enabling interrupts for ring %d\n",
	    r->ring_num);
	/*
	 * Compute the routing between event type and MSI-X vector,
	 * 4 bits per ring event.
	 */
	ivr[0] = nhi_read_reg(sc, NHI_IVR0);
	ivr[1] = nhi_read_reg(sc, NHI_IVR1);
	ivr[2] = nhi_read_reg(sc, NHI_IVR2);
	ivr[3] = nhi_read_reg(sc, NHI_IVR3);
	ivr[4] = nhi_read_reg(sc, NHI_IVR4);

	/* Program TX */
	offset = (r->ring_num + IVR_TX_OFFSET) * 4;
	NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);

	/* Now program RX */
	offset = (r->ring_num + IVR_RX_OFFSET) * 4;
	NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);

	/* Last, program Nearly Empty; these always go to vector 15 */
	offset = (r->ring_num + IVR_NE_OFFSET) * 4;
	NHI_SET_INTERRUPT(offset, 0x0f, 0x0f);

	nhi_write_reg(sc, NHI_IVR0, ivr[0]);
	nhi_write_reg(sc, NHI_IVR1, ivr[1]);
	nhi_write_reg(sc, NHI_IVR2, ivr[2]);
	nhi_write_reg(sc, NHI_IVR3, ivr[3]);
	nhi_write_reg(sc, NHI_IVR4, ivr[4]);

	tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
	    "Wrote IVR 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    ivr[0], ivr[1], ivr[2], ivr[3], ivr[4]);

	/* Now do the Interrupt Mask Register, one bit per ring event */
	ivr[0] = nhi_read_reg(sc, NHI_IMR0);
	ivr[1] = nhi_read_reg(sc, NHI_IMR1);

	/* Tx */
	offset = r->ring_num + IMR_TX_OFFSET;
	NHI_SET_INTERRUPT(offset, 0x01, 1);

	/* Rx */
	offset = r->ring_num + IMR_RX_OFFSET;
	NHI_SET_INTERRUPT(offset, 0x01, 1);

	/* NE */
	offset = r->ring_num + IMR_NE_OFFSET;
	NHI_SET_INTERRUPT(offset, 0x01, 1);

	nhi_write_reg(sc, NHI_IMR0, ivr[0]);
	nhi_write_reg(sc, NHI_IMR1, ivr[1]);
	tb_debug(sc, DBG_INIT|DBG_FULL,
	    "Wrote IMR 0x%08x 0x%08x\n", ivr[0], ivr[1]);
}

void
nhi_pci_disable_interrupts(struct nhi_softc *sc)
{

	tb_debug(sc, DBG_INIT, "Disabling interrupts\n");
	nhi_write_reg(sc, NHI_IMR0, 0);
	nhi_write_reg(sc, NHI_IMR1, 0);
	nhi_write_reg(sc, NHI_IVR0, 0);
	nhi_write_reg(sc, NHI_IVR1, 0);
	nhi_write_reg(sc, NHI_IVR2, 0);
	nhi_write_reg(sc, NHI_IVR3, 0);
	nhi_write_reg(sc, NHI_IVR4, 0);

	/* Dummy reads to clear pending bits */
	nhi_read_reg(sc, NHI_ISR0);
	nhi_read_reg(sc, NHI_ISR1);
}

/*
 * Icelake controllers need to be notified of power-on
 */
static int
nhi_pci_icl_poweron(struct nhi_softc *sc)
{
	device_t dev;
	uint32_t val;
	int i, error = 0;

	dev = sc->dev;
	val = pci_read_config(dev, ICL_VSCAP_9, 4);
	tb_debug(sc, DBG_INIT, "icl_poweron val= 0x%x\n", val);
	if (val & ICL_VSCAP9_FWREADY)
		return (0);

	val = pci_read_config(dev, ICL_VSCAP_22, 4);
	val |= ICL_VSCAP22_FORCEPWR;
	tb_debug(sc, DBG_INIT|DBG_FULL, "icl_poweron writing 0x%x\n", val);
	pci_write_config(dev, ICL_VSCAP_22, val, 4);

	error = ETIMEDOUT;
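	/* Poll for up to 15 seconds for the firmware to report ready */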
	for (i = 0; i < 15; i++) {
		DELAY(1000000);
		val = pci_read_config(dev, ICL_VSCAP_9, 4);
		if (val & ICL_VSCAP9_FWREADY) {
			error = 0;
			break;
		}
	}

	return (error);
}

/*
 * Icelake and Alderlake controllers store their UUID in PCI config space
 */
int
nhi_pci_get_uuid(struct nhi_softc *sc)
{
	device_t dev;
	uint32_t val[4];

	dev = sc->dev;
	val[0] = pci_read_config(dev, ICL_VSCAP_10, 4);
	val[1] = pci_read_config(dev, ICL_VSCAP_11, 4);
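	/* Only the low 64 bits are exposed here; pad the rest with 1s */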
	val[2] = 0xffffffff;
	val[3] = 0xffffffff;

	bcopy(val, &sc->uuid, 16);
	return (0);
}