/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2022 Scott Long
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_thunderbolt.h"

/* PCIe interface for Thunderbolt Native Host Interface */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <dev/thunderbolt/tb_reg.h>
#include <dev/thunderbolt/nhi_reg.h>
#include <dev/thunderbolt/nhi_var.h>
#include <dev/thunderbolt/tbcfg_reg.h>
#include <dev/thunderbolt/router_var.h>
#include <dev/thunderbolt/tb_debug.h>
#include "tb_if.h"

static int nhi_pci_probe(device_t);
static int nhi_pci_attach(device_t);
static int nhi_pci_detach(device_t);
static int nhi_pci_suspend(device_t);
static int nhi_pci_resume(device_t);
static void nhi_pci_free(struct nhi_softc *);
static int nhi_pci_allocate_interrupts(struct nhi_softc *);
static void nhi_pci_free_interrupts(struct nhi_softc *);
static int nhi_pci_icl_poweron(struct nhi_softc *);

static device_method_t nhi_methods[] = {
	DEVMETHOD(device_probe, nhi_pci_probe),
	DEVMETHOD(device_attach, nhi_pci_attach),
	DEVMETHOD(device_detach, nhi_pci_detach),
	DEVMETHOD(device_suspend, nhi_pci_suspend),
	DEVMETHOD(device_resume, nhi_pci_resume),

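	/* Thunderbolt interface methods (see tb_if.h) */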
	DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
	DEVMETHOD(tb_get_debug, tb_generic_get_debug),

	DEVMETHOD_END
};

static driver_t nhi_pci_driver = {
	"nhi",
	nhi_methods,
	sizeof(struct nhi_softc)
};

struct nhi_ident {
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;
	uint32_t flags;
	const char *desc;
} nhi_identifiers[] = {
	{ VENDOR_INTEL, DEVICE_AR_2C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge 2C)" },
	{ VENDOR_INTEL, DEVICE_AR_DP_B_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge 4C Rev B)" },
	{ VENDOR_INTEL, DEVICE_AR_DP_C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge 4C Rev C)" },
	{ VENDOR_INTEL, DEVICE_AR_LP_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
	    "Thunderbolt 3 NHI (Alpine Ridge LP 2C)" },
	{ VENDOR_INTEL, DEVICE_ICL_NHI_0, 0xffff, 0xffff, NHI_TYPE_ICL,
	    "Thunderbolt 3 NHI Port 0 (IceLake)" },
	{ VENDOR_INTEL, DEVICE_ICL_NHI_1, 0xffff, 0xffff, NHI_TYPE_ICL,
	    "Thunderbolt 3 NHI Port 1 (IceLake)" },
	{ VENDOR_AMD, DEVICE_PINK_SARDINE_0, 0xffff, 0xffff, NHI_TYPE_USB4,
	    "USB4 NHI Port 0 (Pink Sardine)" },
	{ VENDOR_AMD, DEVICE_PINK_SARDINE_1, 0xffff, 0xffff, NHI_TYPE_USB4,
	    "USB4 NHI Port 1 (Pink Sardine)" },
	{ 0, 0, 0, 0, 0, NULL }
};

DRIVER_MODULE_ORDERED(nhi, pci, nhi_pci_driver, NULL, NULL,
    SI_ORDER_ANY);
MODULE_PNP_INFO("U16:vendor;U16:device;V16:subvendor;V16:subdevice;U32:#;D:#",
    pci, nhi, nhi_identifiers, nitems(nhi_identifiers) - 1);

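/*
 * Match a PCI device against the identifier table above.  A subvendor or
 * subdevice of 0xffff acts as a wildcard.
 */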
static struct nhi_ident *
nhi_find_ident(device_t dev)
{
	struct nhi_ident *n;

	for (n = nhi_identifiers; n->vendor != 0; n++) {
		if (n->vendor != pci_get_vendor(dev))
			continue;
		if (n->device != pci_get_device(dev))
			continue;
		if ((n->subvendor != 0xffff) &&
		    (n->subvendor != pci_get_subvendor(dev)))
			continue;
		if ((n->subdevice != 0xffff) &&
		    (n->subdevice != pci_get_subdevice(dev)))
			continue;
		return (n);
	}

	return (NULL);
}

static int
nhi_pci_probe(device_t dev)
{
	struct nhi_ident *n;

	if (resource_disabled("tb", 0))
		return (ENXIO);
	if ((n = nhi_find_ident(dev)) != NULL) {
		device_set_desc(dev, n->desc);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

static int
nhi_pci_attach(device_t dev)
{
	devclass_t dc;
	bus_dma_template_t t;
	struct nhi_softc *sc;
	struct nhi_ident *n;
	int error = 0;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->dev = dev;
	n = nhi_find_ident(dev);
	sc->hwflags = n->flags;
	nhi_get_tunables(sc);

	tb_debug(sc, DBG_INIT|DBG_FULL, "busmaster status was %s\n",
	    (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
	    ? "enabled" : "disabled");
	pci_enable_busmaster(dev);

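	/*
	 * Find the Upstream Facing Port that this NHI is associated with.
	 * Ask the parent bus via the TB_FIND_UFP method first; if that
	 * fails, fall back to looking for a "tbolt" device with the same
	 * unit number.
	 */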
	sc->ufp = NULL;
	if ((TB_FIND_UFP(dev, &sc->ufp) != 0) || (sc->ufp == NULL)) {
		dc = devclass_find("tbolt");
		if (dc != NULL)
			sc->ufp = devclass_get_device(dc, device_get_unit(dev));
	}
	if (sc->ufp == NULL)
		tb_printf(sc, "Cannot find Upstream Facing Port\n");
	else
		tb_printf(sc, "Upstream Facing Port is %s\n",
		    device_get_nameunit(sc->ufp));

	if (NHI_IS_ICL(sc)) {
		if ((error = nhi_pci_icl_poweron(sc)) != 0)
			return (error);
	}

	/* Allocate BAR0 DMA registers */
	sc->regs_rid = PCIR_BAR(0);
	if ((sc->regs_resource = bus_alloc_resource_any(dev,
	    SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) {
		tb_printf(sc, "Cannot allocate PCI registers\n");
		return (ENXIO);
	}
	sc->regs_btag = rman_get_bustag(sc->regs_resource);
	sc->regs_bhandle = rman_get_bushandle(sc->regs_resource);

	/* Allocate parent DMA tag */
	bus_dma_template_init(&t, bus_get_dma_tag(dev));
	if (bus_dma_template_tag(&t, &sc->parent_dmat) != 0) {
		tb_printf(sc, "Cannot allocate parent DMA tag\n");
		nhi_pci_free(sc);
		return (ENOMEM);
	}

	error = nhi_pci_allocate_interrupts(sc);
	if (error == 0)
		error = nhi_attach(sc);
	if (error != 0)
		nhi_pci_detach(sc->dev);
	return (error);
}

static int
nhi_pci_detach(device_t dev)
{
	struct nhi_softc *sc;

	sc = device_get_softc(dev);

	nhi_detach(sc);
	nhi_pci_free(sc);

	return (0);
}

static int
nhi_pci_suspend(device_t dev)
{

	return (0);
}

static int
nhi_pci_resume(device_t dev)
{

	return (0);
}

static void
nhi_pci_free(struct nhi_softc *sc)
{

	nhi_pci_free_interrupts(sc);

	if (sc->parent_dmat != NULL) {
		bus_dma_tag_destroy(sc->parent_dmat);
		sc->parent_dmat = NULL;
	}

	if (sc->regs_resource != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->regs_rid, sc->regs_resource);
		sc->regs_resource = NULL;
	}

	return;
}

static int
nhi_pci_allocate_interrupts(struct nhi_softc *sc)
{
	int msgs, error = 0;

	/* Map the Pending Bit Array and Vector Table BARs for MSI-X */
	sc->irq_pba_rid = pci_msix_pba_bar(sc->dev);
	sc->irq_table_rid = pci_msix_table_bar(sc->dev);

	if (sc->irq_pba_rid != -1)
		sc->irq_pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		    &sc->irq_pba_rid, RF_ACTIVE);
	if (sc->irq_table_rid != -1)
		sc->irq_table = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
		    &sc->irq_table_rid, RF_ACTIVE);

	msgs = pci_msix_count(sc->dev);
	tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
	    "Counted %d MSI-X messages\n", msgs);
	msgs = min(msgs, NHI_MSIX_MAX);
	msgs = max(msgs, 1);
	if (msgs != 0) {
		tb_debug(sc, DBG_INIT|DBG_INTR, "Attempting to allocate %d "
		    "MSI-X interrupts\n", msgs);
		error = pci_alloc_msix(sc->dev, &msgs);
		tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
		    "pci_alloc_msix returned msgs= %d, error= %d\n", msgs,
		    error);
	}

	if ((error != 0) || (msgs <= 0)) {
		tb_printf(sc, "Failed to allocate any interrupts\n");
		msgs = 0;
	}

	sc->msix_count = msgs;
	return (error);
}

static void
nhi_pci_free_interrupts(struct nhi_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_count; i++) {
		bus_teardown_intr(sc->dev, sc->irqs[i], sc->intrhand[i]);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid[i],
		    sc->irqs[i]);
	}

	pci_release_msi(sc->dev);

	if (sc->irq_table != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->irq_table_rid, sc->irq_table);
		sc->irq_table = NULL;
	}

	if (sc->irq_pba != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->irq_pba_rid, sc->irq_pba);
		sc->irq_pba = NULL;
	}

	if (sc->intr_trackers != NULL)
		free(sc->intr_trackers, M_NHI);
	return;
}

int
nhi_pci_configure_interrupts(struct nhi_softc *sc)
{
	struct nhi_intr_tracker *trkr;
	int rid, i, error;

	nhi_pci_disable_interrupts(sc);

	sc->intr_trackers = malloc(sizeof(struct nhi_intr_tracker) *
	    sc->msix_count, M_NHI, M_ZERO | M_NOWAIT);
	if (sc->intr_trackers == NULL) {
		tb_debug(sc, DBG_INIT, "Cannot allocate intr trackers\n");
		return (ENOMEM);
	}

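	/*
	 * MSI-X vectors use SYS_RES_IRQ rids starting at 1; rid 0 refers to
	 * the legacy INTx interrupt.
	 */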
	for (i = 0; i < sc->msix_count; i++) {
		rid = i + 1;
		trkr = &sc->intr_trackers[i];
		trkr->sc = sc;
		trkr->ring = NULL;
		trkr->vector = i;

		sc->irq_rid[i] = rid;
		sc->irqs[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &sc->irq_rid[i], RF_ACTIVE);
		if (sc->irqs[i] == NULL) {
			tb_debug(sc, DBG_INIT,
			    "Cannot allocate interrupt RID %d\n",
			    sc->irq_rid[i]);
			error = ENOMEM;
			break;
		}
		error = bus_setup_intr(sc->dev, sc->irqs[i], INTR_TYPE_BIO |
		    INTR_MPSAFE, NULL, nhi_intr, trkr, &sc->intrhand[i]);
		if (error) {
			tb_debug(sc, DBG_INIT,
			    "cannot setup interrupt RID %d\n", sc->irq_rid[i]);
			break;
		}
	}

	tb_debug(sc, DBG_INIT, "Set up %d interrupts\n", sc->msix_count);

	/* Set the interrupt throttle rate to 128us */
	for (i = 0; i < 16; i++)
		nhi_write_reg(sc, NHI_ITR0 + i * 4, 0x1f4);
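	/* 0x1f4 is 500; assuming 256 ns units, that works out to 128 us. */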

	return (error);
}

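/*
 * Set the field selected by 'mask' at bit position 'offset' to 'val' within
 * an array of 32-bit registers.  The macro uses the caller's 'reg' variable
 * and 'ivr' array, and it modifies 'offset' in place.
 */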
#define NHI_SET_INTERRUPT(offset, mask, val)	\
do {						\
	reg = offset / 32;			\
	offset %= 32;				\
	ivr[reg] &= ~(mask << offset);		\
	ivr[reg] |= (val << offset);		\
} while (0)

void
nhi_pci_enable_interrupt(struct nhi_ring_pair *r)
{
	struct nhi_softc *sc = r->sc;
	uint32_t ivr[5];
	u_int offset, reg;

	tb_debug(sc, DBG_INIT|DBG_INTR, "Enabling interrupts for ring %d\n",
	    r->ring_num);
	/*
	 * Compute the routing between event type and MSI-X vector.
	 * 4 bits per descriptor.
	 */
	ivr[0] = nhi_read_reg(sc, NHI_IVR0);
	ivr[1] = nhi_read_reg(sc, NHI_IVR1);
	ivr[2] = nhi_read_reg(sc, NHI_IVR2);
	ivr[3] = nhi_read_reg(sc, NHI_IVR3);
	ivr[4] = nhi_read_reg(sc, NHI_IVR4);

	/* Program TX */
	offset = (r->ring_num + IVR_TX_OFFSET) * 4;
	NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);

	/* Now program RX */
	offset = (r->ring_num + IVR_RX_OFFSET) * 4;
	NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);

	/* Last, program Nearly Empty. This one always goes to vector 15 */
	offset = (r->ring_num + IVR_NE_OFFSET) * 4;
	NHI_SET_INTERRUPT(offset, 0x0f, 0x0f);

	nhi_write_reg(sc, NHI_IVR0, ivr[0]);
	nhi_write_reg(sc, NHI_IVR1, ivr[1]);
	nhi_write_reg(sc, NHI_IVR2, ivr[2]);
	nhi_write_reg(sc, NHI_IVR3, ivr[3]);
	nhi_write_reg(sc, NHI_IVR4, ivr[4]);

	tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
	    "Wrote IVR 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
	    ivr[0], ivr[1], ivr[2], ivr[3], ivr[4]);

	/* Now do the Interrupt Mask Register, 1 bit per descriptor */
	ivr[0] = nhi_read_reg(sc, NHI_IMR0);
	ivr[1] = nhi_read_reg(sc, NHI_IMR1);

	/* Tx */
	offset = r->ring_num + IMR_TX_OFFSET;
	NHI_SET_INTERRUPT(offset, 0x01, 1);

	/* Rx */
	offset = r->ring_num + IMR_RX_OFFSET;
	NHI_SET_INTERRUPT(offset, 0x01, 1);

	/* NE */
	offset = r->ring_num + IMR_NE_OFFSET;
	NHI_SET_INTERRUPT(offset, 0x01, 1);

	nhi_write_reg(sc, NHI_IMR0, ivr[0]);
	nhi_write_reg(sc, NHI_IMR1, ivr[1]);
	tb_debug(sc, DBG_INIT|DBG_FULL,
	    "Wrote IMR 0x%08x 0x%08x\n", ivr[0], ivr[1]);
}

void
nhi_pci_disable_interrupts(struct nhi_softc *sc)
{

	tb_debug(sc, DBG_INIT, "Disabling interrupts\n");
	nhi_write_reg(sc, NHI_IMR0, 0);
	nhi_write_reg(sc, NHI_IMR1, 0);
	nhi_write_reg(sc, NHI_IVR0, 0);
	nhi_write_reg(sc, NHI_IVR1, 0);
	nhi_write_reg(sc, NHI_IVR2, 0);
	nhi_write_reg(sc, NHI_IVR3, 0);
	nhi_write_reg(sc, NHI_IVR4, 0);

	/* Dummy reads to clear pending bits */
	nhi_read_reg(sc, NHI_ISR0);
	nhi_read_reg(sc, NHI_ISR1);
}

/*
 * Icelake controllers need to be notified of power-on
 */
static int
nhi_pci_icl_poweron(struct nhi_softc *sc)
{
	device_t dev;
	uint32_t val;
	int i, error = 0;

	dev = sc->dev;
	val = pci_read_config(dev, ICL_VSCAP_9, 4);
	tb_debug(sc, DBG_INIT, "icl_poweron val= 0x%x\n", val);
	if (val & ICL_VSCAP9_FWREADY)
		return (0);

	val = pci_read_config(dev, ICL_VSCAP_22, 4);
	val |= ICL_VSCAP22_FORCEPWR;
	tb_debug(sc, DBG_INIT|DBG_FULL, "icl_poweron writing 0x%x\n", val);
	pci_write_config(dev, ICL_VSCAP_22, val, 4);

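	/* Poll for the firmware-ready bit for up to 15 seconds (15 x 1 s) */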
	error = ETIMEDOUT;
	for (i = 0; i < 15; i++) {
		DELAY(1000000);
		val = pci_read_config(dev, ICL_VSCAP_9, 4);
		if (val & ICL_VSCAP9_FWREADY) {
			error = 0;
			break;
		}
	}

	return (error);
}

/*
 * Icelake and Alderlake controllers store their UUID in PCI config space
 */
int
nhi_pci_get_uuid(struct nhi_softc *sc)
{
	device_t dev;
	uint32_t val[4];

	dev = sc->dev;
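	/*
	 * Only the low 64 bits of the UUID are available here; the
	 * remaining bytes are filled with all-ones.
	 */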
	val[0] = pci_read_config(dev, ICL_VSCAP_10, 4);
	val[1] = pci_read_config(dev, ICL_VSCAP_11, 4);
	val[2] = 0xffffffff;
	val[3] = 0xffffffff;

	bcopy(val, &sc->uuid, 16);
	return (0);
}