/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2022 Scott Long
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_thunderbolt.h"

/* PCIe interface for Thunderbolt Native Host Interface (nhi) */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/gsb_crc32.h>
#include <sys/endian.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/stdarg.h>

#include <dev/thunderbolt/nhi_reg.h>
#include <dev/thunderbolt/nhi_var.h>
#include <dev/thunderbolt/tb_reg.h>
#include <dev/thunderbolt/tb_var.h>
#include <dev/thunderbolt/tb_debug.h>
#include <dev/thunderbolt/hcm_var.h>
#include <dev/thunderbolt/tbcfg_reg.h>
#include <dev/thunderbolt/router_var.h>
#include <dev/thunderbolt/tb_dev.h>
#include "tb_if.h"

static int	nhi_alloc_ring(struct nhi_softc *, int, int, int,
		    struct nhi_ring_pair **);
static void	nhi_free_ring(struct nhi_ring_pair *);
static void	nhi_free_rings(struct nhi_softc *);
static int	nhi_configure_ring(struct nhi_softc *, struct nhi_ring_pair *);
static int	nhi_activate_ring(struct nhi_ring_pair *);
static int	nhi_deactivate_ring(struct nhi_ring_pair *);
static int	nhi_alloc_ring0(struct nhi_softc *);
static void	nhi_free_ring0(struct nhi_softc *);
static void	nhi_fill_rx_ring(struct nhi_softc *, struct nhi_ring_pair *);
static int	nhi_init(struct nhi_softc *);
static void	nhi_post_init(void *);
static int	nhi_tx_enqueue(struct nhi_ring_pair *, struct nhi_cmd_frame *);
static int	nhi_setup_sysctl(struct nhi_softc *);

SYSCTL_NODE(_hw, OID_AUTO, nhi, CTLFLAG_RD, 0, "NHI Driver Parameters");

MALLOC_DEFINE(M_NHI, "nhi", "nhi driver memory");

#ifndef NHI_DEBUG_LEVEL
#define NHI_DEBUG_LEVEL 0
#endif

/* 0 = default, 1 = force-on, 2 = force-off */
#ifndef NHI_FORCE_HCM
#define NHI_FORCE_HCM 0
#endif
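
/*
 * Both build-time defaults above can be overridden from the kernel config
 * file.  Illustrative sketch only; it assumes these option names are plumbed
 * through to opt_thunderbolt.h by the build glue:
 *
 *	options 	NHI_DEBUG_LEVEL=1
 *	options 	NHI_FORCE_HCM=2
 */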

void
nhi_get_tunables(struct nhi_softc *sc)
{
	devclass_t dc;
	device_t ufp;
	char tmpstr[80], oid[80];
	u_int val;

	/* Set local defaults */
	sc->debug = NHI_DEBUG_LEVEL;
	sc->max_ring_count = NHI_DEFAULT_NUM_RINGS;
	sc->force_hcm = NHI_FORCE_HCM;

	/* Inherit setting from the upstream thunderbolt switch node */
	val = TB_GET_DEBUG(sc->dev, &sc->debug);
	if (val != 0) {
		dc = devclass_find("tbolt");
		if (dc != NULL) {
			ufp = devclass_get_device(dc, device_get_unit(sc->dev));
			if (ufp != NULL)
				TB_GET_DEBUG(ufp, &sc->debug);
		} else {
			if (TUNABLE_STR_FETCH("hw.tbolt.debug_level", oid,
			    80) != 0)
				tb_parse_debug(&sc->debug, oid);
		}
	}

	/*
	 * Grab global variables.  Allow nhi debug flags to override
	 * thunderbolt debug flags, if present.
	 */
	bzero(oid, 80);
	if (TUNABLE_STR_FETCH("hw.nhi.debug_level", oid, 80) != 0)
		tb_parse_debug(&sc->debug, oid);
	if (TUNABLE_INT_FETCH("hw.nhi.max_rings", &val) != 0) {
		val = min(val, NHI_MAX_NUM_RINGS);
		sc->max_ring_count = max(val, 1);
	}
	if (TUNABLE_INT_FETCH("hw.nhi.force_hcm", &val) != 0)
		sc->force_hcm = val;

	/* Grab instance variables */
	bzero(oid, 80);
	snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.debug_level",
	    device_get_unit(sc->dev));
	if (TUNABLE_STR_FETCH(tmpstr, oid, 80) != 0)
		tb_parse_debug(&sc->debug, oid);
	snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.max_rings",
	    device_get_unit(sc->dev));
	if (TUNABLE_INT_FETCH(tmpstr, &val) != 0) {
		val = min(val, NHI_MAX_NUM_RINGS);
		sc->max_ring_count = max(val, 1);
	}
	snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.force_hcm",
	    device_get_unit(sc->dev));
	if (TUNABLE_INT_FETCH(tmpstr, &val) != 0)
		sc->force_hcm = val;

	return;
}
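
/*
 * Example loader.conf(5) settings for the tunables consumed above.
 * Illustrative only; the debug_level string is parsed by tb_parse_debug(),
 * so the exact token syntax is defined by that function:
 *
 *	hw.nhi.debug_level="<debug tokens>"
 *	hw.nhi.max_rings="8"
 *	dev.nhi.0.force_hcm="1"
 */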

static void
nhi_configure_caps(struct nhi_softc *sc)
{

	if (NHI_IS_USB4(sc) || (sc->force_hcm == NHI_FORCE_HCM_ON))
		sc->caps |= NHI_CAP_HCM;
	if (sc->force_hcm == NHI_FORCE_HCM_OFF)
		sc->caps &= ~NHI_CAP_HCM;
}

struct nhi_cmd_frame *
nhi_alloc_tx_frame(struct nhi_ring_pair *r)
{
	struct nhi_cmd_frame *cmd;

	mtx_lock(&r->mtx);
	cmd = nhi_alloc_tx_frame_locked(r);
	mtx_unlock(&r->mtx);

	return (cmd);
}

void
nhi_free_tx_frame(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
{
	mtx_lock(&r->mtx);
	nhi_free_tx_frame_locked(r, cmd);
	mtx_unlock(&r->mtx);
}

/*
 * Push a command and data dword through the mailbox to the firmware.
 * Response is either good, error, or timeout.  Commands that return data
 * do so by reading OUTMAILDATA.
 */
int
nhi_inmail_cmd(struct nhi_softc *sc, uint32_t cmd, uint32_t data)
{
	uint32_t val;
	u_int error, timeout;

	mtx_lock(&sc->nhi_mtx);
	/*
	 * XXX Should a defer/reschedule happen here, or is it not worth
	 * worrying about?
	 */
	if (sc->hwflags & NHI_MBOX_BUSY) {
		mtx_unlock(&sc->nhi_mtx);
		tb_debug(sc, DBG_MBOX, "Driver busy with mailbox\n");
		return (EBUSY);
	}
	sc->hwflags |= NHI_MBOX_BUSY;

	val = nhi_read_reg(sc, TBT_INMAILCMD);
	tb_debug(sc, DBG_MBOX|DBG_FULL, "Reading INMAILCMD= 0x%08x\n", val);
	if (val & INMAILCMD_ERROR)
		tb_debug(sc, DBG_MBOX, "Error already set in INMAILCMD\n");
	if (val & INMAILCMD_OPREQ) {
		mtx_unlock(&sc->nhi_mtx);
		tb_debug(sc, DBG_MBOX,
		    "INMAILCMD request already in progress\n");
		return (EBUSY);
	}

	nhi_write_reg(sc, TBT_INMAILDATA, data);
	nhi_write_reg(sc, TBT_INMAILCMD, cmd | INMAILCMD_OPREQ);

	/* Poll at 1s intervals */
	timeout = NHI_MAILBOX_TIMEOUT;
	while (timeout--) {
		DELAY(1000000);
		val = nhi_read_reg(sc, TBT_INMAILCMD);
		tb_debug(sc, DBG_MBOX|DBG_EXTRA,
		    "Polling INMAILCMD= 0x%08x\n", val);
		if ((val & INMAILCMD_OPREQ) == 0)
			break;
	}
	sc->hwflags &= ~NHI_MBOX_BUSY;
	mtx_unlock(&sc->nhi_mtx);

	error = 0;
	if (val & INMAILCMD_OPREQ) {
		tb_printf(sc, "Timeout waiting for mailbox\n");
		error = ETIMEDOUT;
	}
	if (val & INMAILCMD_ERROR) {
		tb_printf(sc, "Firmware reports error in mailbox\n");
		error = EINVAL;
	}

	return (error);
}

/*
 * Pull command status and data from the firmware mailbox.
 */
int
nhi_outmail_cmd(struct nhi_softc *sc, uint32_t *val)
{

	if (val == NULL)
		return (EINVAL);
	*val = nhi_read_reg(sc, TBT_OUTMAILCMD);
	return (0);
}
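
/*
 * Usage sketch for the mailbox pair above.  EXAMPLE_INMAIL_OP is
 * hypothetical; the real command values are defined in nhi_reg.h:
 *
 *	uint32_t result;
 *
 *	if (nhi_inmail_cmd(sc, EXAMPLE_INMAIL_OP, 0) == 0 &&
 *	    nhi_outmail_cmd(sc, &result) == 0)
 *		tb_debug(sc, DBG_MBOX, "OUTMAILCMD= 0x%08x\n", result);
 */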

int
nhi_attach(struct nhi_softc *sc)
{
	uint32_t val;
	int error = 0;

	if ((error = nhi_setup_sysctl(sc)) != 0)
		return (error);

	mtx_init(&sc->nhi_mtx, "nhimtx", "NHI Control Mutex", MTX_DEF);

	nhi_configure_caps(sc);

	/*
	 * Get the number of TX/RX paths.  This sizes some of the register
	 * arrays during allocation and initialization.  USB4 spec says that
	 * the max is 21.  Alpine Ridge appears to default to 12.
	 */
	val = GET_HOST_CAPS_PATHS(nhi_read_reg(sc, NHI_HOST_CAPS));
	tb_debug(sc, DBG_INIT|DBG_NOISY, "Total Paths= %d\n", val);
	if ((val == 0) || (val > 21) || (NHI_IS_AR(sc) && val != 12)) {
		tb_printf(sc, "WARN: unexpected number of paths: %d\n", val);
		/* return (ENXIO); */
	}
	sc->path_count = val;

	SLIST_INIT(&sc->ring_list);

	error = nhi_pci_configure_interrupts(sc);
	if (error == 0)
		error = nhi_alloc_ring0(sc);
	if (error == 0) {
		nhi_configure_ring(sc, sc->ring0);
		nhi_activate_ring(sc->ring0);
		nhi_fill_rx_ring(sc, sc->ring0);
	}

	if (error == 0)
		error = tbdev_add_interface(sc);

	if ((error == 0) && (NHI_USE_ICM(sc)))
		tb_printf(sc, "WARN: device uses an internal connection manager\n");
	if ((error == 0) && (NHI_USE_HCM(sc)))
		error = hcm_attach(sc);

	if (error == 0)
		error = nhi_init(sc);

	return (error);
}

int
nhi_detach(struct nhi_softc *sc)
{

	if (NHI_USE_HCM(sc))
		hcm_detach(sc);

	if (sc->root_rsc != NULL)
		tb_router_detach(sc->root_rsc);

	tbdev_remove_interface(sc);

	nhi_pci_disable_interrupts(sc);

	nhi_free_ring0(sc);

	/* XXX Should the rings be marked as !VALID in the descriptors? */
	nhi_free_rings(sc);

	mtx_destroy(&sc->nhi_mtx);

	return (0);
}

static void
nhi_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	if (error == 0 && nsegs == 1)
		*addr = segs[0].ds_addr;
	else
		*addr = 0;
}

static int
nhi_alloc_ring(struct nhi_softc *sc, int ringnum, int tx_depth, int rx_depth,
    struct nhi_ring_pair **rp)
{
	bus_dma_template_t t;
	bus_addr_t ring_busaddr;
	struct nhi_ring_pair *r;
	int ring_size, error;
	u_int rxring_len, txring_len;
	char *ring;

	if (ringnum >= sc->max_ring_count) {
		tb_debug(sc, DBG_INIT, "Tried to allocate ring number %d\n",
		    ringnum);
		return (EINVAL);
	}

	/* Allocate the ring structure and the RX ring tracker together. */
	rxring_len = rx_depth * sizeof(void *);
	txring_len = tx_depth * sizeof(void *);
	r = malloc(sizeof(struct nhi_ring_pair) + rxring_len + txring_len,
	    M_NHI, M_NOWAIT|M_ZERO);
	if (r == NULL) {
		tb_printf(sc, "ERROR: Cannot allocate ring memory\n");
		return (ENOMEM);
	}

	r->sc = sc;
	TAILQ_INIT(&r->tx_head);
	TAILQ_INIT(&r->rx_head);
	r->ring_num = ringnum;
	r->tx_ring_depth = tx_depth;
	r->tx_ring_mask = tx_depth - 1;
	r->rx_ring_depth = rx_depth;
	r->rx_ring_mask = rx_depth - 1;
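
	/*
	 * Note: the mask math above assumes the ring depths are powers of
	 * two, e.g. a depth of 16 gives a mask of 0xf, so an index advance
	 * of (15 + 1) & 0xf wraps cleanly back to 0.
	 */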
	r->rx_pici_reg = NHI_RX_RING_PICI + ringnum * 16;
	r->tx_pici_reg = NHI_TX_RING_PICI + ringnum * 16;
	r->rx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r + sizeof(*r));
	r->tx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r->rx_cmd_ring +
	    rxring_len);

	snprintf(r->name, NHI_RING_NAMELEN, "nhiring%d", ringnum);
	mtx_init(&r->mtx, r->name, "NHI Ring Lock", MTX_DEF);
	tb_debug(sc, DBG_INIT | DBG_FULL, "Allocated ring context at %p, "
	    "mutex %p\n", r, &r->mtx);

	/* Allocate the RX and TX buffer descriptor rings */
	ring_size = sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
	ring_size += sizeof(struct nhi_rx_buffer_desc) * r->rx_ring_depth;
	tb_debug(sc, DBG_INIT | DBG_FULL, "Ring %d ring_size= %d\n",
	    ringnum, ring_size);

	bus_dma_template_init(&t, sc->parent_dmat);
	t.alignment = 4;
	t.maxsize = t.maxsegsize = ring_size;
	t.nsegments = 1;
	if ((error = bus_dma_template_tag(&t, &r->ring_dmat)) != 0) {
		tb_printf(sc, "Cannot allocate ring %d DMA tag: %d\n",
		    ringnum, error);
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(r->ring_dmat, (void **)&ring, BUS_DMA_NOWAIT,
	    &r->ring_map)) {
		tb_printf(sc, "Cannot allocate ring memory\n");
		return (ENOMEM);
	}
	bzero(ring, ring_size);
	bus_dmamap_load(r->ring_dmat, r->ring_map, ring, ring_size,
	    nhi_memaddr_cb, &ring_busaddr, 0);

	r->ring = ring;

	r->tx_ring = (union nhi_ring_desc *)(ring);
	r->tx_ring_busaddr = ring_busaddr;
	ring += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
	ring_busaddr += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;

	r->rx_ring = (union nhi_ring_desc *)(ring);
	r->rx_ring_busaddr = ring_busaddr;

	tb_debug(sc, DBG_INIT | DBG_EXTRA, "Ring %d: TX %p [0x%jx] "
	    "RX %p [0x%jx]\n", ringnum, r->tx_ring, r->tx_ring_busaddr,
	    r->rx_ring, r->rx_ring_busaddr);

	*rp = r;
	return (0);
}
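
/*
 * Descriptive sketch of the single allocation made in nhi_alloc_ring(),
 * derived from the pointer math above:
 *
 *	+---------------------------+ <- r
 *	| struct nhi_ring_pair      |
 *	+---------------------------+ <- r->rx_cmd_ring
 *	| rx_depth cmd pointers     |
 *	+---------------------------+ <- r->tx_cmd_ring
 *	| tx_depth cmd pointers     |
 *	+---------------------------+
 */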

static void
nhi_free_ring(struct nhi_ring_pair *r)
{

	tb_debug(r->sc, DBG_INIT, "Freeing ring %d resources\n", r->ring_num);
	nhi_deactivate_ring(r);

	if (r->tx_ring_busaddr != 0) {
		bus_dmamap_unload(r->ring_dmat, r->ring_map);
		r->tx_ring_busaddr = 0;
	}
	if (r->ring != NULL) {
		bus_dmamem_free(r->ring_dmat, r->ring, r->ring_map);
		r->ring = NULL;
	}
	if (r->ring_dmat != NULL) {
		bus_dma_tag_destroy(r->ring_dmat);
		r->ring_dmat = NULL;
	}
	mtx_destroy(&r->mtx);
}

static void
nhi_free_rings(struct nhi_softc *sc)
{
	struct nhi_ring_pair *r;

	while ((r = SLIST_FIRST(&sc->ring_list)) != NULL) {
		nhi_free_ring(r);
		mtx_lock(&sc->nhi_mtx);
		SLIST_REMOVE_HEAD(&sc->ring_list, ring_link);
		mtx_unlock(&sc->nhi_mtx);
		free(r, M_NHI);
	}

	return;
}

static int
nhi_configure_ring(struct nhi_softc *sc, struct nhi_ring_pair *ring)
{
	bus_addr_t busaddr;
	uint32_t val;
	int idx;

	idx = ring->ring_num * 16;

	/* Program the TX ring address and size */
	busaddr = ring->tx_ring_busaddr;
	nhi_write_reg(sc, NHI_TX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
	nhi_write_reg(sc, NHI_TX_RING_ADDR_HI + idx, busaddr >> 32);
	nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, ring->tx_ring_depth);
	nhi_write_reg(sc, NHI_TX_RING_TABLE_TIMESTAMP + idx, 0x0);
	tb_debug(sc, DBG_INIT, "TX Ring %d TX_RING_SIZE= 0x%x\n",
	    ring->ring_num, ring->tx_ring_depth);

	/* Program the RX ring address and size */
	busaddr = ring->rx_ring_busaddr;
	val = (ring->rx_buffer_size << 16) | ring->rx_ring_depth;
	nhi_write_reg(sc, NHI_RX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
	nhi_write_reg(sc, NHI_RX_RING_ADDR_HI + idx, busaddr >> 32);
	nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, val);
	nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE1 + idx, 0xffffffff);
	tb_debug(sc, DBG_INIT, "RX Ring %d RX_RING_SIZE= 0x%x\n",
	    ring->ring_num, val);

	return (0);
}
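
/*
 * Note on register addressing: the per-ring descriptor registers used in
 * nhi_configure_ring() sit at a 16-byte stride (ring_num * 16), while the
 * table-control registers used below in nhi_activate_ring() and
 * nhi_deactivate_ring() sit at a 32-byte stride (ring_num * 32).
 */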

static int
nhi_activate_ring(struct nhi_ring_pair *ring)
{
	struct nhi_softc *sc = ring->sc;
	int idx;

	nhi_pci_enable_interrupt(ring);

	idx = ring->ring_num * 32;
	tb_debug(sc, DBG_INIT, "Activating ring %d at idx %d\n",
	    ring->ring_num, idx);
	nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + idx,
	    TX_TABLE_RAW | TX_TABLE_VALID);
	nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + idx,
	    RX_TABLE_RAW | RX_TABLE_VALID);

	return (0);
}

static int
nhi_deactivate_ring(struct nhi_ring_pair *r)
{
	struct nhi_softc *sc = r->sc;
	int idx;

	idx = r->ring_num * 32;
	tb_debug(sc, DBG_INIT, "Deactivating ring %d at idx %d\n",
	    r->ring_num, idx);
	nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + idx, 0);
	nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + idx, 0);

	idx = r->ring_num * 16;
	tb_debug(sc, DBG_INIT, "Setting ring %d sizes to 0\n", r->ring_num);
	nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, 0);
	nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, 0);

	return (0);
}

static int
nhi_alloc_ring0(struct nhi_softc *sc)
{
	bus_addr_t frames_busaddr;
	bus_dma_template_t t;
	struct nhi_intr_tracker *trkr;
	struct nhi_ring_pair *r;
	struct nhi_cmd_frame *cmd;
	char *frames;
	int error, size, i;

	if ((error = nhi_alloc_ring(sc, 0, NHI_RING0_TX_DEPTH,
	    NHI_RING0_RX_DEPTH, &r)) != 0) {
		tb_printf(sc, "Error allocating control ring\n");
		return (error);
	}

	r->rx_buffer_size = NHI_RING0_FRAME_SIZE; /* Control packets are small */

	/* Allocate the RX and TX buffers that are used for Ring0 comms */
	size = r->tx_ring_depth * NHI_RING0_FRAME_SIZE;
	size += r->rx_ring_depth * NHI_RING0_FRAME_SIZE;

	bus_dma_template_init(&t, sc->parent_dmat);
	t.maxsize = t.maxsegsize = size;
	t.nsegments = 1;
	if (bus_dma_template_tag(&t, &sc->ring0_dmat)) {
		tb_printf(sc, "Error allocating control ring buffer tag\n");
		return (ENOMEM);
	}

	if (bus_dmamem_alloc(sc->ring0_dmat, (void **)&frames, BUS_DMA_NOWAIT,
	    &sc->ring0_map) != 0) {
		tb_printf(sc, "Error allocating control ring memory\n");
		return (ENOMEM);
	}
	bzero(frames, size);
	bus_dmamap_load(sc->ring0_dmat, sc->ring0_map, frames, size,
	    nhi_memaddr_cb, &frames_busaddr, 0);
	sc->ring0_frames_busaddr = frames_busaddr;
	sc->ring0_frames = frames;

	/* Allocate the driver command trackers */
	sc->ring0_cmds = malloc(sizeof(struct nhi_cmd_frame) *
	    (r->tx_ring_depth + r->rx_ring_depth), M_NHI, M_NOWAIT | M_ZERO);
	if (sc->ring0_cmds == NULL)
		return (ENOMEM);

	/* Initialize the RX frames so they can be used */
	mtx_lock(&r->mtx);
	for (i = 0; i < r->rx_ring_depth; i++) {
		cmd = &sc->ring0_cmds[i];
		cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
		cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
		cmd->flags = CMD_MAPPED;
		cmd->idx = i;
		TAILQ_INSERT_TAIL(&r->rx_head, cmd, cm_link);
	}

	/* Initialize the TX frames */
	for ( ; i < r->tx_ring_depth + r->rx_ring_depth; i++) {
		cmd = &sc->ring0_cmds[i];
		cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
		cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
		cmd->flags = CMD_MAPPED;
		cmd->idx = i;
		nhi_free_tx_frame_locked(r, cmd);
	}
	mtx_unlock(&r->mtx);

	/* Do a 1:1 mapping of rings to interrupt vectors. */
	/* XXX Should be abstracted */
	trkr = &sc->intr_trackers[0];
	trkr->ring = r;
	r->tracker = trkr;

	/* XXX Should be an array */
	sc->ring0 = r;
	SLIST_INSERT_HEAD(&sc->ring_list, r, ring_link);

	return (0);
}

static void
nhi_free_ring0(struct nhi_softc *sc)
{
	if (sc->ring0_cmds != NULL) {
		free(sc->ring0_cmds, M_NHI);
		sc->ring0_cmds = NULL;
	}

	if (sc->ring0_frames_busaddr != 0) {
		bus_dmamap_unload(sc->ring0_dmat, sc->ring0_map);
		sc->ring0_frames_busaddr = 0;
	}

	if (sc->ring0_frames != NULL) {
		bus_dmamem_free(sc->ring0_dmat, sc->ring0_frames,
		    sc->ring0_map);
		sc->ring0_frames = NULL;
	}

	if (sc->ring0_dmat != NULL)
		bus_dma_tag_destroy(sc->ring0_dmat);

	return;
}

static void
nhi_fill_rx_ring(struct nhi_softc *sc, struct nhi_ring_pair *rp)
{
	struct nhi_cmd_frame *cmd;
	struct nhi_rx_buffer_desc *desc;
	u_int ci;

	/* Assume that we never grow or shrink the ring population */
	rp->rx_ci = ci = 0;
	rp->rx_pi = 0;

	do {
		cmd = TAILQ_FIRST(&rp->rx_head);
		if (cmd == NULL)
			break;
		TAILQ_REMOVE(&rp->rx_head, cmd, cm_link);
		desc = &rp->rx_ring[ci].rx;
		if ((cmd->flags & CMD_MAPPED) == 0)
			panic("Need rx buffer mapping code");

		desc->addr_lo = cmd->data_busaddr & 0xffffffff;
		desc->addr_hi = (cmd->data_busaddr >> 32) & 0xffffffff;
		desc->offset = 0;
		desc->flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
		rp->rx_ci = ci;
		rp->rx_cmd_ring[ci] = cmd;
		tb_debug(sc, DBG_RXQ | DBG_FULL,
		    "Updating ring%d ci= %d cmd= %p, busaddr= 0x%jx\n",
		    rp->ring_num, ci, cmd, cmd->data_busaddr);

		ci = (rp->rx_ci + 1) & rp->rx_ring_mask;
	} while (ci != rp->rx_pi);

	/* Update the CI in one shot */
	tb_debug(sc, DBG_RXQ, "Writing RX CI= %d\n", rp->rx_ci);
	nhi_write_reg(sc, rp->rx_pici_reg, rp->rx_ci);

	return;
}

static int
nhi_init(struct nhi_softc *sc)
{
	tb_route_t root_route = {0x0, 0x0};
	uint32_t val;
	int error;

	tb_debug(sc, DBG_INIT, "Initializing NHI\n");

	/* Set interrupt Auto-ACK */
	val = nhi_read_reg(sc, NHI_DMA_MISC);
	tb_debug(sc, DBG_INIT|DBG_FULL, "Read NHI_DMA_MISC= 0x%08x\n", val);
	val |= DMA_MISC_INT_AUTOCLEAR;
	tb_debug(sc, DBG_INIT, "Setting interrupt auto-ACK, 0x%08x\n", val);
	nhi_write_reg(sc, NHI_DMA_MISC, val);

	if (NHI_IS_AR(sc) || NHI_IS_TR(sc) || NHI_IS_ICL(sc))
		tb_printf(sc, "WARN: device uses an internal connection manager\n");

	/*
	 * Populate the controller (local) UUID, necessary for cross-domain
	 * communications.  Currently disabled:
	 *
	 *	if (NHI_IS_ICL(sc))
	 *		nhi_pci_get_uuid(sc);
	 */

	/*
	 * Attach the router to the root thunderbolt bridge now that the DMA
	 * channel is configured and ready.
	 * The root router always has a route of 0x0...0, so set it statically
	 * here.
	 */
	if ((error = tb_router_attach_root(sc, root_route)) != 0)
		tb_printf(sc, "tb_router_attach_root() error."
		    " The driver should be loaded at boot\n");

	if (error == 0) {
		sc->ich.ich_func = nhi_post_init;
		sc->ich.ich_arg = sc;
		error = config_intrhook_establish(&sc->ich);
		if (error)
			tb_printf(sc, "Failed to establish config hook\n");
	}

	return (error);
}

static void
nhi_post_init(void *arg)
{
	struct nhi_softc *sc;
	uint8_t *u;
	int error;

	sc = (struct nhi_softc *)arg;
	tb_debug(sc, DBG_INIT | DBG_EXTRA, "nhi_post_init\n");

	bzero(sc->lc_uuid, 16);
	error = tb_config_get_lc_uuid(sc->root_rsc, sc->lc_uuid);
	if (error == 0) {
		u = sc->lc_uuid;
		tb_printf(sc, "Root Router LC UUID: %02x%02x%02x%02x-"
		    "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
		    u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
		    u[6], u[5], u[4], u[3], u[2], u[1], u[0]);
	} else
		tb_printf(sc, "Error finding LC registers: %d\n", error);

	u = sc->uuid;
	tb_printf(sc, "Root Router UUID: %02x%02x%02x%02x-"
	    "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
	    u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
	    u[6], u[5], u[4], u[3], u[2], u[1], u[0]);

	config_intrhook_disestablish(&sc->ich);
}

static int
nhi_tx_enqueue(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
{
	struct nhi_softc *sc;
	struct nhi_tx_buffer_desc *desc;
	uint16_t pi;

	sc = r->sc;

	/* A length of 0 means 4096.  Can't have longer lengths */
	if (cmd->req_len > TX_BUFFER_DESC_LEN_MASK + 1) {
		tb_debug(sc, DBG_TXQ, "Error: TX frame too big\n");
		return (EINVAL);
	}
	cmd->req_len &= TX_BUFFER_DESC_LEN_MASK;

	mtx_lock(&r->mtx);
	desc = &r->tx_ring[r->tx_pi].tx;
	pi = (r->tx_pi + 1) & r->tx_ring_mask;
	if (pi == r->tx_ci) {
		mtx_unlock(&r->mtx);
		return (EBUSY);
	}
	r->tx_cmd_ring[r->tx_pi] = cmd;
	r->tx_pi = pi;

	desc->addr_lo = htole32(cmd->data_busaddr & 0xffffffff);
	desc->addr_hi = htole32(cmd->data_busaddr >> 32);
	desc->eof_len = htole16((cmd->pdf << TX_BUFFER_DESC_EOF_SHIFT) |
	    cmd->req_len);
	desc->flags_sof = cmd->pdf | TX_BUFFER_DESC_IE | TX_BUFFER_DESC_RS;
	desc->offset = 0;
	desc->payload_time = 0;

	tb_debug(sc, DBG_TXQ, "enqueue TXdescIdx= %d cmdidx= %d len= %d, "
	    "busaddr= 0x%jx\n", r->tx_pi, cmd->idx, cmd->req_len,
	    cmd->data_busaddr);

	nhi_write_reg(sc, r->tx_pici_reg, pi << TX_RING_PI_SHIFT | r->tx_ci);
	mtx_unlock(&r->mtx);
	return (0);
}

/*
 * No scheduling happens for now.  Ring0 scheduling is done in the TB
 * layer.
 */
int
nhi_tx_schedule(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
{
	int error;

	error = nhi_tx_enqueue(r, cmd);
	if (error == EBUSY)
		nhi_write_reg(r->sc, r->tx_pici_reg,
		    r->tx_pi << TX_RING_PI_SHIFT | r->tx_ci);
	return (error);
}

int
nhi_tx_synchronous(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
{
	int error, count;

	if ((error = nhi_tx_schedule(r, cmd)) != 0)
		return (error);

	if (cmd->flags & CMD_POLLED) {
		error = 0;
		count = cmd->timeout * 100;

		/* Enter the loop at least once */
		while ((count-- > 0) && (cmd->flags & CMD_REQ_COMPLETE) == 0) {
			DELAY(10000);
			rmb();
			nhi_intr(r->tracker);
		}
	} else {
		error = msleep(cmd, &r->mtx, PCATCH, "nhi_tx", cmd->timeout);
		if ((error == 0) && (cmd->flags & CMD_REQ_COMPLETE) == 0)
			error = EWOULDBLOCK;
	}

	if ((cmd->flags & CMD_REQ_COMPLETE) == 0)
		error = ETIMEDOUT;

	tb_debug(r->sc, DBG_TXQ|DBG_FULL, "tx_synchronous done waiting, "
	    "err= %d, TX_COMPLETE= %d\n", error,
	    !!(cmd->flags & CMD_REQ_COMPLETE));

	if (error == ERESTART) {
		tb_printf(r->sc, "TX command interrupted\n");
	} else if ((error == EWOULDBLOCK) || (error == ETIMEDOUT)) {
		tb_printf(r->sc, "TX command timed out\n");
	} else if (error != 0) {
		tb_printf(r->sc, "TX command failed error= %d\n", error);
	}

	return (error);
}
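
/*
 * Caller sketch for the synchronous TX path above.  Illustrative only; it
 * assumes a pre-mapped Ring0-style frame and a hypothetical PDF_EXAMPLE
 * value.  The timeout is in seconds: the polled loop above runs
 * timeout * 100 iterations of DELAY(10000).
 *
 *	struct nhi_cmd_frame *cmd;
 *
 *	cmd = nhi_alloc_tx_frame(ring);
 *	if (cmd != NULL) {
 *		cmd->pdf = PDF_EXAMPLE;
 *		cmd->req_len = len;
 *		cmd->flags |= CMD_POLLED;
 *		cmd->timeout = 2;
 *		error = nhi_tx_synchronous(ring, cmd);
 *	}
 */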

static int
nhi_tx_complete(struct nhi_ring_pair *r, struct nhi_tx_buffer_desc *desc,
    struct nhi_cmd_frame *cmd)
{
	struct nhi_softc *sc;
	struct nhi_pdf_dispatch *txpdf;
	u_int sof;

	sc = r->sc;
	sof = desc->flags_sof & TX_BUFFER_DESC_SOF_MASK;
	tb_debug(sc, DBG_TXQ, "Recovered TX pdf= %s cmdidx= %d flags= 0x%x\n",
	    tb_get_string(sof, nhi_frame_pdf), cmd->idx, desc->flags_sof);

	if ((desc->flags_sof & TX_BUFFER_DESC_DONE) == 0)
		tb_debug(sc, DBG_TXQ,
		    "warning, TX descriptor DONE flag not set\n");

	/* XXX Atomics */
	cmd->flags |= CMD_REQ_COMPLETE;

	txpdf = &r->tracker->txpdf[sof];
	if (txpdf->cb != NULL) {
		tb_debug(sc, DBG_INTR|DBG_TXQ, "Calling PDF TX callback\n");
		txpdf->cb(txpdf->context, (union nhi_ring_desc *)desc, cmd);
		return (0);
	}

	tb_debug(sc, DBG_TXQ, "Unhandled TX complete %s\n",
	    tb_get_string(sof, nhi_frame_pdf));
	nhi_free_tx_frame(r, cmd);

	return (0);
}

static int
nhi_rx_complete(struct nhi_ring_pair *r, struct nhi_rx_post_desc *desc,
    struct nhi_cmd_frame *cmd)
{
	struct nhi_softc *sc;
	struct nhi_pdf_dispatch *rxpdf;
	u_int eof, len;

	sc = r->sc;
	eof = desc->eof_len >> RX_BUFFER_DESC_EOF_SHIFT;
	len = desc->eof_len & RX_BUFFER_DESC_LEN_MASK;
	tb_debug(sc, DBG_INTR|DBG_RXQ,
	    "Recovered RX pdf= %s len= %d cmdidx= %d, busaddr= 0x%jx\n",
	    tb_get_string(eof, nhi_frame_pdf), len, cmd->idx,
	    cmd->data_busaddr);

	rxpdf = &r->tracker->rxpdf[eof];
	if (rxpdf->cb != NULL) {
		tb_debug(sc, DBG_INTR|DBG_RXQ, "Calling PDF RX callback\n");
		rxpdf->cb(rxpdf->context, (union nhi_ring_desc *)desc, cmd);
		return (0);
	}

	tb_debug(sc, DBG_INTR, "Unhandled RX frame %s\n",
	    tb_get_string(eof, nhi_frame_pdf));

	return (0);
}

int
nhi_register_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
    struct nhi_dispatch *rx)
{
	struct nhi_intr_tracker *trkr;
	struct nhi_pdf_dispatch *slot;

	KASSERT(rp != NULL, ("ring_pair is null\n"));
	tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_register_pdf called\n");

	trkr = rp->tracker;
	if (trkr == NULL) {
		tb_debug(rp->sc, DBG_INTR, "Invalid tracker\n");
		return (EINVAL);
	}

	tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering TX interrupts\n");
	if (tx != NULL) {
		while (tx->cb != NULL) {
			if ((tx->pdf < 0) || (tx->pdf > 15))
				return (EINVAL);
			slot = &trkr->txpdf[tx->pdf];
			if (slot->cb != NULL) {
				tb_debug(rp->sc, DBG_INTR,
				    "Attempted to register busy callback\n");
				return (EBUSY);
			}
			slot->cb = tx->cb;
			slot->context = tx->context;
			tb_debug(rp->sc, DBG_INTR,
			    "Registered TX callback for PDF %d\n", tx->pdf);
			tx++;
		}
	}

	tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering RX interrupts\n");
	if (rx != NULL) {
		while (rx->cb != NULL) {
			if ((rx->pdf < 0) || (rx->pdf > 15))
				return (EINVAL);
			slot = &trkr->rxpdf[rx->pdf];
			if (slot->cb != NULL) {
				tb_debug(rp->sc, DBG_INTR,
				    "Attempted to register busy callback\n");
				return (EBUSY);
			}
			slot->cb = rx->cb;
			slot->context = rx->context;
			tb_debug(rp->sc, DBG_INTR,
			    "Registered RX callback for PDF %d\n", rx->pdf);
			rx++;
		}
	}

	return (0);
}
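
/*
 * Registration sketch (hypothetical PDF value and callback; the dispatch
 * arrays are scanned until an entry with a NULL cb, so terminate with one):
 *
 *	struct nhi_dispatch rx[] = {
 *		{ .pdf = PDF_EXAMPLE, .cb = my_rx_callback, .context = sc },
 *		{ .cb = NULL }
 *	};
 *
 *	error = nhi_register_pdf(sc->ring0, NULL, rx);
 */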

int
nhi_deregister_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
    struct nhi_dispatch *rx)
{
	struct nhi_intr_tracker *trkr;
	struct nhi_pdf_dispatch *slot;

	tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_deregister_pdf called\n");

	trkr = rp->tracker;

	if (tx != NULL) {
		while (tx->cb != NULL) {
			if ((tx->pdf < 0) || (tx->pdf > 15))
				return (EINVAL);
			slot = &trkr->txpdf[tx->pdf];
			slot->cb = NULL;
			slot->context = NULL;
			tx++;
		}
	}

	if (rx != NULL) {
		while (rx->cb != NULL) {
			if ((rx->pdf < 0) || (rx->pdf > 15))
				return (EINVAL);
			slot = &trkr->rxpdf[rx->pdf];
			slot->cb = NULL;
			slot->context = NULL;
			rx++;
		}
	}

	return (0);
}

/*
 * The CI and PI indexes are not read from the hardware.  We track them in
 * software, so we know where in the ring to start a scan on an interrupt.
 * All we have to do is check for the appropriate Done bit in the next
 * descriptor, and we know if we have reached the last descriptor that the
 * hardware touched.  This technique saves at least 2 MEMIO reads per
 * interrupt.
 */
void
nhi_intr(void *data)
{
	union nhi_ring_desc *rxd;
	struct nhi_cmd_frame *cmd;
	struct nhi_intr_tracker *trkr = data;
	struct nhi_softc *sc;
	struct nhi_ring_pair *r;
	struct nhi_tx_buffer_desc *txd;
	uint32_t val, old_ci;
	u_int count;

	sc = trkr->sc;

	tb_debug(sc, DBG_INTR|DBG_FULL, "Interrupt @ vector %d\n",
	    trkr->vector);
	if ((r = trkr->ring) == NULL)
		return;

	/*
	 * Process TX completions from the adapter.  Only go through
	 * the ring once to prevent unbounded looping.
	 */
	count = r->tx_ring_depth;
	while (count-- > 0) {
		txd = &r->tx_ring[r->tx_ci].tx;
		if ((txd->flags_sof & TX_BUFFER_DESC_DONE) == 0)
			break;
		cmd = r->tx_cmd_ring[r->tx_ci];
		tb_debug(sc, DBG_INTR|DBG_TXQ|DBG_FULL,
		    "Found tx cmdidx= %d cmd= %p\n", r->tx_ci, cmd);

		/* Pass the completion up the stack */
		nhi_tx_complete(r, txd, cmd);

		/*
		 * Advance to the next item in the ring via the cached
		 * copy of the CI.  Clear the flags so we can detect
		 * a new done condition the next time the ring wraps
		 * around.  Anything higher up the stack that needs this
		 * field should have already copied it.
		 *
		 * XXX is a memory barrier needed?
		 */
		txd->flags_sof = 0;
		r->tx_ci = (r->tx_ci + 1) & r->tx_ring_mask;
	}

	/* Process RX packets from the adapter */
	count = r->rx_ring_depth;
	old_ci = r->rx_ci;

	while (count-- > 0) {
		tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
		    "Checking RX descriptor at %d\n", r->rx_pi);

		/* Look up RX descriptor and cmd */
		rxd = &r->rx_ring[r->rx_pi];
		tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
		    "rx desc len= 0x%04x flags= 0x%04x\n", rxd->rxpost.eof_len,
		    rxd->rxpost.flags_sof);
		if ((rxd->rxpost.flags_sof & RX_BUFFER_DESC_DONE) == 0)
			break;
		cmd = r->rx_cmd_ring[r->rx_pi];
		tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
		    "Found rx cmdidx= %d cmd= %p\n", r->rx_pi, cmd);

		/*
		 * Pass the RX frame up the stack.  RX frames are re-used
		 * in-place, so their contents must be copied before this
		 * function returns.
		 *
		 * XXX Rings other than Ring0 might want to have a different
		 * re-use and re-populate policy
		 */
		nhi_rx_complete(r, &rxd->rxpost, cmd);

		/*
		 * Advance the CI and move forward to the next item in the
		 * ring via our cached copy of the PI.  Clear out the
		 * length field so we can detect a new RX frame when the
		 * ring wraps around.  Reset the flags of the descriptor.
		 */
		rxd->rxpost.eof_len = 0;
		rxd->rx.flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
		r->rx_ci = (r->rx_ci + 1) & r->rx_ring_mask;
		r->rx_pi = (r->rx_pi + 1) & r->rx_ring_mask;
	}

	/*
	 * Tell the firmware about the new RX CI
	 *
	 * XXX There's a chance this will overwrite an update to the PI.
	 * Is that OK?  We keep our own copy of the PI and never read it from
	 * hardware.  However, will overwriting it result in a missed
	 * interrupt?
	 */
	if (r->rx_ci != old_ci) {
		val = r->rx_pi << RX_RING_PI_SHIFT | r->rx_ci;
		tb_debug(sc, DBG_INTR | DBG_RXQ,
		    "Writing new RX PICI= 0x%08x\n", val);
		nhi_write_reg(sc, r->rx_pici_reg, val);
	}
}

static int
nhi_setup_sysctl(struct nhi_softc *sc)
{
	struct sysctl_ctx_list *ctx = NULL;
	struct sysctl_oid *tree = NULL;

	ctx = device_get_sysctl_ctx(sc->dev);
	if (ctx != NULL)
		tree = device_get_sysctl_tree(sc->dev);

	/*
	 * Not being able to create sysctls is going to hamper other
	 * parts of the driver.
	 */
	if (tree == NULL) {
		tb_printf(sc, "Error: cannot create sysctl nodes\n");
		return (EINVAL);
	}
	sc->sysctl_tree = tree;
	sc->sysctl_ctx = ctx;

	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
	    OID_AUTO, "debug_level", CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
	    &sc->debug, 0, tb_debug_sysctl, "A", "Thunderbolt debug level");
	SYSCTL_ADD_U16(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "max_rings", CTLFLAG_RD, &sc->max_ring_count, 0,
	    "Max number of rings available");
	SYSCTL_ADD_U8(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "force_hcm", CTLFLAG_RD, &sc->force_hcm, 0,
	    "Force on/off the function of the host connection manager");

	return (0);
}