/*-
 * Copyright (c) 2025, Samsung Electronics Co., Ltd.
 * Written by Jaeyoon Choi
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ufshci_private.h"

static int ufshci_pci_probe(device_t);
static int ufshci_pci_attach(device_t);
static int ufshci_pci_detach(device_t);
static int ufshci_pci_suspend(device_t);
static int ufshci_pci_resume(device_t);

static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr);

static device_method_t ufshci_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ufshci_pci_probe),
        DEVMETHOD(device_attach, ufshci_pci_attach),
        DEVMETHOD(device_detach, ufshci_pci_detach),
        DEVMETHOD(device_suspend, ufshci_pci_suspend),
        DEVMETHOD(device_resume, ufshci_pci_resume),

        DEVMETHOD_END
};

static driver_t ufshci_pci_driver = {
        "ufshci",
        ufshci_pci_methods,
        sizeof(struct ufshci_controller),
};

DRIVER_MODULE(ufshci, pci, ufshci_pci_driver, 0, 0);

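/*
 * Table of known PCI UFS host controllers.  Each entry carries the PCI
 * device/vendor ID, a probe description, the reference clock frequency,
 * and a set of controller-specific quirk flags.
 */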
static struct _pcsid {
        uint32_t devid;
        const char *desc;
        uint32_t ref_clk;
        uint32_t quirks;
} pci_ids[] = {
        { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
            UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE |
            UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK |
            UFSHCI_QUIRK_SKIP_WELL_KNOWN_LUNS },
        { 0x98fa8086, "Intel Lakefield UFS Host Controller",
            UFSHCI_REF_CLK_19_2MHz,
            UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
            UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE |
            UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY |
            UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE },
        { 0x54ff8086, "Intel Alder Lake-N UFS Host Controller",
            UFSHCI_REF_CLK_19_2MHz, UFSHCI_QUIRK_BROKEN_AUTO_HIBERNATE },
        { 0x00000000, NULL }
};

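/*
 * Match the PCI device ID against pci_ids[] and, on a hit, record the
 * entry's quirks and reference clock in the softc before accepting the
 * device.
 */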
static int
ufshci_pci_probe(device_t device)
{
        struct ufshci_controller *ctrlr = device_get_softc(device);
        uint32_t devid = pci_get_devid(device);
        struct _pcsid *ep = pci_ids;

        while (ep->devid && ep->devid != devid)
                ++ep;

        if (ep->devid) {
                ctrlr->quirks = ep->quirks;
                ctrlr->ref_clk = ep->ref_clk;
        }

        if (ep->desc) {
                device_set_desc(device, ep->desc);
                return (BUS_PROBE_DEFAULT);
        }

        return (ENXIO);
}

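/* Map BAR0, which holds the UFSHCI memory-mapped register space. */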
static int
ufshci_pci_allocate_bar(struct ufshci_controller *ctrlr)
{
        ctrlr->resource_id = PCIR_BAR(0);

        ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
            &ctrlr->resource_id, RF_ACTIVE);

        if (ctrlr->resource == NULL) {
                ufshci_printf(ctrlr, "unable to allocate pci resource\n");
                return (ENOMEM);
        }

        ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
        ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
        ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle;

        return (0);
}

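/*
 * Map the register BAR, enable bus mastering, and set up interrupts before
 * handing off to the bus-independent ufshci_attach().  On failure, release
 * whatever resources were already allocated.
 */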
static int
ufshci_pci_attach(device_t dev)
{
        struct ufshci_controller *ctrlr = device_get_softc(dev);
        int status;

        ctrlr->dev = dev;
        status = ufshci_pci_allocate_bar(ctrlr);
        if (status != 0)
                goto bad;
        pci_enable_busmaster(dev);
        status = ufshci_pci_setup_interrupts(ctrlr);
        if (status != 0)
                goto bad;

        return (ufshci_attach(dev));
bad:
        if (ctrlr->resource != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
                    ctrlr->resource);
        }

        if (ctrlr->tag)
                bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);

        if (ctrlr->res)
                bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res),
                    ctrlr->res);

        if (ctrlr->msi_count > 0)
                pci_release_msi(dev);

        return (status);
}

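/*
 * Run the bus-independent detach first, then release any MSI/MSI-X vectors
 * and disable bus mastering.
 */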
static int
ufshci_pci_detach(device_t dev)
{
        struct ufshci_controller *ctrlr = device_get_softc(dev);
        int error;

        error = ufshci_detach(dev);
        if (ctrlr->msi_count > 0)
                pci_release_msi(dev);
        pci_disable_busmaster(dev);
        return (error);
}

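/*
 * Fall-back path: a single (possibly shared) interrupt vector serves both
 * the admin queue and the lone I/O queue, dispatched through
 * ufshci_ctrlr_shared_handler().
 */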
static int
ufshci_pci_setup_shared(struct ufshci_controller *ctrlr, int rid)
{
        int error;

        ctrlr->num_io_queues = 1;
        ctrlr->rid = rid;
        ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
            &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
        if (ctrlr->res == NULL) {
                ufshci_printf(ctrlr, "unable to allocate shared interrupt\n");
                return (ENOMEM);
        }

        error = bus_setup_intr(ctrlr->dev, ctrlr->res,
            INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler,
            ctrlr, &ctrlr->tag);
        if (error) {
                ufshci_printf(ctrlr, "unable to setup shared interrupt\n");
                return (error);
        }

        return (0);
}

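/*
 * Interrupt setup policy: prefer MSI-X (ideally one vector per I/O queue
 * plus one for the admin queue), then MSI, then a legacy INTx line, honoring
 * the hw.ufshci.* tunables below.
 */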
static int
ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr)
{
        device_t dev = ctrlr->dev;
        int force_intx = 0;
        int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq;
        int num_vectors_requested;

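        /*
         * The hw.ufshci.* knobs are kernel environment tunables, typically
         * set from loader.conf(5) before the driver attaches, e.g.:
         *
         *      hw.ufshci.force_intx=1
         *      hw.ufshci.num_io_queues=4
         *
         * (Illustrative values only; the defaults below apply otherwise.)
         */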
        TUNABLE_INT_FETCH("hw.ufshci.force_intx", &force_intx);
        if (force_intx)
                goto intx;

        if (pci_msix_count(dev) == 0)
                goto msi;

        /*
         * Try to allocate one MSI-X vector per core for the I/O queues, plus
         * one for the admin queue, but accept a single shared MSI-X vector
         * if we have to.  Fall back to MSI if we cannot get any MSI-X
         * vectors.
         */

        /*
         * TODO: Multi-Circular Queue (MCQ) support is not implemented yet,
         * so a single I/O queue is used for now.  Once MCQ lands, this
         * should scale with the CPU count, e.g. num_io_queues = mp_ncpus;
         */
        num_io_queues = 1;

        TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues);
        if (num_io_queues < 1 || num_io_queues > mp_ncpus)
                num_io_queues = mp_ncpus;

        per_cpu_io_queues = 1;
        TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues);
        if (per_cpu_io_queues == 0)
                num_io_queues = 1;

        min_cpus_per_ioq = smp_threads_per_core;
        TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq);
        if (min_cpus_per_ioq > 1) {
                num_io_queues = min(num_io_queues,
                    max(1, mp_ncpus / min_cpus_per_ioq));
        }

        num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1));

again:
        if (num_io_queues > vm_ndomains)
                num_io_queues -= num_io_queues % vm_ndomains;
        num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev));
        ctrlr->msi_count = num_vectors_requested;
        if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) {
                ufshci_printf(ctrlr, "unable to allocate MSI-X\n");
                ctrlr->msi_count = 0;
                goto msi;
        }
        if (ctrlr->msi_count == 1)
                return (ufshci_pci_setup_shared(ctrlr, 1));
        if (ctrlr->msi_count != num_vectors_requested) {
                pci_release_msi(dev);
                num_io_queues = ctrlr->msi_count - 1;
                goto again;
        }

        ctrlr->num_io_queues = num_io_queues;
        return (0);

msi:
        /*
         * Try to allocate two MSI vectors (one for the admin queue and one
         * for the I/O queue), but accept a single shared vector if we have
         * to.  Fall back to INTx if we cannot get any MSI vectors.
         */
        ctrlr->msi_count = min(pci_msi_count(dev), 2);
        if (ctrlr->msi_count > 0) {
                if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) {
                        ufshci_printf(ctrlr, "unable to allocate MSI\n");
                        ctrlr->msi_count = 0;
                } else if (ctrlr->msi_count == 2) {
                        ctrlr->num_io_queues = 1;
                        return (0);
                }
        }

intx:
        return (ufshci_pci_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0));
}

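/*
 * Bus suspend/resume methods delegate to the common controller code with the
 * appropriate power state type.
 */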
static int
ufshci_pci_suspend(device_t dev)
{
        struct ufshci_controller *ctrlr = device_get_softc(dev);

        /* Currently, PCI-based ufshci only supports POWER_STYPE_STANDBY. */
        return (ufshci_ctrlr_suspend(ctrlr, POWER_STYPE_STANDBY));
}

static int
ufshci_pci_resume(device_t dev)
{
        struct ufshci_controller *ctrlr = device_get_softc(dev);

        return (ufshci_ctrlr_resume(ctrlr, POWER_STYPE_AWAKE));
}