// SPDX-License-Identifier: GPL-2.0-only

// Interface to CCP/SEV-TIO for generic PCIe TDISP module

#include <linux/pci.h>
#include <linux/device.h>
#include <linux/tsm.h>
#include <linux/iommu.h>
#include <linux/pci-doe.h>
#include <linux/bitfield.h>
#include <linux/module.h>

#include <asm/sev-common.h>
#include <asm/sev.h>

#include "psp-dev.h"
#include "sev-dev.h"
#include "sev-dev-tio.h"

MODULE_IMPORT_NS("PCI_IDE");

#define TIO_DEFAULT_NR_IDE_STREAMS 1

static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
module_param_named(ide_nr, nr_ide_streams, uint, 0644);
MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");

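/*
 * Helpers to walk from a TSM device (or its PCI device) back to the owning
 * sev_device: the TSM device's parent is the sp platform device, whose
 * drvdata leads to the PSP device and from there to the SEV device.
 */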
#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)

#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))

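/*
 * Run the SPDM exchange loop for a SEV-TIO command.
 *
 * A positive return value from the SEV-TIO command handlers names the DOE
 * feature (PCI_DOE_FEATURE_CMA or PCI_DOE_FEATURE_SSESSION) over which the
 * PSP wants an SPDM request forwarded to the device.  Relay the request via
 * the DOE mailbox, hand the response back with sev_tio_continue(), and
 * repeat until the firmware stops asking (return value <= 0).
 */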
static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
{
	struct tsm_dsm_tio *dev_data = &dsm->data;
	struct tsm_spdm *spdm = &dev_data->spdm;

	/* Check the main command handler response before entering the loop */
	if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
		return -EINVAL;

	if (ret <= 0)
		return ret;

	/* ret > 0 means "SPDM requested" */
	while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
		ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
			      spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
		if (ret < 0)
			break;

		WARN_ON_ONCE(ret == 0); /* The response should never be empty */
		spdm->rsp_len = ret;
		ret = sev_tio_continue(dev_data);
	}

	return ret;
}

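/*
 * Enable one IDE stream on both ends of the link: the root port first, then
 * the endpoint, rolling the root port back if the endpoint side fails.
 */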
static int stream_enable(struct pci_ide *ide)
{
	struct pci_dev *rp = pcie_find_root_port(ide->pdev);
	int ret;

	ret = pci_ide_stream_enable(rp, ide);
	if (ret)
		return ret;

	ret = pci_ide_stream_enable(ide->pdev, ide);
	if (ret)
		pci_ide_stream_disable(rp, ide);

	return ret;
}

static int streams_enable(struct pci_ide **ide)
{
	int ret = 0;

	for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (ide[i]) {
			ret = stream_enable(ide[i]);
			if (ret)
				break;
		}
	}

	return ret;
}

static void stream_disable(struct pci_ide *ide)
{
	pci_ide_stream_disable(ide->pdev, ide);
	pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
}

static void streams_disable(struct pci_ide **ide)
{
	for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
		if (ide[i])
			stream_disable(ide[i]);
}

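/*
 * Program one IDE stream into the endpoint and its root port.  The RID
 * association range is left wide open (0..0xffff) for both partners, and the
 * CFG/TEE-Limited settings are forced rather than negotiated, hence the
 * pci_warn() below.
 */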
static void stream_setup(struct pci_ide *ide)
{
	struct pci_dev *rp = pcie_find_root_port(ide->pdev);

	ide->partner[PCI_IDE_EP].rid_start = 0;
	ide->partner[PCI_IDE_EP].rid_end = 0xffff;
	ide->partner[PCI_IDE_RP].rid_start = 0;
	ide->partner[PCI_IDE_RP].rid_end = 0xffff;

	ide->pdev->ide_cfg = 0;
	ide->pdev->ide_tee_limit = 1;
	rp->ide_cfg = 1;
	rp->ide_tee_limit = 0;

	pci_warn(ide->pdev, "Forcing CFG/TEE for %s", pci_name(rp));
	pci_ide_stream_setup(ide->pdev, ide);
	pci_ide_stream_setup(rp, ide);
}

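/*
 * Set up all allocated per-traffic-class streams.  Returns a bitmask of the
 * traffic classes in use and fills @ids with the stream ID for each TC
 * (0xFF for unused slots).  The first allocated stream is marked as the
 * default stream.
 */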
static u8 streams_setup(struct pci_ide **ide, u8 *ids)
{
	bool def = false;
	u8 tc_mask = 0;
	int i;

	for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (!ide[i]) {
			ids[i] = 0xFF;
			continue;
		}

		tc_mask |= BIT(i);
		ids[i] = ide[i]->stream_id;

		if (!def) {
			struct pci_ide_partner *settings;

			settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
			settings->default_stream = 1;
			def = true;
		}

		stream_setup(ide[i]);
	}

	return tc_mask;
}

static int streams_register(struct pci_ide **ide)
{
	int ret = 0, i;

	for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (ide[i]) {
			ret = pci_ide_stream_register(ide[i]);
			if (ret)
				break;
		}
	}

	return ret;
}

static void streams_unregister(struct pci_ide **ide)
{
	for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
		if (ide[i])
			pci_ide_stream_unregister(ide[i]);
}

static void stream_teardown(struct pci_ide *ide)
{
	pci_ide_stream_teardown(ide->pdev, ide);
	pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
}

static void streams_teardown(struct pci_ide **ide)
{
	for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
		if (ide[i]) {
			stream_teardown(ide[i]);
			pci_ide_stream_free(ide[i]);
			ide[i] = NULL;
		}
	}
}

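/*
 * Allocate an IDE stream for a traffic class.  The stream ID is simply the
 * TC number, and the host bridge is (re)configured for the number of streams
 * requested via the ide_nr module parameter.
 */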
static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
			unsigned int tc)
{
	struct pci_dev *rp = pcie_find_root_port(pdev);
	struct pci_ide *ide1;

	if (ide[tc]) {
		pci_err(pdev, "Stream for class=%d already registered", tc);
		return -EBUSY;
	}

	/* FIXME: find a better way */
	if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
		pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams);
	pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);

	ide1 = pci_ide_stream_alloc(pdev);
	if (!ide1)
		return -ENOMEM;

	/* Blindly assign streamid=0 to TC=0, and so on */
	ide1->stream_id = tc;

	ide[tc] = ide1;

	return 0;
}

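/*
 * Construct the DSM (device security manager) context for physical
 * function 0, which carries the DOE mailbox and the SPDM session state.
 */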
static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
{
	struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
	int rc;

	if (!dsm)
		return NULL;

	rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
	if (rc)
		return NULL;

	pci_dbg(pdev, "TSM enabled\n");
	dsm->sev = sev;
	return &no_free_ptr(dsm)->tsm.base_tsm;
}

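/* TSM probe hook: only PF0 gets a DSM context, other functions get none */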
static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
{
	struct sev_device *sev = tsm_dev_to_sev(tsmdev);

	if (is_pci_tsm_pf0(pdev))
		return tio_pf0_probe(pdev, sev);
	return NULL;
}

static void dsm_remove(struct pci_tsm *tsm)
{
	struct pci_dev *pdev = tsm->pdev;

	pci_dbg(pdev, "TSM disabled\n");

	if (is_pci_tsm_pf0(pdev)) {
		struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);

		pci_tsm_pf0_destructor(&dsm->tsm);
		kfree(dsm);
	}
}

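/*
 * Issue the device-create command to the PSP.  The IDs come straight from
 * PCI: the endpoint's routing ID, the PCI segment (domain) number, and the
 * root port's port number from its Link Capabilities register.
 */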
static int dsm_create(struct tio_dsm *dsm)
{
	struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
	u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
	struct pci_dev *rootport = pcie_find_root_port(pdev);
	u16 device_id = pci_dev_id(pdev);
	u16 root_port_id;
	u32 lnkcap = 0;

	if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
				  &lnkcap))
		return -ENODEV;

	root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);

	return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
}

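/*
 * TSM "connect" hook: allocate an IDE stream (only TC0 for now), create the
 * device context in the PSP, program the stream settings, then run the
 * connect command, which drives the SPDM traffic through sev_tio_spdm_cmd().
 * On success the streams are enabled and registered with the IDE core.
 */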
static int dsm_connect(struct pci_dev *pdev)
{
	struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
	struct tsm_dsm_tio *dev_data = &dsm->data;
	u8 ids[TIO_IDE_MAX_TC];
	u8 tc_mask;
	int ret;

	if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
				 PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
		pci_err(pdev, "CMA DOE MB must support SSESSION\n");
		return -EFAULT;
	}

	ret = stream_alloc(pdev, dev_data->ide, 0);
	if (ret)
		return ret;

	ret = dsm_create(dsm);
	if (ret)
		goto ide_free_exit;

	tc_mask = streams_setup(dev_data->ide, ids);

	ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
	ret = sev_tio_spdm_cmd(dsm, ret);
	if (ret)
		goto free_exit;

	streams_enable(dev_data->ide);

	ret = streams_register(dev_data->ide);
	if (ret)
		goto free_exit;

	return 0;

free_exit:
	sev_tio_dev_reclaim(dev_data);
	streams_disable(dev_data->ide);
ide_free_exit:
	streams_teardown(dev_data->ide);

	return ret;
}

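/*
 * TSM "disconnect" hook: tell the PSP to tear the connection down (forcing
 * it when the system is halting/rebooting, or retrying with force if the
 * graceful attempt fails), then reclaim the device context and undo the IDE
 * stream setup.
 */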
static void dsm_disconnect(struct pci_dev *pdev)
{
	bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
	struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
	struct tsm_dsm_tio *dev_data = &dsm->data;
	int ret;

	ret = sev_tio_dev_disconnect(dev_data, force);
	ret = sev_tio_spdm_cmd(dsm, ret);
	if (ret && !force) {
		ret = sev_tio_dev_disconnect(dev_data, true);
		sev_tio_spdm_cmd(dsm, ret);
	}

	sev_tio_dev_reclaim(dev_data);

	streams_disable(dev_data->ide);
	streams_unregister(dev_data->ide);
	streams_teardown(dev_data->ide);
}

static struct pci_tsm_ops sev_tsm_ops = {
	.probe = dsm_probe,
	.remove = dsm_remove,
	.connect = dsm_connect,
	.disconnect = dsm_disconnect,
};

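/*
 * Query the SEV-TIO status from the firmware, register this PSP as a TSM
 * provider and cache the reported sizes/capabilities in sev->tio_status.
 * Expected to be called with the SEV lock held (hence the _locked suffix).
 */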
void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
{
	struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
	struct tsm_dev *tsmdev;
	int ret;

	WARN_ON(sev->tio_status);

	if (!t)
		return;

	ret = sev_tio_init_locked(tio_status_page);
	if (ret) {
		pr_warn("SEV-TIO STATUS failed with %d\n", ret);
		goto error_exit;
	}

	tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
	if (IS_ERR(tsmdev)) {
		ret = PTR_ERR(tsmdev);
		goto error_exit;
	}

	memcpy(t, tio_status_page, sizeof(*t));

	pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
		  "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
		  t->tio_en, t->tio_init_done,
		  t->spdm_req_size_min, t->spdm_req_size_max,
		  t->spdm_rsp_size_min, t->spdm_rsp_size_max,
		  t->spdm_scratch_size_min, t->spdm_scratch_size_max,
		  t->spdm_out_size_min, t->spdm_out_size_max,
		  t->devctx_size, t->tdictx_size,
		  t->tio_crypto_alg);

	sev->tsmdev = tsmdev;
	sev->tio_status = t;

	return;

error_exit:
	pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
	       ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
	kfree(t);
}

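/* Unregister the TSM device; safe to call even if registration never happened */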
void sev_tsm_uninit(struct sev_device *sev)
{
	if (sev->tsmdev)
		tsm_unregister(sev->tsmdev);

	sev->tsmdev = NULL;
}