// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */

/* PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) */

#define dev_fmt(fmt) "PCI/IDE: " fmt
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/pci-ide.h>
#include <linux/pci_regs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tsm.h>

#include "pci.h"

static int __sel_ide_offset(u16 ide_cap, u8 nr_link_ide, u8 stream_index,
			    u8 nr_ide_mem)
{
	u32 offset = ide_cap + PCI_IDE_LINK_STREAM_0 +
		     nr_link_ide * PCI_IDE_LINK_BLOCK_SIZE;

	/*
	 * Assume a constant number of address association resources per stream
	 * index
	 */
	return offset + stream_index * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem);
}
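
/*
 * Layout sketch (offsets symbolic, values per the PCI_IDE_* definitions
 * in pci_regs.h): the Link IDE register blocks are contiguous after the
 * capability header, followed by the Selective IDE Stream blocks, each
 * sized by the number of address association register sets it embeds.
 * For example, with nr_link_ide == 2, Selective Stream index 1 lives at:
 *
 *	ide_cap + PCI_IDE_LINK_STREAM_0 + 2 * PCI_IDE_LINK_BLOCK_SIZE +
 *	1 * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem)
 */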

static int sel_ide_offset(struct pci_dev *pdev,
			  struct pci_ide_partner *settings)
{
	return __sel_ide_offset(pdev->ide_cap, pdev->nr_link_ide,
				settings->stream_index, pdev->nr_ide_mem);
}

static bool reserve_stream_index(struct pci_dev *pdev, u8 idx)
{
	int ret;

	ret = ida_alloc_range(&pdev->ide_stream_ida, idx, idx, GFP_KERNEL);
	return ret >= 0;
}

static bool reserve_stream_id(struct pci_host_bridge *hb, u8 id)
{
	int ret;

	ret = ida_alloc_range(&hb->ide_stream_ids_ida, id, id, GFP_KERNEL);
	return ret >= 0;
}

static bool claim_stream(struct pci_host_bridge *hb, u8 stream_id,
			 struct pci_dev *pdev, u8 stream_idx)
{
	dev_info(&hb->dev, "Stream ID %d active at init\n", stream_id);
	if (!reserve_stream_id(hb, stream_id)) {
		dev_info(&hb->dev, "Failed to claim %s Stream ID %d\n",
			 stream_id == PCI_IDE_RESERVED_STREAM_ID ? "reserved" :
								   "active",
			 stream_id);
		return false;
	}

	/* No stream index to reserve in the Link IDE case */
	if (!pdev)
		return true;

	if (!reserve_stream_index(pdev, stream_idx)) {
		pci_info(pdev, "Failed to claim active Selective Stream %d\n",
			 stream_idx);
		return false;
	}

	return true;
}

void pci_ide_init(struct pci_dev *pdev)
{
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
	u16 nr_link_ide, nr_ide_mem, nr_streams;
	u16 ide_cap;
	u32 val;

	/*
	 * Unconditionally init so that the IDA idle state is consistent with
	 * pdev->ide_cap.
	 */
	ida_init(&pdev->ide_stream_ida);

	if (!pci_is_pcie(pdev))
		return;

	ide_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_IDE);
	if (!ide_cap)
		return;

	pci_read_config_dword(pdev, ide_cap + PCI_IDE_CAP, &val);
	if ((val & PCI_IDE_CAP_SELECTIVE) == 0)
		return;

	/*
	 * Require the endpoint IDE capability to be paired with Root Port
	 * IDE capability.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
		struct pci_dev *rp = pcie_find_root_port(pdev);

		if (!rp->ide_cap)
			return;
	}

	pdev->ide_cfg = FIELD_GET(PCI_IDE_CAP_SEL_CFG, val);
	pdev->ide_tee_limit = FIELD_GET(PCI_IDE_CAP_TEE_LIMITED, val);

	if (val & PCI_IDE_CAP_LINK)
		nr_link_ide = 1 + FIELD_GET(PCI_IDE_CAP_LINK_TC_NUM, val);
	else
		nr_link_ide = 0;

	nr_ide_mem = 0;
	nr_streams = 1 + FIELD_GET(PCI_IDE_CAP_SEL_NUM, val);
	for (u16 i = 0; i < nr_streams; i++) {
		int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
		int nr_assoc;
		u32 val;
		u8 id;

		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);

		/*
		 * Let's not entertain streams that do not have a constant
		 * number of address association blocks
		 */
		nr_assoc = FIELD_GET(PCI_IDE_SEL_CAP_ASSOC_NUM, val);
		if (i && (nr_assoc != nr_ide_mem)) {
			pci_info(pdev, "Unsupported Selective Stream %d capability, skipping the rest\n", i);
			nr_streams = i;
			break;
		}

		nr_ide_mem = nr_assoc;

		/*
		 * Claim Stream IDs and Selective Stream blocks that are already
		 * active on the device
		 */
		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
		id = FIELD_GET(PCI_IDE_SEL_CTL_ID, val);
		if ((val & PCI_IDE_SEL_CTL_EN) &&
		    !claim_stream(hb, id, pdev, i))
			return;
	}

	/* Reserve Link Stream IDs that are already active on the device */
	for (u16 i = 0; i < nr_link_ide; ++i) {
		int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
			  i * PCI_IDE_LINK_BLOCK_SIZE;
		u8 id;

		pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
		id = FIELD_GET(PCI_IDE_LINK_CTL_ID, val);
		if ((val & PCI_IDE_LINK_CTL_EN) &&
		    !claim_stream(hb, id, NULL, -1))
			return;
	}

	for (u16 i = 0; i < nr_streams; i++) {
		int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);

		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
		if (val & PCI_IDE_SEL_CTL_EN)
			continue;
		val &= ~PCI_IDE_SEL_CTL_ID;
		val |= FIELD_PREP(PCI_IDE_SEL_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
	}

	for (u16 i = 0; i < nr_link_ide; ++i) {
		int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
			  i * PCI_IDE_LINK_BLOCK_SIZE;

		pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
		if (val & PCI_IDE_LINK_CTL_EN)
			continue;
		val &= ~PCI_IDE_LINK_CTL_ID;
		val |= FIELD_PREP(PCI_IDE_LINK_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
		pci_write_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, val);
	}

	pdev->ide_cap = ide_cap;
	pdev->nr_link_ide = nr_link_ide;
	pdev->nr_sel_ide = nr_streams;
	pdev->nr_ide_mem = nr_ide_mem;
}

struct stream_index {
	struct ida *ida;
	u8 stream_index;
};

static void free_stream_index(struct stream_index *stream)
{
	ida_free(stream->ida, stream->stream_index);
}

DEFINE_FREE(free_stream, struct stream_index *, if (_T) free_stream_index(_T))
static struct stream_index *alloc_stream_index(struct ida *ida, u16 max,
					       struct stream_index *stream)
{
	int id;

	if (!max)
		return NULL;

	id = ida_alloc_max(ida, max - 1, GFP_KERNEL);
	if (id < 0)
		return NULL;

	*stream = (struct stream_index) {
		.ida = ida,
		.stream_index = id,
	};
	return stream;
}

/**
 * pci_ide_stream_alloc() - Reserve stream indices and probe for settings
 * @pdev: IDE capable PCIe Endpoint Physical Function
 *
 * Retrieve the Requester ID range of @pdev for programming its Root
 * Port IDE RID Association registers, and conversely retrieve the
 * Requester ID of the Root Port for programming @pdev's IDE RID
 * Association registers.
 *
 * Allocate a Selective IDE Stream Register Block instance per port.
 *
 * Allocate a platform stream resource from the associated host bridge.
 * Retrieve stream association parameters for Requester ID range and
 * address range restrictions for the stream.
 */
struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
{
	/* EP, RP, + HB Stream allocation */
	struct stream_index __stream[PCI_IDE_HB + 1];
	struct pci_bus_region pref_assoc = { 0, -1 };
	struct pci_bus_region mem_assoc = { 0, -1 };
	struct resource *mem, *pref;
	struct pci_host_bridge *hb;
	struct pci_dev *rp, *br;
	int num_vf, rid_end;

	if (!pci_is_pcie(pdev))
		return NULL;

	if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
		return NULL;

	if (!pdev->ide_cap)
		return NULL;

	struct pci_ide *ide __free(kfree) = kzalloc(sizeof(*ide), GFP_KERNEL);
	if (!ide)
		return NULL;

	hb = pci_find_host_bridge(pdev->bus);
	struct stream_index *hb_stream __free(free_stream) = alloc_stream_index(
		&hb->ide_stream_ida, hb->nr_ide_streams, &__stream[PCI_IDE_HB]);
	if (!hb_stream)
		return NULL;

	rp = pcie_find_root_port(pdev);
	struct stream_index *rp_stream __free(free_stream) = alloc_stream_index(
		&rp->ide_stream_ida, rp->nr_sel_ide, &__stream[PCI_IDE_RP]);
	if (!rp_stream)
		return NULL;

	struct stream_index *ep_stream __free(free_stream) = alloc_stream_index(
		&pdev->ide_stream_ida, pdev->nr_sel_ide, &__stream[PCI_IDE_EP]);
	if (!ep_stream)
		return NULL;

	/* For the SR-IOV case, cover all VFs */
	num_vf = pci_num_vf(pdev);
	if (num_vf)
		rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf),
				    pci_iov_virtfn_devfn(pdev, num_vf));
	else
		rid_end = pci_dev_id(pdev);

	br = pci_upstream_bridge(pdev);
	if (!br)
		return NULL;

	/*
	 * Check if the device consumes memory and/or prefetch-memory. Set up
	 * downstream address association ranges for each.
	 */
	mem = pci_resource_n(br, PCI_BRIDGE_MEM_WINDOW);
	pref = pci_resource_n(br, PCI_BRIDGE_PREF_MEM_WINDOW);
	if (resource_assigned(mem))
		pcibios_resource_to_bus(br->bus, &mem_assoc, mem);
	if (resource_assigned(pref))
		pcibios_resource_to_bus(br->bus, &pref_assoc, pref);

	*ide = (struct pci_ide) {
		.pdev = pdev,
		.partner = {
			[PCI_IDE_EP] = {
				.rid_start = pci_dev_id(rp),
				.rid_end = pci_dev_id(rp),
				.stream_index = no_free_ptr(ep_stream)->stream_index,
				/* Disable upstream address association */
				.mem_assoc = { 0, -1 },
				.pref_assoc = { 0, -1 },
			},
			[PCI_IDE_RP] = {
				.rid_start = pci_dev_id(pdev),
				.rid_end = rid_end,
				.stream_index = no_free_ptr(rp_stream)->stream_index,
				.mem_assoc = mem_assoc,
				.pref_assoc = pref_assoc,
			},
		},
		.host_bridge_stream = no_free_ptr(hb_stream)->stream_index,
		.stream_id = -1,
	};

	return_ptr(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_alloc);
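
/*
 * Hypothetical caller flow (a sketch only, error handling elided, not
 * lifted from an in-tree user). Note that the order mirrors the reverse
 * of pci_ide_stream_release():
 *
 *	struct pci_ide *ide = pci_ide_stream_alloc(pdev);
 *
 *	ide->stream_id = id;	(a platform-chosen Stream ID)
 *	rc = pci_ide_stream_register(ide);
 *	pci_ide_stream_setup(pdev, ide);
 *	pci_ide_stream_setup(rp, ide);
 *	(TSM key programming happens here)
 *	rc = pci_ide_stream_enable(pdev, ide);
 *	rc = pci_ide_stream_enable(rp, ide);
 *
 * Unwind everything, at any stage, with pci_ide_stream_release(ide).
 */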

/**
 * pci_ide_stream_free() - unwind pci_ide_stream_alloc()
 * @ide: idle IDE settings descriptor
 *
 * Free all of the stream index (register block) allocations acquired by
 * pci_ide_stream_alloc(). The stream represented by @ide is assumed to
 * be unregistered and not instantiated in any device.
 */
void pci_ide_stream_free(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_dev *rp = pcie_find_root_port(pdev);
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);

	ida_free(&pdev->ide_stream_ida, ide->partner[PCI_IDE_EP].stream_index);
	ida_free(&rp->ide_stream_ida, ide->partner[PCI_IDE_RP].stream_index);
	ida_free(&hb->ide_stream_ida, ide->host_bridge_stream);
	kfree(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_free);

/**
 * pci_ide_stream_release() - unwind and release an @ide context
 * @ide: partially or fully registered IDE settings descriptor
 *
 * In support of automatic cleanup of IDE setup routines, perform IDE
 * teardown in the reverse order of setup, limited to the aspects of IDE
 * setup that successfully completed.
 *
 * Take care that setup order mirrors this shutdown order. Otherwise,
 * open code releasing the IDE context.
 */
void pci_ide_stream_release(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_dev *rp = pcie_find_root_port(pdev);

	if (ide->partner[PCI_IDE_RP].enable)
		pci_ide_stream_disable(rp, ide);

	if (ide->partner[PCI_IDE_EP].enable)
		pci_ide_stream_disable(pdev, ide);

	if (ide->tsm_dev)
		tsm_ide_stream_unregister(ide);

	if (ide->partner[PCI_IDE_RP].setup)
		pci_ide_stream_teardown(rp, ide);

	if (ide->partner[PCI_IDE_EP].setup)
		pci_ide_stream_teardown(pdev, ide);

	if (ide->name)
		pci_ide_stream_unregister(ide);

	pci_ide_stream_free(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_release);

struct pci_ide_stream_id {
	struct pci_host_bridge *hb;
	u8 stream_id;
};

static struct pci_ide_stream_id *
request_stream_id(struct pci_host_bridge *hb, u8 stream_id,
		  struct pci_ide_stream_id *sid)
{
	if (!reserve_stream_id(hb, stream_id))
		return NULL;

	*sid = (struct pci_ide_stream_id) {
		.hb = hb,
		.stream_id = stream_id,
	};

	return sid;
}
DEFINE_FREE(free_stream_id, struct pci_ide_stream_id *,
	    if (_T) ida_free(&_T->hb->ide_stream_ids_ida, _T->stream_id))

/**
 * pci_ide_stream_register() - Prepare to activate an IDE Stream
 * @ide: IDE settings descriptor
 *
 * After a Stream ID has been acquired for @ide, record the presence of
 * the stream in sysfs. The expectation is that @ide is immutable while
 * registered.
 */
int pci_ide_stream_register(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
	struct pci_ide_stream_id __sid;
	u8 ep_stream, rp_stream;
	int rc;

	if (ide->stream_id < 0 || ide->stream_id > U8_MAX) {
		pci_err(pdev, "Setup fail: Invalid Stream ID: %d\n", ide->stream_id);
		return -ENXIO;
	}

	struct pci_ide_stream_id *sid __free(free_stream_id) =
		request_stream_id(hb, ide->stream_id, &__sid);
	if (!sid) {
		pci_err(pdev, "Setup fail: Stream ID %d in use\n", ide->stream_id);
		return -EBUSY;
	}

	ep_stream = ide->partner[PCI_IDE_EP].stream_index;
	rp_stream = ide->partner[PCI_IDE_RP].stream_index;
	const char *name __free(kfree) = kasprintf(GFP_KERNEL, "stream%d.%d.%d",
						   ide->host_bridge_stream,
						   rp_stream, ep_stream);
	if (!name)
		return -ENOMEM;

	rc = sysfs_create_link(&hb->dev.kobj, &pdev->dev.kobj, name);
	if (rc)
		return rc;

	ide->name = no_free_ptr(name);

	/* Stream ID reservation recorded in @ide is now successfully registered */
	retain_and_null_ptr(sid);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_register);
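
/*
 * For illustration: with host bridge stream 0, Root Port stream index 1,
 * and Endpoint stream index 2, the above creates a "stream0.1.2" symlink
 * under the host bridge device pointing at the endpoint's device
 * directory, enumerating the active stream for userspace.
 */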

/**
 * pci_ide_stream_unregister() - unwind pci_ide_stream_register()
 * @ide: idle IDE settings descriptor
 *
 * In preparation for freeing @ide, remove sysfs enumeration for the
 * stream.
 */
void pci_ide_stream_unregister(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);

	sysfs_remove_link(&hb->dev.kobj, ide->name);
	kfree(ide->name);
	ida_free(&hb->ide_stream_ids_ida, ide->stream_id);
	ide->name = NULL;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_unregister);

static int pci_ide_domain(struct pci_dev *pdev)
{
	if (pdev->fm_enabled)
		return pci_domain_nr(pdev->bus);
	return 0;
}

struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, struct pci_ide *ide)
{
	if (!pci_is_pcie(pdev)) {
		pci_warn_once(pdev, "not a PCIe device\n");
		return NULL;
	}

	switch (pci_pcie_type(pdev)) {
	case PCI_EXP_TYPE_ENDPOINT:
		if (pdev != ide->pdev) {
			pci_warn_once(pdev, "setup expected Endpoint: %s\n", pci_name(ide->pdev));
			return NULL;
		}
		return &ide->partner[PCI_IDE_EP];
	case PCI_EXP_TYPE_ROOT_PORT: {
		struct pci_dev *rp = pcie_find_root_port(ide->pdev);

		if (pdev != rp) {
			pci_warn_once(pdev, "setup expected Root Port: %s\n",
				      pci_name(rp));
			return NULL;
		}
		return &ide->partner[PCI_IDE_RP];
	}
	default:
		pci_warn_once(pdev, "invalid device type\n");
		return NULL;
	}
}
EXPORT_SYMBOL_GPL(pci_ide_to_settings);

static void set_ide_sel_ctl(struct pci_dev *pdev, struct pci_ide *ide,
			    struct pci_ide_partner *settings, int pos,
			    bool enable)
{
	u32 val = FIELD_PREP(PCI_IDE_SEL_CTL_ID, ide->stream_id) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_DEFAULT, settings->default_stream) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_CFG_EN, pdev->ide_cfg) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_TEE_LIMITED, pdev->ide_tee_limit) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_EN, enable);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
}

#define SEL_ADDR1_LOWER GENMASK(31, 20)
#define SEL_ADDR_UPPER GENMASK_ULL(63, 32)
#define PREP_PCI_IDE_SEL_ADDR1(base, limit)			\
	(FIELD_PREP(PCI_IDE_SEL_ADDR_1_VALID, 1) |		\
	 FIELD_PREP(PCI_IDE_SEL_ADDR_1_BASE_LOW,		\
		    FIELD_GET(SEL_ADDR1_LOWER, (base))) |	\
	 FIELD_PREP(PCI_IDE_SEL_ADDR_1_LIMIT_LOW,		\
		    FIELD_GET(SEL_ADDR1_LOWER, (limit))))

static void mem_assoc_to_regs(struct pci_bus_region *region,
			      struct pci_ide_regs *regs, int idx)
{
	/* convert to u64 range for bitfield size checks */
	struct range r = { region->start, region->end };

	regs->addr[idx].assoc1 = PREP_PCI_IDE_SEL_ADDR1(r.start, r.end);
	regs->addr[idx].assoc2 = FIELD_GET(SEL_ADDR_UPPER, r.end);
	regs->addr[idx].assoc3 = FIELD_GET(SEL_ADDR_UPPER, r.start);
}
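
/*
 * Worked example (illustrative values): for a bus region of
 * 0xc0000000..0xcfffffff, assoc1 packs the Valid bit plus base bits
 * 31:20 (0xc00) and limit bits 31:20 (0xcff), while assoc2 and assoc3
 * carry the upper 32 bits of the limit and base respectively (both 0
 * here, since the region sits below 4GB).
 */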

/**
 * pci_ide_stream_to_regs() - convert IDE settings to association register values
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 * @regs: output register values
 */
static void pci_ide_stream_to_regs(struct pci_dev *pdev, struct pci_ide *ide,
				   struct pci_ide_regs *regs)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int assoc_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (!settings)
		return;

	regs->rid1 = FIELD_PREP(PCI_IDE_SEL_RID_1_LIMIT, settings->rid_end);

	regs->rid2 = FIELD_PREP(PCI_IDE_SEL_RID_2_VALID, 1) |
		     FIELD_PREP(PCI_IDE_SEL_RID_2_BASE, settings->rid_start) |
		     FIELD_PREP(PCI_IDE_SEL_RID_2_SEG, pci_ide_domain(pdev));

	if (pdev->nr_ide_mem && pci_bus_region_size(&settings->mem_assoc)) {
		mem_assoc_to_regs(&settings->mem_assoc, regs, assoc_idx);
		assoc_idx++;
	}

	if (pdev->nr_ide_mem > assoc_idx &&
	    pci_bus_region_size(&settings->pref_assoc)) {
		mem_assoc_to_regs(&settings->pref_assoc, regs, assoc_idx);
		assoc_idx++;
	}

	regs->nr_addr = assoc_idx;
}

/**
 * pci_ide_stream_setup() - program settings to Selective IDE Stream registers
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 *
 * When @pdev is a PCI_EXP_TYPE_ENDPOINT then the PCI_IDE_EP partner
 * settings are written to @pdev's Selective IDE Stream register block,
 * and when @pdev is a PCI_EXP_TYPE_ROOT_PORT, the PCI_IDE_RP settings
 * are selected.
 */
void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	struct pci_ide_regs regs;
	int pos;

	if (!settings)
		return;

	pci_ide_stream_to_regs(pdev, ide, &regs);

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, regs.rid1);
	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, regs.rid2);

	for (int i = 0; i < regs.nr_addr; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i),
				       regs.addr[i].assoc1);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i),
				       regs.addr[i].assoc2);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i),
				       regs.addr[i].assoc3);
	}

	/* clear extra unused address association blocks */
	for (int i = regs.nr_addr; i < pdev->nr_ide_mem; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
	}

	/*
	 * Set up the control register early for devices that expect the
	 * Stream ID to be set during key programming.
	 */
	set_ide_sel_ctl(pdev, ide, settings, pos, false);
	settings->setup = 1;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_setup);

/**
 * pci_ide_stream_teardown() - disable the stream and clear all settings
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 *
 * For stream destruction, zero all registers that may have been written
 * by pci_ide_stream_setup(). Consider pci_ide_stream_disable() to leave
 * settings in place while temporarily disabling the stream.
 */
void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos, i;

	if (!settings)
		return;

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);

	for (i = 0; i < pdev->nr_ide_mem; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
	}

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, 0);
	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, 0);
	settings->setup = 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_teardown);

/**
 * pci_ide_stream_enable() - enable a Selective IDE Stream
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered and setup IDE settings descriptor
 *
 * Activate the stream by writing to the Selective IDE Stream Control
 * Register.
 *
 * Return: 0 if the stream successfully entered the "secure" state,
 * -EINVAL if @ide is invalid, and -ENXIO if the stream fails to enter
 * the secure state.
 *
 * Note that the state may go "insecure" at any point after returning 0, but
 * those events are equivalent to a "link down" event and handled via
 * asynchronous error reporting.
 *
 * The caller is responsible for clearing the enable bit in the -ENXIO
 * case.
 */
int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos;
	u32 val;

	if (!settings)
		return -EINVAL;

	pos = sel_ide_offset(pdev, settings);

	set_ide_sel_ctl(pdev, ide, settings, pos, true);
	settings->enable = 1;

	pci_read_config_dword(pdev, pos + PCI_IDE_SEL_STS, &val);
	if (FIELD_GET(PCI_IDE_SEL_STS_STATE, val) !=
	    PCI_IDE_SEL_STS_STATE_SECURE)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_enable);
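
/*
 * Sketch of the expected -ENXIO unwind at a call site (hypothetical, no
 * in-tree user shown here): the enable bit is left set so the caller
 * can inspect device state before clearing it:
 *
 *	rc = pci_ide_stream_enable(pdev, ide);
 *	if (rc == -ENXIO)
 *		pci_ide_stream_disable(pdev, ide);
 */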

/**
 * pci_ide_stream_disable() - disable a Selective IDE Stream
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered and setup IDE settings descriptor
 *
 * Clear the Selective IDE Stream Control Register, but leave all other
 * registers untouched.
 */
void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos;

	if (!settings)
		return;

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
	settings->enable = 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_disable);

void pci_ide_init_host_bridge(struct pci_host_bridge *hb)
{
	hb->nr_ide_streams = 256;
	ida_init(&hb->ide_stream_ida);
	ida_init(&hb->ide_stream_ids_ida);
	reserve_stream_id(hb, PCI_IDE_RESERVED_STREAM_ID);
}

static ssize_t available_secure_streams_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct pci_host_bridge *hb = to_pci_host_bridge(dev);
	int nr = READ_ONCE(hb->nr_ide_streams);
	int avail = nr;

	if (!nr)
		return -ENXIO;

	/*
	 * Yes, this is inefficient and racy, but it is only for occasional
	 * platform resource surveys. Worst case is bounded to 256 streams.
	 */
	for (int i = 0; i < nr; i++)
		if (ida_exists(&hb->ide_stream_ida, i))
			avail--;
	return sysfs_emit(buf, "%d\n", avail);
}
static DEVICE_ATTR_RO(available_secure_streams);

static struct attribute *pci_ide_attrs[] = {
	&dev_attr_available_secure_streams.attr,
	NULL
};

static umode_t pci_ide_attr_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_host_bridge *hb = to_pci_host_bridge(dev);

	if (a == &dev_attr_available_secure_streams.attr)
		if (!hb->nr_ide_streams)
			return 0;

	return a->mode;
}

const struct attribute_group pci_ide_attr_group = {
	.attrs = pci_ide_attrs,
	.is_visible = pci_ide_attr_visible,
};

/**
 * pci_ide_set_nr_streams() - set the size of the pool of IDE Stream resources
 * @hb: host bridge boundary for the stream pool
 * @nr: number of streams
 *
 * Platform PCI init and/or expert test module use only. Limit IDE
 * Stream establishment by setting the number of stream resources
 * available at the host bridge. Platform init code must set this before
 * the first pci_ide_stream_alloc() call if the platform has fewer than
 * the default of 256 streams per host bridge.
 *
 * The "PCI_IDE" symbol namespace is required because this is typically
 * a detail that is settled in early PCI init. I.e. this export is not
 * for endpoint drivers.
 */
void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr)
{
	hb->nr_ide_streams = min(nr, 256);
	WARN_ON_ONCE(!ida_is_empty(&hb->ide_stream_ida));
	sysfs_update_group(&hb->dev.kobj, &pci_ide_attr_group);
}
EXPORT_SYMBOL_NS_GPL(pci_ide_set_nr_streams, "PCI_IDE");
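
/*
 * Hypothetical platform init sketch (names illustrative): a platform
 * driver that knows its root complex only provisions N streams would
 * clamp the pool before any stream allocation:
 *
 *	MODULE_IMPORT_NS("PCI_IDE");
 *	...
 *	pci_ide_set_nr_streams(hb, my_platform_nr_streams);
 */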

void pci_ide_destroy(struct pci_dev *pdev)
{
	ida_destroy(&pdev->ide_stream_ida);
}