xref: /linux/drivers/pci/ide.c (revision f14faaf3a1fb3b9e4cf2e56269711fb85fba9458)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */

/* PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) */

#define dev_fmt(fmt) "PCI/IDE: " fmt
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/pci-ide.h>
#include <linux/pci_regs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

#include "pci.h"

static int __sel_ide_offset(u16 ide_cap, u8 nr_link_ide, u8 stream_index,
			    u8 nr_ide_mem)
{
	u32 offset = ide_cap + PCI_IDE_LINK_STREAM_0 +
		     nr_link_ide * PCI_IDE_LINK_BLOCK_SIZE;

	/*
	 * Assume a constant number of address association resources per stream
	 * index
	 */
	return offset + stream_index * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem);
}
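
/*
 * Worked example (values illustrative): with the IDE capability at
 * offset 0x400, two Link IDE register blocks (nr_link_ide = 2), and one
 * address association block per stream (nr_ide_mem = 1), stream_index 3
 * resolves to:
 *
 *	0x400 + PCI_IDE_LINK_STREAM_0
 *	      + 2 * PCI_IDE_LINK_BLOCK_SIZE
 *	      + 3 * PCI_IDE_SEL_BLOCK_SIZE(1)
 *
 * i.e. all Link IDE blocks precede the uniformly sized Selective IDE
 * blocks.
 */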

static int sel_ide_offset(struct pci_dev *pdev,
			  struct pci_ide_partner *settings)
{
	return __sel_ide_offset(pdev->ide_cap, pdev->nr_link_ide,
				settings->stream_index, pdev->nr_ide_mem);
}

static bool reserve_stream_index(struct pci_dev *pdev, u8 idx)
{
	int ret;

	ret = ida_alloc_range(&pdev->ide_stream_ida, idx, idx, GFP_KERNEL);
	return ret >= 0;
}

static bool reserve_stream_id(struct pci_host_bridge *hb, u8 id)
{
	int ret;

	ret = ida_alloc_range(&hb->ide_stream_ids_ida, id, id, GFP_KERNEL);
	return ret >= 0;
}

static bool claim_stream(struct pci_host_bridge *hb, u8 stream_id,
			 struct pci_dev *pdev, u8 stream_idx)
{
	dev_info(&hb->dev, "Stream ID %d active at init\n", stream_id);
	if (!reserve_stream_id(hb, stream_id)) {
		dev_info(&hb->dev, "Failed to claim %s Stream ID %d\n",
			 stream_id == PCI_IDE_RESERVED_STREAM_ID ? "reserved" :
								   "active",
			 stream_id);
		return false;
	}

	/* No stream index to reserve in the Link IDE case */
	if (!pdev)
		return true;

	if (!reserve_stream_index(pdev, stream_idx)) {
		pci_info(pdev, "Failed to claim active Selective Stream %d\n",
			 stream_idx);
		return false;
	}

	return true;
}

void pci_ide_init(struct pci_dev *pdev)
{
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
	u16 nr_link_ide, nr_ide_mem, nr_streams;
	u16 ide_cap;
	u32 val;

	/*
	 * Unconditionally init so that ida idle state is consistent with
	 * pdev->ide_cap.
	 */
	ida_init(&pdev->ide_stream_ida);

	if (!pci_is_pcie(pdev))
		return;

	ide_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_IDE);
	if (!ide_cap)
		return;

	pci_read_config_dword(pdev, ide_cap + PCI_IDE_CAP, &val);
	if ((val & PCI_IDE_CAP_SELECTIVE) == 0)
		return;
	/*
	 * Require the endpoint's IDE capability to be paired with a Root
	 * Port IDE capability.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
		struct pci_dev *rp = pcie_find_root_port(pdev);

		if (!rp->ide_cap)
			return;
	}

	pdev->ide_cfg = FIELD_GET(PCI_IDE_CAP_SEL_CFG, val);
	pdev->ide_tee_limit = FIELD_GET(PCI_IDE_CAP_TEE_LIMITED, val);

	if (val & PCI_IDE_CAP_LINK)
		nr_link_ide = 1 + FIELD_GET(PCI_IDE_CAP_LINK_TC_NUM, val);
	else
		nr_link_ide = 0;

	nr_ide_mem = 0;
	nr_streams = 1 + FIELD_GET(PCI_IDE_CAP_SEL_NUM, val);
	for (u16 i = 0; i < nr_streams; i++) {
		int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
		int nr_assoc;
		u32 val;
		u8 id;

		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);

		/*
		 * Let's not entertain streams that do not have a constant
		 * number of address association blocks
		 */
		nr_assoc = FIELD_GET(PCI_IDE_SEL_CAP_ASSOC_NUM, val);
		if (i && (nr_assoc != nr_ide_mem)) {
			pci_info(pdev, "Unsupported Selective Stream %d capability, SKIP the rest\n", i);
			nr_streams = i;
			break;
		}

		nr_ide_mem = nr_assoc;

		/*
		 * Claim Stream IDs and Selective Stream blocks that are already
		 * active on the device
		 */
		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
		id = FIELD_GET(PCI_IDE_SEL_CTL_ID, val);
		if ((val & PCI_IDE_SEL_CTL_EN) &&
		    !claim_stream(hb, id, pdev, i))
			return;
	}

	/* Reserve link stream-ids that are already active on the device */
	for (u16 i = 0; i < nr_link_ide; ++i) {
		int pos = ide_cap + PCI_IDE_LINK_STREAM_0 + i * PCI_IDE_LINK_BLOCK_SIZE;
		u8 id;

		pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
		id = FIELD_GET(PCI_IDE_LINK_CTL_ID, val);
		if ((val & PCI_IDE_LINK_CTL_EN) &&
		    !claim_stream(hb, id, NULL, -1))
			return;
	}

	for (u16 i = 0; i < nr_streams; i++) {
		int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);

		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
		if (val & PCI_IDE_SEL_CTL_EN)
			continue;
		val &= ~PCI_IDE_SEL_CTL_ID;
		val |= FIELD_PREP(PCI_IDE_SEL_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
	}

	for (u16 i = 0; i < nr_link_ide; ++i) {
		int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
			  i * PCI_IDE_LINK_BLOCK_SIZE;

		pci_read_config_dword(pdev, pos, &val);
		if (val & PCI_IDE_LINK_CTL_EN)
			continue;
		val &= ~PCI_IDE_LINK_CTL_ID;
		val |= FIELD_PREP(PCI_IDE_LINK_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
		pci_write_config_dword(pdev, pos, val);
	}

	pdev->ide_cap = ide_cap;
	pdev->nr_link_ide = nr_link_ide;
	pdev->nr_sel_ide = nr_streams;
	pdev->nr_ide_mem = nr_ide_mem;
}

struct stream_index {
	struct ida *ida;
	u8 stream_index;
};

static void free_stream_index(struct stream_index *stream)
{
	ida_free(stream->ida, stream->stream_index);
}

DEFINE_FREE(free_stream, struct stream_index *, if (_T) free_stream_index(_T))
static struct stream_index *alloc_stream_index(struct ida *ida, u16 max,
					       struct stream_index *stream)
{
	int id;

	if (!max)
		return NULL;

	id = ida_alloc_max(ida, max - 1, GFP_KERNEL);
	if (id < 0)
		return NULL;

	*stream = (struct stream_index) {
		.ida = ida,
		.stream_index = id,
	};
	return stream;
}

/**
 * pci_ide_stream_alloc() - Reserve stream indices and probe for settings
 * @pdev: IDE capable PCIe Endpoint Physical Function
 *
 * Retrieve the Requester ID range of @pdev for programming its Root
 * Port IDE RID Association registers, and conversely retrieve the
 * Requester ID of the Root Port for programming @pdev's IDE RID
 * Association registers.
 *
 * Allocate a Selective IDE Stream Register Block instance per port.
 *
 * Allocate a platform stream resource from the associated host bridge.
 * Retrieve stream association parameters for Requester ID range and
 * address range restrictions for the stream.
 */
struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
{
	/* EP, RP, + HB Stream allocation */
	struct stream_index __stream[PCI_IDE_HB + 1];
	struct pci_bus_region pref_assoc = { 0, -1 };
	struct pci_bus_region mem_assoc = { 0, -1 };
	struct resource *mem, *pref;
	struct pci_host_bridge *hb;
	struct pci_dev *rp, *br;
	int num_vf, rid_end;

	if (!pci_is_pcie(pdev))
		return NULL;

	if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
		return NULL;

	if (!pdev->ide_cap)
		return NULL;

	struct pci_ide *ide __free(kfree) = kzalloc(sizeof(*ide), GFP_KERNEL);
	if (!ide)
		return NULL;

	hb = pci_find_host_bridge(pdev->bus);
	struct stream_index *hb_stream __free(free_stream) = alloc_stream_index(
		&hb->ide_stream_ida, hb->nr_ide_streams, &__stream[PCI_IDE_HB]);
	if (!hb_stream)
		return NULL;

	rp = pcie_find_root_port(pdev);
	struct stream_index *rp_stream __free(free_stream) = alloc_stream_index(
		&rp->ide_stream_ida, rp->nr_sel_ide, &__stream[PCI_IDE_RP]);
	if (!rp_stream)
		return NULL;

	struct stream_index *ep_stream __free(free_stream) = alloc_stream_index(
		&pdev->ide_stream_ida, pdev->nr_sel_ide, &__stream[PCI_IDE_EP]);
	if (!ep_stream)
		return NULL;

	/* for SR-IOV case, cover all VFs */
	num_vf = pci_num_vf(pdev);
	if (num_vf)
		rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf - 1),
				    pci_iov_virtfn_devfn(pdev, num_vf - 1));
	else
		rid_end = pci_dev_id(pdev);

	br = pci_upstream_bridge(pdev);
	if (!br)
		return NULL;

	/*
	 * Check if the device consumes memory and/or prefetch-memory. Setup
	 * downstream address association ranges for each.
	 */
	mem = pci_resource_n(br, PCI_BRIDGE_MEM_WINDOW);
	pref = pci_resource_n(br, PCI_BRIDGE_PREF_MEM_WINDOW);
	if (resource_assigned(mem))
		pcibios_resource_to_bus(br->bus, &mem_assoc, mem);
	if (resource_assigned(pref))
		pcibios_resource_to_bus(br->bus, &pref_assoc, pref);

	*ide = (struct pci_ide) {
		.pdev = pdev,
		.partner = {
			[PCI_IDE_EP] = {
				.rid_start = pci_dev_id(rp),
				.rid_end = pci_dev_id(rp),
				.stream_index = no_free_ptr(ep_stream)->stream_index,
				/* Disable upstream address association */
				.mem_assoc = { 0, -1 },
				.pref_assoc = { 0, -1 },
			},
			[PCI_IDE_RP] = {
				.rid_start = pci_dev_id(pdev),
				.rid_end = rid_end,
				.stream_index = no_free_ptr(rp_stream)->stream_index,
				.mem_assoc = mem_assoc,
				.pref_assoc = pref_assoc,
			},
		},
		.host_bridge_stream = no_free_ptr(hb_stream)->stream_index,
		.stream_id = -1,
	};

	return_ptr(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_alloc);
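
/*
 * Example usage (illustrative sketch; the calling context and its error
 * policy are assumptions, not from this file):
 *
 *	struct pci_ide *ide = pci_ide_stream_alloc(pdev);
 *
 *	if (!ide)
 *		return -ENOMEM;
 *	ide->stream_id = 0;	(Stream ID chosen by platform policy)
 *	rc = pci_ide_stream_register(ide);
 *	if (rc) {
 *		pci_ide_stream_free(ide);
 *		return rc;
 *	}
 */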

/**
 * pci_ide_stream_free() - unwind pci_ide_stream_alloc()
 * @ide: idle IDE settings descriptor
 *
 * Free all of the stream index (register block) allocations acquired by
 * pci_ide_stream_alloc(). The stream represented by @ide is assumed to
 * be unregistered and not instantiated in any device.
 */
void pci_ide_stream_free(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_dev *rp = pcie_find_root_port(pdev);
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);

	ida_free(&pdev->ide_stream_ida, ide->partner[PCI_IDE_EP].stream_index);
	ida_free(&rp->ide_stream_ida, ide->partner[PCI_IDE_RP].stream_index);
	ida_free(&hb->ide_stream_ida, ide->host_bridge_stream);
	kfree(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_free);

/**
 * pci_ide_stream_release() - unwind and release an @ide context
 * @ide: partially or fully registered IDE settings descriptor
 *
 * In support of automatic cleanup of IDE setup routines, perform IDE
 * teardown in the expected reverse order of setup and with respect to
 * which aspects of IDE setup have successfully completed.
 *
 * Be careful that setup order mirrors this shutdown order. Otherwise,
 * open code releasing the IDE context.
 */
void pci_ide_stream_release(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_dev *rp = pcie_find_root_port(pdev);

	if (ide->partner[PCI_IDE_RP].enable)
		pci_ide_stream_disable(rp, ide);

	if (ide->partner[PCI_IDE_EP].enable)
		pci_ide_stream_disable(pdev, ide);

	if (ide->partner[PCI_IDE_RP].setup)
		pci_ide_stream_teardown(rp, ide);

	if (ide->partner[PCI_IDE_EP].setup)
		pci_ide_stream_teardown(pdev, ide);

	if (ide->name)
		pci_ide_stream_unregister(ide);

	pci_ide_stream_free(ide);
}
EXPORT_SYMBOL_GPL(pci_ide_stream_release);
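
/*
 * Example (illustrative): after a partial bring-up fails, say key
 * programming after pci_ide_stream_setup(), a single call unwinds
 * whatever completed, honoring the ->setup and ->enable flags:
 *
 *	pci_ide_stream_release(ide);
 */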

struct pci_ide_stream_id {
	struct pci_host_bridge *hb;
	u8 stream_id;
};

static struct pci_ide_stream_id *
request_stream_id(struct pci_host_bridge *hb, u8 stream_id,
		  struct pci_ide_stream_id *sid)
{
	if (!reserve_stream_id(hb, stream_id))
		return NULL;

	*sid = (struct pci_ide_stream_id) {
		.hb = hb,
		.stream_id = stream_id,
	};

	return sid;
}
DEFINE_FREE(free_stream_id, struct pci_ide_stream_id *,
	    if (_T) ida_free(&_T->hb->ide_stream_ids_ida, _T->stream_id))

/**
 * pci_ide_stream_register() - Prepare to activate an IDE Stream
 * @ide: IDE settings descriptor
 *
 * After a Stream ID has been acquired for @ide, record the presence of
 * the stream in sysfs. The expectation is that @ide is immutable while
 * registered.
 */
int pci_ide_stream_register(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
	struct pci_ide_stream_id __sid;
	u8 ep_stream, rp_stream;
	int rc;

	if (ide->stream_id < 0 || ide->stream_id > U8_MAX) {
		pci_err(pdev, "Setup fail: Invalid Stream ID: %d\n", ide->stream_id);
		return -ENXIO;
	}

	struct pci_ide_stream_id *sid __free(free_stream_id) =
		request_stream_id(hb, ide->stream_id, &__sid);
	if (!sid) {
		pci_err(pdev, "Setup fail: Stream ID %d in use\n", ide->stream_id);
		return -EBUSY;
	}

	ep_stream = ide->partner[PCI_IDE_EP].stream_index;
	rp_stream = ide->partner[PCI_IDE_RP].stream_index;
	const char *name __free(kfree) = kasprintf(GFP_KERNEL, "stream%d.%d.%d",
						   ide->host_bridge_stream,
						   rp_stream, ep_stream);
	if (!name)
		return -ENOMEM;

	rc = sysfs_create_link(&hb->dev.kobj, &pdev->dev.kobj, name);
	if (rc)
		return rc;

	ide->name = no_free_ptr(name);

	/* Stream ID reservation recorded in @ide is now successfully registered */
	retain_and_null_ptr(sid);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_register);
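
/*
 * For example (values illustrative): a stream using host bridge stream
 * resource 0, Root Port Selective Stream block 1, and Endpoint Selective
 * Stream block 0 is published as a "stream0.1.0" symlink under the host
 * bridge device, pointing at the endpoint.
 */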

/**
 * pci_ide_stream_unregister() - unwind pci_ide_stream_register()
 * @ide: idle IDE settings descriptor
 *
 * In preparation for freeing @ide, remove sysfs enumeration for the
 * stream.
 */
void pci_ide_stream_unregister(struct pci_ide *ide)
{
	struct pci_dev *pdev = ide->pdev;
	struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);

	sysfs_remove_link(&hb->dev.kobj, ide->name);
	kfree(ide->name);
	ida_free(&hb->ide_stream_ids_ida, ide->stream_id);
	ide->name = NULL;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_unregister);

static int pci_ide_domain(struct pci_dev *pdev)
{
	if (pdev->fm_enabled)
		return pci_domain_nr(pdev->bus);
	return 0;
}

struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, struct pci_ide *ide)
{
	if (!pci_is_pcie(pdev)) {
		pci_warn_once(pdev, "not a PCIe device\n");
		return NULL;
	}

	switch (pci_pcie_type(pdev)) {
	case PCI_EXP_TYPE_ENDPOINT:
		if (pdev != ide->pdev) {
			pci_warn_once(pdev, "setup expected Endpoint: %s\n", pci_name(ide->pdev));
			return NULL;
		}
		return &ide->partner[PCI_IDE_EP];
	case PCI_EXP_TYPE_ROOT_PORT: {
		struct pci_dev *rp = pcie_find_root_port(ide->pdev);

		if (pdev != rp) {
			pci_warn_once(pdev, "setup expected Root Port: %s\n",
				      pci_name(rp));
			return NULL;
		}
		return &ide->partner[PCI_IDE_RP];
	}
	default:
		pci_warn_once(pdev, "invalid device type\n");
		return NULL;
	}
}
EXPORT_SYMBOL_GPL(pci_ide_to_settings);

static void set_ide_sel_ctl(struct pci_dev *pdev, struct pci_ide *ide,
			    struct pci_ide_partner *settings, int pos,
			    bool enable)
{
	u32 val = FIELD_PREP(PCI_IDE_SEL_CTL_ID, ide->stream_id) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_DEFAULT, settings->default_stream) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_CFG_EN, pdev->ide_cfg) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_TEE_LIMITED, pdev->ide_tee_limit) |
		  FIELD_PREP(PCI_IDE_SEL_CTL_EN, enable);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
}

#define SEL_ADDR1_LOWER GENMASK(31, 20)
#define SEL_ADDR_UPPER GENMASK_ULL(63, 32)
#define PREP_PCI_IDE_SEL_ADDR1(base, limit)			\
	(FIELD_PREP(PCI_IDE_SEL_ADDR_1_VALID, 1) |		\
	 FIELD_PREP(PCI_IDE_SEL_ADDR_1_BASE_LOW,		\
		    FIELD_GET(SEL_ADDR1_LOWER, (base))) |	\
	 FIELD_PREP(PCI_IDE_SEL_ADDR_1_LIMIT_LOW,		\
		    FIELD_GET(SEL_ADDR1_LOWER, (limit))))
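
/*
 * Worked example (addresses illustrative): for an association range of
 * 0x18c0000000..0x18dfffffff, SEL_ADDR_1 carries the valid bit plus
 * bits 31:20 of the base (0xc00) and limit (0xdff); per
 * mem_assoc_to_regs() below, SEL_ADDR_2 carries the upper 32 bits of
 * the limit (0x18) and SEL_ADDR_3 the upper 32 bits of the base (0x18).
 */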

static void mem_assoc_to_regs(struct pci_bus_region *region,
			      struct pci_ide_regs *regs, int idx)
{
	/* convert to u64 range for bitfield size checks */
	struct range r = { region->start, region->end };

	regs->addr[idx].assoc1 = PREP_PCI_IDE_SEL_ADDR1(r.start, r.end);
	regs->addr[idx].assoc2 = FIELD_GET(SEL_ADDR_UPPER, r.end);
	regs->addr[idx].assoc3 = FIELD_GET(SEL_ADDR_UPPER, r.start);
}

/**
 * pci_ide_stream_to_regs() - convert IDE settings to association register values
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 * @regs: output register values
 */
static void pci_ide_stream_to_regs(struct pci_dev *pdev, struct pci_ide *ide,
				   struct pci_ide_regs *regs)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int assoc_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (!settings)
		return;

	regs->rid1 = FIELD_PREP(PCI_IDE_SEL_RID_1_LIMIT, settings->rid_end);

	regs->rid2 = FIELD_PREP(PCI_IDE_SEL_RID_2_VALID, 1) |
		     FIELD_PREP(PCI_IDE_SEL_RID_2_BASE, settings->rid_start) |
		     FIELD_PREP(PCI_IDE_SEL_RID_2_SEG, pci_ide_domain(pdev));

	if (pdev->nr_ide_mem && pci_bus_region_size(&settings->mem_assoc)) {
		mem_assoc_to_regs(&settings->mem_assoc, regs, assoc_idx);
		assoc_idx++;
	}

	if (pdev->nr_ide_mem > assoc_idx &&
	    pci_bus_region_size(&settings->pref_assoc)) {
		mem_assoc_to_regs(&settings->pref_assoc, regs, assoc_idx);
		assoc_idx++;
	}

	regs->nr_addr = assoc_idx;
}
/**
 * pci_ide_stream_setup() - program settings to Selective IDE Stream registers
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 *
 * When @pdev is a PCI_EXP_TYPE_ENDPOINT then the PCI_IDE_EP partner
 * settings are written to @pdev's Selective IDE Stream register block,
 * and when @pdev is a PCI_EXP_TYPE_ROOT_PORT, the PCI_IDE_RP settings
 * are selected.
 */
void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	struct pci_ide_regs regs;
	int pos;

	if (!settings)
		return;

	pci_ide_stream_to_regs(pdev, ide, &regs);

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, regs.rid1);
	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, regs.rid2);

	for (int i = 0; i < regs.nr_addr; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i),
				       regs.addr[i].assoc1);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i),
				       regs.addr[i].assoc2);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i),
				       regs.addr[i].assoc3);
	}

	/* clear extra unused address association blocks */
	for (int i = regs.nr_addr; i < pdev->nr_ide_mem; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
	}

	/*
	 * Set up the control register early for devices that expect the
	 * Stream ID to be set during key programming.
	 */
	set_ide_sel_ctl(pdev, ide, settings, pos, false);
	settings->setup = 1;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_setup);

/**
 * pci_ide_stream_teardown() - disable the stream and clear all settings
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered IDE settings descriptor
 *
 * For stream destruction, zero all registers that may have been written
 * by pci_ide_stream_setup(). Consider pci_ide_stream_disable() to leave
 * settings in place while temporarily disabling the stream.
 */
void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos, i;

	if (!settings)
		return;

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);

	for (i = 0; i < pdev->nr_ide_mem; i++) {
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
		pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
	}

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, 0);
	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, 0);
	settings->setup = 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_teardown);

/**
 * pci_ide_stream_enable() - enable a Selective IDE Stream
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered and setup IDE settings descriptor
 *
 * Activate the stream by writing to the Selective IDE Stream Control
 * Register.
 *
 * Return: 0 if the stream successfully entered the "secure" state, -EINVAL
 * if @ide is invalid, or -ENXIO if the stream fails to enter the secure state.
 *
 * Note that the state may go "insecure" at any point after returning 0, but
 * those events are equivalent to a "link down" event and are handled via
 * asynchronous error reporting.
 *
 * The caller is responsible for clearing the enable bit in the -ENXIO case.
 */
int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos;
	u32 val;

	if (!settings)
		return -EINVAL;

	pos = sel_ide_offset(pdev, settings);

	set_ide_sel_ctl(pdev, ide, settings, pos, true);
	settings->enable = 1;

	pci_read_config_dword(pdev, pos + PCI_IDE_SEL_STS, &val);
	if (FIELD_GET(PCI_IDE_SEL_STS_STATE, val) !=
	    PCI_IDE_SEL_STS_STATE_SECURE)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_enable);
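
/*
 * Illustrative end-to-end sketch (not a definitive sequence; "rp" is the
 * Root Port partner, and key programming via an IDE_KM transport is out
 * of scope for this file):
 *
 *	pci_ide_stream_setup(pdev, ide);
 *	pci_ide_stream_setup(rp, ide);
 *
 *	...program keys for both ports...
 *
 *	rc = pci_ide_stream_enable(pdev, ide);
 *	if (!rc)
 *		rc = pci_ide_stream_enable(rp, ide);
 *	if (rc)
 *		pci_ide_stream_release(ide);
 *
 * Matching the pci_ide_stream_release() unwind order implies enabling
 * the Endpoint before the Root Port.
 */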

/**
 * pci_ide_stream_disable() - disable a Selective IDE Stream
 * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
 * @ide: registered and setup IDE settings descriptor
 *
 * Clear the Selective IDE Stream Control Register, but leave all other
 * registers untouched.
 */
void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide)
{
	struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
	int pos;

	if (!settings)
		return;

	pos = sel_ide_offset(pdev, settings);

	pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
	settings->enable = 0;
}
EXPORT_SYMBOL_GPL(pci_ide_stream_disable);

void pci_ide_init_host_bridge(struct pci_host_bridge *hb)
{
	hb->nr_ide_streams = 256;
	ida_init(&hb->ide_stream_ida);
	ida_init(&hb->ide_stream_ids_ida);
	reserve_stream_id(hb, PCI_IDE_RESERVED_STREAM_ID);
}
static ssize_t available_secure_streams_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct pci_host_bridge *hb = to_pci_host_bridge(dev);
	int nr = READ_ONCE(hb->nr_ide_streams);
	int avail = nr;

	if (!nr)
		return -ENXIO;

	/*
	 * Yes, this is inefficient and racy, but it is only for occasional
	 * platform resource surveys. Worst case is bounded to 256 streams.
	 */
	for (int i = 0; i < nr; i++)
		if (ida_exists(&hb->ide_stream_ida, i))
			avail--;
	return sysfs_emit(buf, "%d\n", avail);
}
static DEVICE_ATTR_RO(available_secure_streams);

static struct attribute *pci_ide_attrs[] = {
	&dev_attr_available_secure_streams.attr,
	NULL
};

static umode_t pci_ide_attr_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_host_bridge *hb = to_pci_host_bridge(dev);

	if (a == &dev_attr_available_secure_streams.attr)
		if (!hb->nr_ide_streams)
			return 0;

	return a->mode;
}

const struct attribute_group pci_ide_attr_group = {
	.attrs = pci_ide_attrs,
	.is_visible = pci_ide_attr_visible,
};

/**
 * pci_ide_set_nr_streams() - set the size of the pool of IDE Stream resources
 * @hb: host bridge boundary for the stream pool
 * @nr: number of streams
 *
 * Platform PCI init and/or expert test module use only. Limit IDE
 * Stream establishment by setting the number of stream resources
 * available at the host bridge. Platform init code must set this before
 * the first pci_ide_stream_alloc() call if the platform has fewer than
 * the default of 256 streams per host bridge.
 *
 * The "PCI_IDE" symbol namespace is required because this is typically
 * a detail that is settled in early PCI init. I.e. this export is not
 * for endpoint drivers.
 */
void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr)
{
	hb->nr_ide_streams = min(nr, 256);
	WARN_ON_ONCE(!ida_is_empty(&hb->ide_stream_ida));
	sysfs_update_group(&hb->dev.kobj, &pci_ide_attr_group);
}
EXPORT_SYMBOL_NS_GPL(pci_ide_set_nr_streams, "PCI_IDE");
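
/*
 * Example (illustrative): platform init code for a host bridge that can
 * route only 4 concurrent IDE streams caps the pool before any stream
 * allocation:
 *
 *	pci_ide_set_nr_streams(hb, 4);
 */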

void pci_ide_destroy(struct pci_dev *pdev)
{
	ida_destroy(&pdev->ide_stream_ida);
}