xref: /linux/drivers/iommu/amd/ppr.c (revision 36ec807b627b4c0a0a382f0ae48eac7187d14b2b)
1e08fcd90SSuravee Suthikulpanit // SPDX-License-Identifier: GPL-2.0-only
2e08fcd90SSuravee Suthikulpanit /*
3e08fcd90SSuravee Suthikulpanit  * Copyright (C) 2023 Advanced Micro Devices, Inc.
4e08fcd90SSuravee Suthikulpanit  */
5e08fcd90SSuravee Suthikulpanit 
6e08fcd90SSuravee Suthikulpanit #define pr_fmt(fmt)     "AMD-Vi: " fmt
7e08fcd90SSuravee Suthikulpanit #define dev_fmt(fmt)    pr_fmt(fmt)
8e08fcd90SSuravee Suthikulpanit 
9e08fcd90SSuravee Suthikulpanit #include <linux/amd-iommu.h>
10e08fcd90SSuravee Suthikulpanit #include <linux/delay.h>
11e08fcd90SSuravee Suthikulpanit #include <linux/mmu_notifier.h>
12e08fcd90SSuravee Suthikulpanit 
13e08fcd90SSuravee Suthikulpanit #include <asm/iommu.h>
14e08fcd90SSuravee Suthikulpanit 
15e08fcd90SSuravee Suthikulpanit #include "amd_iommu.h"
16e08fcd90SSuravee Suthikulpanit #include "amd_iommu_types.h"
17e08fcd90SSuravee Suthikulpanit 
185dc72c8aSJoerg Roedel #include "../iommu-pages.h"
195dc72c8aSJoerg Roedel 
20e08fcd90SSuravee Suthikulpanit int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
21e08fcd90SSuravee Suthikulpanit {
22e08fcd90SSuravee Suthikulpanit 	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
23e08fcd90SSuravee Suthikulpanit 					      PPR_LOG_SIZE);
24e08fcd90SSuravee Suthikulpanit 	return iommu->ppr_log ? 0 : -ENOMEM;
25e08fcd90SSuravee Suthikulpanit }
26e08fcd90SSuravee Suthikulpanit 
27e08fcd90SSuravee Suthikulpanit void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
28e08fcd90SSuravee Suthikulpanit {
29e08fcd90SSuravee Suthikulpanit 	u64 entry;
30e08fcd90SSuravee Suthikulpanit 
31e08fcd90SSuravee Suthikulpanit 	if (iommu->ppr_log == NULL)
32e08fcd90SSuravee Suthikulpanit 		return;
33e08fcd90SSuravee Suthikulpanit 
34e08fcd90SSuravee Suthikulpanit 	iommu_feature_enable(iommu, CONTROL_PPR_EN);
35e08fcd90SSuravee Suthikulpanit 
36e08fcd90SSuravee Suthikulpanit 	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
37e08fcd90SSuravee Suthikulpanit 
38e08fcd90SSuravee Suthikulpanit 	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
39e08fcd90SSuravee Suthikulpanit 		    &entry, sizeof(entry));
40e08fcd90SSuravee Suthikulpanit 
41e08fcd90SSuravee Suthikulpanit 	/* set head and tail to zero manually */
42e08fcd90SSuravee Suthikulpanit 	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
43e08fcd90SSuravee Suthikulpanit 	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
44e08fcd90SSuravee Suthikulpanit 
45e08fcd90SSuravee Suthikulpanit 	iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
46e08fcd90SSuravee Suthikulpanit 	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
47e08fcd90SSuravee Suthikulpanit }
48e08fcd90SSuravee Suthikulpanit 
/* Release the PPR log buffer allocated by amd_iommu_alloc_ppr_log(). */
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
	iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
}
53e08fcd90SSuravee Suthikulpanit 
/*
 * This function restarts ppr logging in case the IOMMU experienced
 * PPR log overflow.
 */
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
{
	/*
	 * Delegate to the common log-restart helper, passing the
	 * PPR-specific control bits and status-register masks.
	 */
	amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
			      CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
			      MMIO_STATUS_PPR_OVERFLOW_MASK);
}
64e08fcd90SSuravee Suthikulpanit 
65978d626bSWei Huang static inline u32 ppr_flag_to_fault_perm(u16 flag)
66978d626bSWei Huang {
67978d626bSWei Huang 	int perm = 0;
68978d626bSWei Huang 
69978d626bSWei Huang 	if (flag & PPR_FLAG_READ)
70978d626bSWei Huang 		perm |= IOMMU_FAULT_PERM_READ;
71978d626bSWei Huang 	if (flag & PPR_FLAG_WRITE)
72978d626bSWei Huang 		perm |= IOMMU_FAULT_PERM_WRITE;
73978d626bSWei Huang 	if (flag & PPR_FLAG_EXEC)
74978d626bSWei Huang 		perm |= IOMMU_FAULT_PERM_EXEC;
75978d626bSWei Huang 	if (!(flag & PPR_FLAG_US))
76978d626bSWei Huang 		perm |= IOMMU_FAULT_PERM_PRIV;
77978d626bSWei Huang 
78978d626bSWei Huang 	return perm;
79978d626bSWei Huang }
80978d626bSWei Huang 
81978d626bSWei Huang static bool ppr_is_valid(struct amd_iommu *iommu, u64 *raw)
82978d626bSWei Huang {
83978d626bSWei Huang 	struct device *dev = iommu->iommu.dev;
84978d626bSWei Huang 	u16 devid = PPR_DEVID(raw[0]);
85978d626bSWei Huang 
86978d626bSWei Huang 	if (!(PPR_FLAGS(raw[0]) & PPR_FLAG_GN)) {
87978d626bSWei Huang 		dev_dbg(dev, "PPR logged [Request ignored due to GN=0 (device=%04x:%02x:%02x.%x "
88978d626bSWei Huang 			"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n",
89978d626bSWei Huang 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
90978d626bSWei Huang 			PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
91978d626bSWei Huang 		return false;
92978d626bSWei Huang 	}
93978d626bSWei Huang 
94978d626bSWei Huang 	if (PPR_FLAGS(raw[0]) & PPR_FLAG_RVSD) {
95978d626bSWei Huang 		dev_dbg(dev, "PPR logged [Invalid request format (device=%04x:%02x:%02x.%x "
96978d626bSWei Huang 			"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n",
97978d626bSWei Huang 			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
98978d626bSWei Huang 			PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
99978d626bSWei Huang 		return false;
100978d626bSWei Huang 	}
101978d626bSWei Huang 
102978d626bSWei Huang 	return true;
103978d626bSWei Huang }
104978d626bSWei Huang 
105978d626bSWei Huang static void iommu_call_iopf_notifier(struct amd_iommu *iommu, u64 *raw)
106978d626bSWei Huang {
107978d626bSWei Huang 	struct iommu_dev_data *dev_data;
108978d626bSWei Huang 	struct iopf_fault event;
109978d626bSWei Huang 	struct pci_dev *pdev;
110978d626bSWei Huang 	u16 devid = PPR_DEVID(raw[0]);
111978d626bSWei Huang 
112978d626bSWei Huang 	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
113978d626bSWei Huang 		pr_info_ratelimited("Unknown PPR request received\n");
114978d626bSWei Huang 		return;
115978d626bSWei Huang 	}
116978d626bSWei Huang 
117978d626bSWei Huang 	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
118978d626bSWei Huang 					   PCI_BUS_NUM(devid), devid & 0xff);
119978d626bSWei Huang 	if (!pdev)
120978d626bSWei Huang 		return;
121978d626bSWei Huang 
122978d626bSWei Huang 	if (!ppr_is_valid(iommu, raw))
123978d626bSWei Huang 		goto out;
124978d626bSWei Huang 
125978d626bSWei Huang 	memset(&event, 0, sizeof(struct iopf_fault));
126978d626bSWei Huang 
127978d626bSWei Huang 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
128978d626bSWei Huang 	event.fault.prm.perm = ppr_flag_to_fault_perm(PPR_FLAGS(raw[0]));
129978d626bSWei Huang 	event.fault.prm.addr = (u64)(raw[1] & PAGE_MASK);
130978d626bSWei Huang 	event.fault.prm.pasid = PPR_PASID(raw[0]);
131978d626bSWei Huang 	event.fault.prm.grpid = PPR_TAG(raw[0]) & 0x1FF;
132978d626bSWei Huang 
133978d626bSWei Huang 	/*
134978d626bSWei Huang 	 * PASID zero is used for requests from the I/O device without
135978d626bSWei Huang 	 * a PASID
136978d626bSWei Huang 	 */
137978d626bSWei Huang 	dev_data = dev_iommu_priv_get(&pdev->dev);
138978d626bSWei Huang 	if (event.fault.prm.pasid == 0 ||
139978d626bSWei Huang 	    event.fault.prm.pasid >= dev_data->max_pasids) {
140978d626bSWei Huang 		pr_info_ratelimited("Invalid PASID : 0x%x, device : 0x%x\n",
141978d626bSWei Huang 				    event.fault.prm.pasid, pdev->dev.id);
142978d626bSWei Huang 		goto out;
143978d626bSWei Huang 	}
144978d626bSWei Huang 
145978d626bSWei Huang 	event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
146978d626bSWei Huang 	event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
147978d626bSWei Huang 	if (PPR_TAG(raw[0]) & 0x200)
148978d626bSWei Huang 		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
149978d626bSWei Huang 
150978d626bSWei Huang 	/* Submit event */
151978d626bSWei Huang 	iommu_report_device_fault(&pdev->dev, &event);
152978d626bSWei Huang 
153978d626bSWei Huang 	return;
154978d626bSWei Huang 
155978d626bSWei Huang out:
156978d626bSWei Huang 	/* Nobody cared, abort */
157978d626bSWei Huang 	amd_iommu_complete_ppr(&pdev->dev, PPR_PASID(raw[0]),
158978d626bSWei Huang 			       IOMMU_PAGE_RESP_FAILURE,
159978d626bSWei Huang 			       PPR_TAG(raw[0]) & 0x1FF);
160978d626bSWei Huang }
161978d626bSWei Huang 
/*
 * Drain the PPR log ring buffer: consume every entry between the
 * current head and the hardware-written tail pointer and hand each
 * one to the IOPF reporting path.
 */
void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	/* Hardware advances tail as it logs; head marks our next entry. */
	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;	/* volatile: hardware may still be writing this slot */
		u64 entry[2];
		int i;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}

		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];

		/*
		 * To detect the hardware errata 733 we need to clear the
		 * entry back to zero. This issue does not exist on SNP
		 * enabled system. Also this buffer is not writeable on
		 * SNP enabled system.
		 */
		if (!amd_iommu_snp_en)
			raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/* Handle PPR entry */
		iommu_call_iopf_notifier(iommu, entry);
	}
}
21161928babSSuravee Suthikulpanit 
21261928babSSuravee Suthikulpanit /**************************************************************
21361928babSSuravee Suthikulpanit  *
21461928babSSuravee Suthikulpanit  * IOPF handling stuff
21561928babSSuravee Suthikulpanit  */
21661928babSSuravee Suthikulpanit 
21761928babSSuravee Suthikulpanit /* Setup per-IOMMU IOPF queue if not exist. */
21861928babSSuravee Suthikulpanit int amd_iommu_iopf_init(struct amd_iommu *iommu)
21961928babSSuravee Suthikulpanit {
22061928babSSuravee Suthikulpanit 	int ret = 0;
22161928babSSuravee Suthikulpanit 
22261928babSSuravee Suthikulpanit 	if (iommu->iopf_queue)
22361928babSSuravee Suthikulpanit 		return ret;
22461928babSSuravee Suthikulpanit 
225998a0a36SVasant Hegde 	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x",
22661928babSSuravee Suthikulpanit 		 PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
22761928babSSuravee Suthikulpanit 
22861928babSSuravee Suthikulpanit 	iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
22961928babSSuravee Suthikulpanit 	if (!iommu->iopf_queue)
23061928babSSuravee Suthikulpanit 		ret = -ENOMEM;
23161928babSSuravee Suthikulpanit 
23261928babSSuravee Suthikulpanit 	return ret;
23361928babSSuravee Suthikulpanit }
23461928babSSuravee Suthikulpanit 
23561928babSSuravee Suthikulpanit /* Destroy per-IOMMU IOPF queue if no longer needed. */
23661928babSSuravee Suthikulpanit void amd_iommu_iopf_uninit(struct amd_iommu *iommu)
23761928babSSuravee Suthikulpanit {
23861928babSSuravee Suthikulpanit 	iopf_queue_free(iommu->iopf_queue);
23961928babSSuravee Suthikulpanit 	iommu->iopf_queue = NULL;
24061928babSSuravee Suthikulpanit }
241405e2f12SSuravee Suthikulpanit 
/*
 * IOMMU core page-response callback: complete the outstanding PPR for
 * @dev with the code and group id chosen by the fault handler.
 */
void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			     struct iommu_page_response *resp)
{
	/* @evt is unused; everything needed is carried in @resp. */
	amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
}
247c4cb2311SVasant Hegde 
248c4cb2311SVasant Hegde int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
249c4cb2311SVasant Hegde 			      struct iommu_dev_data *dev_data)
250c4cb2311SVasant Hegde {
251c4cb2311SVasant Hegde 	int ret = 0;
252c4cb2311SVasant Hegde 
253c4cb2311SVasant Hegde 	if (!dev_data->pri_enabled)
254c4cb2311SVasant Hegde 		return ret;
255c4cb2311SVasant Hegde 
256*526606b0SVasant Hegde 	if (!iommu->iopf_queue)
257*526606b0SVasant Hegde 		return -EINVAL;
258c4cb2311SVasant Hegde 
259c4cb2311SVasant Hegde 	ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
260c4cb2311SVasant Hegde 	if (ret)
261*526606b0SVasant Hegde 		return ret;
262c4cb2311SVasant Hegde 
263c4cb2311SVasant Hegde 	dev_data->ppr = true;
264*526606b0SVasant Hegde 	return 0;
265c4cb2311SVasant Hegde }
266c4cb2311SVasant Hegde 
/*
 * Unregister a device from this IOMMU's IOPF queue and clear its PPR
 * flag. It is assumed that the caller has verified the device was
 * previously added to the IOPF queue.
 */
void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
				  struct iommu_dev_data *dev_data)
{
	iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
	dev_data->ppr = false;
}
274