/linux/drivers/iommu/amd/ppr.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Advanced Micro Devices, Inc.
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/amd-iommu.h>
#include <linux/delay.h>
#include <linux/mmu_notifier.h>

#include <asm/iommu.h>

#include "amd_iommu.h"
#include "amd_iommu_types.h"

#include "../iommu-pages.h"

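/*
 * Allocate the Peripheral Page Request (PPR) log, a ring buffer that
 * the IOMMU hardware fills with page request entries from devices.
 */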
int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
					      PPR_LOG_SIZE);
	return iommu->ppr_log ? 0 : -ENOMEM;
}

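/*
 * Program the PPR log registers and enable PPR logging together with
 * its interrupt. If no log buffer was allocated, PPR stays disabled.
 */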
void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	iommu_feature_enable(iommu, CONTROL_PPR_EN);

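	/* Encode the log base address and size into the MMIO base register */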
	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
}

void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
	iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

/*
 * This function restarts PPR logging in case the IOMMU experienced
 * a PPR log overflow.
 */
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
{
	amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
			      CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
			      MMIO_STATUS_PPR_OVERFLOW_MASK);
}

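/*
 * Translate PPR entry flags into the generic IOMMU fault permission
 * bits. A request without the US (user/supervisor) flag set originates
 * from a privileged context.
 */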
static inline u32 ppr_flag_to_fault_perm(u16 flag)
{
	int perm = 0;

	if (flag & PPR_FLAG_READ)
		perm |= IOMMU_FAULT_PERM_READ;
	if (flag & PPR_FLAG_WRITE)
		perm |= IOMMU_FAULT_PERM_WRITE;
	if (flag & PPR_FLAG_EXEC)
		perm |= IOMMU_FAULT_PERM_EXEC;
	if (!(flag & PPR_FLAG_US))
		perm |= IOMMU_FAULT_PERM_PRIV;

	return perm;
}

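/*
 * Sanity-check a raw PPR log entry. Requests with GN=0 (no PASID
 * supplied) or with the RVSD (reserved) flag set cannot be handled
 * and are dropped after a debug message.
 */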
static bool ppr_is_valid(struct amd_iommu *iommu, u64 *raw)
{
	struct device *dev = iommu->iommu.dev;
	u16 devid = PPR_DEVID(raw[0]);

	if (!(PPR_FLAGS(raw[0]) & PPR_FLAG_GN)) {
		dev_dbg(dev, "PPR logged [Request ignored due to GN=0 (device=%04x:%02x:%02x.%x "
			"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx)]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
		return false;
	}

	if (PPR_FLAGS(raw[0]) & PPR_FLAG_RVSD) {
		dev_dbg(dev, "PPR logged [Invalid request format (device=%04x:%02x:%02x.%x "
			"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx)]\n",
			iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
		return false;
	}

	return true;
}

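/*
 * Convert a raw PPR log entry into an iopf_fault event and forward it
 * to the generic IOMMU page fault layer. If the entry cannot be
 * handled, complete the PPR with a failure response so the device
 * does not wait forever.
 */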
static void iommu_call_iopf_notifier(struct amd_iommu *iommu, u64 *raw)
{
	struct iommu_dev_data *dev_data;
	struct iopf_fault event;
	struct pci_dev *pdev;
	u16 devid = PPR_DEVID(raw[0]);

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_info_ratelimited("Unknown PPR request received\n");
		return;
	}

	pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
					   PCI_BUS_NUM(devid), devid & 0xff);
	if (!pdev)
		return;

	if (!ppr_is_valid(iommu, raw))
		goto out;

	memset(&event, 0, sizeof(struct iopf_fault));

	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.perm = ppr_flag_to_fault_perm(PPR_FLAGS(raw[0]));
	event.fault.prm.addr = (u64)(raw[1] & PAGE_MASK);
	event.fault.prm.pasid = PPR_PASID(raw[0]);
	event.fault.prm.grpid = PPR_TAG(raw[0]) & 0x1FF;

	/*
	 * PASID zero is used for requests from the I/O device without
	 * a PASID.
	 */
	dev_data = dev_iommu_priv_get(&pdev->dev);
	if (event.fault.prm.pasid == 0 ||
	    event.fault.prm.pasid >= dev_data->max_pasids) {
		pr_info_ratelimited("Invalid PASID: 0x%x, device: 0x%x\n",
				    event.fault.prm.pasid, pdev->dev.id);
		goto out;
	}

	event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
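	/* Bit 9 of the PPR tag is the PCIe PRI "last request in group" bit */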
	if (PPR_TAG(raw[0]) & 0x200)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	/* Submit event */
	iommu_report_device_fault(&pdev->dev, &event);

	return;

out:
	/* Nobody cared, abort */
	amd_iommu_complete_ppr(&pdev->dev, PPR_PASID(raw[0]),
			       IOMMU_PAGE_RESP_FAILURE,
			       PPR_TAG(raw[0]) & 0x1FF);
}

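/*
 * Drain the PPR log ring buffer: consume every entry between the
 * hardware head and tail pointers and dispatch each one to the IOPF
 * notifier. Runs from the IOMMU interrupt handling path.
 */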
void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 entry[2];
		int i;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}

		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];

		/*
		 * To detect hardware erratum 733 we need to clear the
		 * entry back to zero. This issue does not exist on
		 * SNP-enabled systems, and the buffer is not writable
		 * there anyway.
		 */
		if (!amd_iommu_snp_en)
			raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/* Handle PPR entry */
		iommu_call_iopf_notifier(iommu, entry);
	}
}

/**************************************************************
 *
 * IOPF (I/O Page Fault) handling
 */

/* Set up the per-IOMMU IOPF queue if it does not already exist. */
int amd_iommu_iopf_init(struct amd_iommu *iommu)
{
	int ret = 0;

	if (iommu->iopf_queue)
		return ret;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "amdiommu-%#x-iopfq",
		 PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));

	iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
	if (!iommu->iopf_queue)
		ret = -ENOMEM;

	return ret;
}

/* Destroy the per-IOMMU IOPF queue when it is no longer needed. */
void amd_iommu_iopf_uninit(struct amd_iommu *iommu)
{
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
}

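/*
 * iommu_ops page_response callback: relay the response from the IOPF
 * layer back to the device as a PPR completion.
 */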
void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			     struct iommu_page_response *resp)
{
	amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
}

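/*
 * Attach a PRI-capable device to the per-IOMMU IOPF queue so its page
 * requests can be handled. Devices without PRI enabled are skipped.
 */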
int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
			      struct iommu_dev_data *dev_data)
{
	unsigned long flags;
	int ret = 0;

	if (!dev_data->pri_enabled)
		return ret;

	raw_spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->iopf_queue) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
	if (ret)
		goto out_unlock;

	dev_data->ppr = true;

out_unlock:
	raw_spin_unlock_irqrestore(&iommu->lock, flags);
	return ret;
}

/* It is assumed that the caller has verified the device was added to the IOPF queue. */
void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
				  struct iommu_dev_data *dev_data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&iommu->lock, flags);

	iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
	dev_data->ppr = false;

	raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
289