xref: /linux/drivers/iommu/intel/dmar.c (revision 8477ab143069c6b05d6da4a8184ded8b969240f5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2006, Intel Corporation.
4  *
5  * Copyright (C) 2006-2008 Intel Corporation
6  * Author: Ashok Raj <ashok.raj@intel.com>
7  * Author: Shaohua Li <shaohua.li@intel.com>
8  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
9  *
10  * This file implements early detection/parsing of Remapping Devices
11  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
12  * tables.
13  *
14  * These routines are used by both DMA-remapping and Interrupt-remapping
15  */
16 
17 #define pr_fmt(fmt)     "DMAR: " fmt
18 
19 #include <linux/pci.h>
20 #include <linux/dmar.h>
21 #include <linux/iova.h>
22 #include <linux/timer.h>
23 #include <linux/irq.h>
24 #include <linux/interrupt.h>
25 #include <linux/tboot.h>
26 #include <linux/dmi.h>
27 #include <linux/slab.h>
28 #include <linux/iommu.h>
29 #include <linux/numa.h>
30 #include <linux/limits.h>
31 #include <asm/irq_remapping.h>
32 
33 #include "iommu.h"
34 #include "../irq_remapping.h"
35 #include "../iommu-pages.h"
36 #include "perf.h"
37 #include "trace.h"
38 #include "perfmon.h"
39 
40 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
41 struct dmar_res_callback {
42 	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
43 	void			*arg[ACPI_DMAR_TYPE_RESERVED];
44 	bool			ignore_unhandled;
45 	bool			print_entry;
46 };
47 
48 /*
49  * Assumptions:
50  * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
51  *    before the IO devices managed by that unit.
52  * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
53  *    after the IO devices managed by that unit.
54  * 3) Hotplug events are rare.
55  *
56  * Locking rules for DMA and interrupt remapping related global data structures:
57  * 1) Use dmar_global_lock in process context
58  * 2) Use RCU in interrupt context
59  */
60 DECLARE_RWSEM(dmar_global_lock);
61 LIST_HEAD(dmar_drhd_units);
62 
63 struct acpi_table_header * __initdata dmar_tbl;
64 static int dmar_dev_scope_status = 1;
65 static DEFINE_IDA(dmar_seq_ids);
66 
67 static int alloc_iommu(struct dmar_drhd_unit *drhd);
68 static void free_iommu(struct intel_iommu *iommu);
69 
70 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
71 {
72 	/*
73 	 * Add the INCLUDE_ALL unit at the tail, so a scan of the list will
74 	 * find it at the very end.
75 	 */
76 	if (drhd->include_all)
77 		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
78 	else
79 		list_add_rcu(&drhd->list, &dmar_drhd_units);
80 }
81 
82 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
83 {
84 	struct acpi_dmar_device_scope *scope;
85 
86 	*cnt = 0;
87 	while (start < end) {
88 		scope = start;
89 		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
90 		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
91 		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
92 			(*cnt)++;
93 		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
94 			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
95 			pr_warn("Unsupported device scope\n");
96 		}
97 		start += scope->length;
98 	}
99 	if (*cnt == 0)
100 		return NULL;
101 
102 	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
103 }
104 
105 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
106 {
107 	int i;
108 	struct device *tmp_dev;
109 
110 	if (*devices && *cnt) {
111 		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
112 			put_device(tmp_dev);
113 		kfree(*devices);
114 	}
115 
116 	*devices = NULL;
117 	*cnt = 0;
118 }
119 
120 /* Optimize out kzalloc()/kfree() for normal cases */
121 static char dmar_pci_notify_info_buf[64];
122 
123 static struct dmar_pci_notify_info *
124 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
125 {
126 	int level = 0;
127 	size_t size;
128 	struct pci_dev *tmp;
129 	struct dmar_pci_notify_info *info;
130 
131 	/*
132 	 * Ignore devices that have a domain number higher than what can
133 	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
134 	 */
135 	if (pci_domain_nr(dev->bus) > U16_MAX)
136 		return NULL;
137 
138 	/* Only generate path[] for device addition event */
139 	if (event == BUS_NOTIFY_ADD_DEVICE)
140 		for (tmp = dev; tmp; tmp = tmp->bus->self)
141 			level++;
142 
143 	size = struct_size(info, path, level);
144 	if (size <= sizeof(dmar_pci_notify_info_buf)) {
145 		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
146 	} else {
147 		info = kzalloc(size, GFP_KERNEL);
148 		if (!info) {
149 			if (dmar_dev_scope_status == 0)
150 				dmar_dev_scope_status = -ENOMEM;
151 			return NULL;
152 		}
153 	}
154 
155 	info->event = event;
156 	info->dev = dev;
157 	info->seg = pci_domain_nr(dev->bus);
158 	info->level = level;
159 	if (event == BUS_NOTIFY_ADD_DEVICE) {
160 		for (tmp = dev; tmp; tmp = tmp->bus->self) {
161 			level--;
162 			info->path[level].bus = tmp->bus->number;
163 			info->path[level].device = PCI_SLOT(tmp->devfn);
164 			info->path[level].function = PCI_FUNC(tmp->devfn);
165 			if (pci_is_root_bus(tmp->bus))
166 				info->bus = tmp->bus->number;
167 		}
168 	}
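	/*
	 * Illustrative sketch (hypothetical topology): for an endpoint at
	 * 0000:05:00.0 behind a bridge 04:00.0 and a root port 00:1c.4, the
	 * loop above fills info->path[] root-first while walking leaf to root:
	 *   path[0] = { bus 0x00, device 0x1c, function 4 }
	 *   path[1] = { bus 0x04, device 0x00, function 0 }
	 *   path[2] = { bus 0x05, device 0x00, function 0 }
	 * and info->bus holds the root bus number (0x00), matching the
	 * root-first ordering of DMAR device-scope PCI paths.
	 */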
169 
170 	return info;
171 }
172 
173 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
174 {
175 	if ((void *)info != dmar_pci_notify_info_buf)
176 		kfree(info);
177 }
178 
179 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
180 				struct acpi_dmar_pci_path *path, int count)
181 {
182 	int i;
183 
184 	if (info->bus != bus)
185 		goto fallback;
186 	if (info->level != count)
187 		goto fallback;
188 
189 	for (i = 0; i < count; i++) {
190 		if (path[i].device != info->path[i].device ||
191 		    path[i].function != info->path[i].function)
192 			goto fallback;
193 	}
194 
195 	return true;
196 
197 fallback:
198 
199 	if (count != 1)
200 		return false;
201 
202 	i = info->level - 1;
203 	if (bus              == info->path[i].bus &&
204 	    path[0].device   == info->path[i].device &&
205 	    path[0].function == info->path[i].function) {
206 		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
207 			bus, path[0].device, path[0].function);
208 		return true;
209 	}
210 
211 	return false;
212 }
213 
214 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
215 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
216 			  void *start, void *end, u16 segment,
217 			  struct dmar_dev_scope *devices,
218 			  int devices_cnt)
219 {
220 	int i, level;
221 	struct device *tmp, *dev = &info->dev->dev;
222 	struct acpi_dmar_device_scope *scope;
223 	struct acpi_dmar_pci_path *path;
224 
225 	if (segment != info->seg)
226 		return 0;
227 
228 	for (; start < end; start += scope->length) {
229 		scope = start;
230 		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
231 		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
232 			continue;
233 
234 		path = (struct acpi_dmar_pci_path *)(scope + 1);
235 		level = (scope->length - sizeof(*scope)) / sizeof(*path);
236 		if (!dmar_match_pci_path(info, scope->bus, path, level))
237 			continue;
238 
239 		/*
240 		 * We expect devices with endpoint scope to have normal PCI
241 		 * headers, and devices with bridge scope to have bridge PCI
242 		 * headers.  However PCI NTB devices may be listed in the
243 		 * DMAR table with bridge scope, even though they have a
244 		 * normal PCI header.  NTB devices are identified by class
245 		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
246 		 * for this special case.
247 		 */
248 		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
249 		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
250 		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
251 		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
252 		      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
253 			pr_warn("Device scope type does not match for %s\n",
254 				pci_name(info->dev));
255 			return -EINVAL;
256 		}
257 
258 		for_each_dev_scope(devices, devices_cnt, i, tmp)
259 			if (tmp == NULL) {
260 				devices[i].bus = info->dev->bus->number;
261 				devices[i].devfn = info->dev->devfn;
262 				rcu_assign_pointer(devices[i].dev,
263 						   get_device(dev));
264 				return 1;
265 			}
266 		if (WARN_ON(i >= devices_cnt))
267 			return -EINVAL;
268 	}
269 
270 	return 0;
271 }
272 
273 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
274 			  struct dmar_dev_scope *devices, int count)
275 {
276 	int index;
277 	struct device *tmp;
278 
279 	if (info->seg != segment)
280 		return 0;
281 
282 	for_each_active_dev_scope(devices, count, index, tmp)
283 		if (tmp == &info->dev->dev) {
284 			RCU_INIT_POINTER(devices[index].dev, NULL);
285 			synchronize_rcu();
286 			put_device(tmp);
287 			return 1;
288 		}
289 
290 	return 0;
291 }
292 
293 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
294 {
295 	int ret = 0;
296 	struct dmar_drhd_unit *dmaru;
297 	struct acpi_dmar_hardware_unit *drhd;
298 
299 	for_each_drhd_unit(dmaru) {
300 		if (dmaru->include_all)
301 			continue;
302 
303 		drhd = container_of(dmaru->hdr,
304 				    struct acpi_dmar_hardware_unit, header);
305 		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
306 				((void *)drhd) + drhd->header.length,
307 				dmaru->segment,
308 				dmaru->devices, dmaru->devices_cnt);
309 		if (ret)
310 			break;
311 	}
312 	if (ret >= 0)
313 		ret = dmar_iommu_notify_scope_dev(info);
314 	if (ret < 0 && dmar_dev_scope_status == 0)
315 		dmar_dev_scope_status = ret;
316 
317 	if (ret >= 0)
318 		intel_irq_remap_add_device(info);
319 
320 	return ret;
321 }
322 
323 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
324 {
325 	struct dmar_drhd_unit *dmaru;
326 
327 	for_each_drhd_unit(dmaru)
328 		if (dmar_remove_dev_scope(info, dmaru->segment,
329 			dmaru->devices, dmaru->devices_cnt))
330 			break;
331 	dmar_iommu_notify_scope_dev(info);
332 }
333 
334 static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
335 {
336 	struct pci_dev *physfn = pci_physfn(pdev);
337 
338 	dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
339 }
340 
341 static int dmar_pci_bus_notifier(struct notifier_block *nb,
342 				 unsigned long action, void *data)
343 {
344 	struct pci_dev *pdev = to_pci_dev(data);
345 	struct dmar_pci_notify_info *info;
346 
347 	/* Only care about add/remove events for physical functions.
348 	 * For VFs we actually do the lookup based on the corresponding
349 	 * PF in device_to_iommu() anyway. */
350 	if (pdev->is_virtfn) {
351 		/*
352 		 * Ensure that the VF device inherits the irq domain of the
353 		 * PF device. Ideally the device would inherit the domain
354 		 * from the bus, but DMAR can have multiple units per bus
355 		 * which makes this impossible. The VF 'bus' could inherit
356 		 * from the PF device, but that's yet another x86'sism to
357 		 * inflict on everybody else.
358 		 */
359 		if (action == BUS_NOTIFY_ADD_DEVICE)
360 			vf_inherit_msi_domain(pdev);
361 		return NOTIFY_DONE;
362 	}
363 
364 	if (action != BUS_NOTIFY_ADD_DEVICE &&
365 	    action != BUS_NOTIFY_REMOVED_DEVICE)
366 		return NOTIFY_DONE;
367 
368 	info = dmar_alloc_pci_notify_info(pdev, action);
369 	if (!info)
370 		return NOTIFY_DONE;
371 
372 	down_write(&dmar_global_lock);
373 	if (action == BUS_NOTIFY_ADD_DEVICE)
374 		dmar_pci_bus_add_dev(info);
375 	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
376 		dmar_pci_bus_del_dev(info);
377 	up_write(&dmar_global_lock);
378 
379 	dmar_free_pci_notify_info(info);
380 
381 	return NOTIFY_OK;
382 }
383 
384 static struct notifier_block dmar_pci_bus_nb = {
385 	.notifier_call = dmar_pci_bus_notifier,
386 	.priority = 1,
387 };
388 
389 static struct dmar_drhd_unit *
390 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
391 {
392 	struct dmar_drhd_unit *dmaru;
393 
394 	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
395 				dmar_rcu_check())
396 		if (dmaru->segment == drhd->segment &&
397 		    dmaru->reg_base_addr == drhd->address)
398 			return dmaru;
399 
400 	return NULL;
401 }
402 
403 /*
404  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
405  * structure which uniquely represents one DMA remapping hardware unit
406  * present in the platform
407  */
408 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
409 {
410 	struct acpi_dmar_hardware_unit *drhd;
411 	struct dmar_drhd_unit *dmaru;
412 	int ret;
413 
414 	drhd = (struct acpi_dmar_hardware_unit *)header;
415 	dmaru = dmar_find_dmaru(drhd);
416 	if (dmaru)
417 		goto out;
418 
419 	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
420 	if (!dmaru)
421 		return -ENOMEM;
422 
423 	/*
424 	 * If header is allocated from slab by ACPI _DSM method, we need to
425 	 * copy the content because the memory buffer will be freed on return.
426 	 */
427 	dmaru->hdr = (void *)(dmaru + 1);
428 	memcpy(dmaru->hdr, header, header->length);
429 	dmaru->reg_base_addr = drhd->address;
430 	dmaru->segment = drhd->segment;
431 	/* The size of the register set is 2 ^ N 4 KB pages. */
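	/* e.g. a size field of 0 gives 4 KB (1UL << 12); a value of 2 gives 16 KB (1UL << 14) */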
432 	dmaru->reg_size = 1UL << (drhd->size + 12);
433 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
434 	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
435 					      ((void *)drhd) + drhd->header.length,
436 					      &dmaru->devices_cnt);
437 	if (dmaru->devices_cnt && dmaru->devices == NULL) {
438 		kfree(dmaru);
439 		return -ENOMEM;
440 	}
441 
442 	ret = alloc_iommu(dmaru);
443 	if (ret) {
444 		dmar_free_dev_scope(&dmaru->devices,
445 				    &dmaru->devices_cnt);
446 		kfree(dmaru);
447 		return ret;
448 	}
449 	dmar_register_drhd_unit(dmaru);
450 
451 out:
452 	if (arg)
453 		(*(int *)arg)++;
454 
455 	return 0;
456 }
457 
458 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
459 {
460 	if (dmaru->devices && dmaru->devices_cnt)
461 		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
462 	if (dmaru->iommu)
463 		free_iommu(dmaru->iommu);
464 	kfree(dmaru);
465 }
466 
467 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
468 				      void *arg)
469 {
470 	struct acpi_dmar_andd *andd = (void *)header;
471 
472 	/* Check for NUL termination within the designated length */
473 	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
474 		pr_warn(FW_BUG
475 			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
476 			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
477 			   dmi_get_system_info(DMI_BIOS_VENDOR),
478 			   dmi_get_system_info(DMI_BIOS_VERSION),
479 			   dmi_get_system_info(DMI_PRODUCT_VERSION));
480 		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
481 		return -EINVAL;
482 	}
483 	pr_info("ANDD device: %x name: %s\n", andd->device_number,
484 		andd->device_name);
485 
486 	return 0;
487 }
488 
489 #ifdef CONFIG_ACPI_NUMA
490 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
491 {
492 	struct acpi_dmar_rhsa *rhsa;
493 	struct dmar_drhd_unit *drhd;
494 
495 	rhsa = (struct acpi_dmar_rhsa *)header;
496 	for_each_drhd_unit(drhd) {
497 		if (drhd->reg_base_addr == rhsa->base_address) {
498 			int node = pxm_to_node(rhsa->proximity_domain);
499 
500 			if (node != NUMA_NO_NODE && !node_online(node))
501 				node = NUMA_NO_NODE;
502 			drhd->iommu->node = node;
503 			return 0;
504 		}
505 	}
506 	pr_warn(FW_BUG
507 		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
508 		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
509 		rhsa->base_address,
510 		dmi_get_system_info(DMI_BIOS_VENDOR),
511 		dmi_get_system_info(DMI_BIOS_VERSION),
512 		dmi_get_system_info(DMI_PRODUCT_VERSION));
513 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
514 
515 	return 0;
516 }
517 #else
518 #define	dmar_parse_one_rhsa		dmar_res_noop
519 #endif
520 
521 static void
522 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
523 {
524 	struct acpi_dmar_hardware_unit *drhd;
525 	struct acpi_dmar_reserved_memory *rmrr;
526 	struct acpi_dmar_atsr *atsr;
527 	struct acpi_dmar_rhsa *rhsa;
528 	struct acpi_dmar_satc *satc;
529 
530 	switch (header->type) {
531 	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
532 		drhd = container_of(header, struct acpi_dmar_hardware_unit,
533 				    header);
534 		pr_info("DRHD base: %#016Lx flags: %#x\n",
535 			(unsigned long long)drhd->address, drhd->flags);
536 		break;
537 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
538 		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
539 				    header);
540 		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
541 			(unsigned long long)rmrr->base_address,
542 			(unsigned long long)rmrr->end_address);
543 		break;
544 	case ACPI_DMAR_TYPE_ROOT_ATS:
545 		atsr = container_of(header, struct acpi_dmar_atsr, header);
546 		pr_info("ATSR flags: %#x\n", atsr->flags);
547 		break;
548 	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
549 		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
550 		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
551 		       (unsigned long long)rhsa->base_address,
552 		       rhsa->proximity_domain);
553 		break;
554 	case ACPI_DMAR_TYPE_NAMESPACE:
555 		/* We don't print this here because we need to sanity-check
556 		   it first. So print it in dmar_parse_one_andd() instead. */
557 		break;
558 	case ACPI_DMAR_TYPE_SATC:
559 		satc = container_of(header, struct acpi_dmar_satc, header);
560 		pr_info("SATC flags: 0x%x\n", satc->flags);
561 		break;
562 	}
563 }
564 
565 /**
566  * dmar_table_detect - checks to see if the platform supports DMAR devices
567  */
568 static int __init dmar_table_detect(void)
569 {
570 	acpi_status status = AE_OK;
571 
572 	/* if we can find the DMAR table, then there are DMAR devices */
573 	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
574 
575 	if (ACPI_SUCCESS(status) && !dmar_tbl) {
576 		pr_warn("Unable to map DMAR\n");
577 		status = AE_NOT_FOUND;
578 	}
579 
580 	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
581 }
582 
583 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
584 				       size_t len, struct dmar_res_callback *cb)
585 {
586 	struct acpi_dmar_header *iter, *next;
587 	struct acpi_dmar_header *end = ((void *)start) + len;
588 
589 	for (iter = start; iter < end; iter = next) {
590 		next = (void *)iter + iter->length;
591 		if (iter->length == 0) {
592 			/* Avoid looping forever on bad ACPI tables */
593 			pr_debug(FW_BUG "Invalid 0-length structure\n");
594 			break;
595 		} else if (next > end) {
596 			/* Avoid passing table end */
597 			pr_warn(FW_BUG "Record passes table end\n");
598 			return -EINVAL;
599 		}
600 
601 		if (cb->print_entry)
602 			dmar_table_print_dmar_entry(iter);
603 
604 		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
605 			/* continue for forward compatibility */
606 			pr_debug("Unknown DMAR structure type %d\n",
607 				 iter->type);
608 		} else if (cb->cb[iter->type]) {
609 			int ret;
610 
611 			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
612 			if (ret)
613 				return ret;
614 		} else if (!cb->ignore_unhandled) {
615 			pr_warn("No handler for DMAR structure type %d\n",
616 				iter->type);
617 			return -EINVAL;
618 		}
619 	}
620 
621 	return 0;
622 }
623 
624 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
625 				       struct dmar_res_callback *cb)
626 {
627 	return dmar_walk_remapping_entries((void *)(dmar + 1),
628 			dmar->header.length - sizeof(*dmar), cb);
629 }
630 
631 /**
632  * parse_dmar_table - parses the DMA reporting table
633  */
634 static int __init
635 parse_dmar_table(void)
636 {
637 	struct acpi_table_dmar *dmar;
638 	int drhd_count = 0;
639 	int ret;
640 	struct dmar_res_callback cb = {
641 		.print_entry = true,
642 		.ignore_unhandled = true,
643 		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
644 		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
645 		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
646 		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
647 		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
648 		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
649 		.cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
650 	};
651 
652 	/*
653 	 * Do it again; the earlier dmar_tbl mapping could have been done
654 	 * with a fixed map.
655 	 */
656 	dmar_table_detect();
657 
658 	/*
659 	 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
660 	 * that SINIT saved in SinitMleData in the TXT heap (which is DMA protected)
661 	 */
662 	dmar_tbl = tboot_get_dmar_table(dmar_tbl);
663 
664 	dmar = (struct acpi_table_dmar *)dmar_tbl;
665 	if (!dmar)
666 		return -ENODEV;
667 
668 	if (dmar->width < PAGE_SHIFT - 1) {
669 		pr_warn("Invalid DMAR haw\n");
670 		return -EINVAL;
671 	}
672 
673 	pr_info("Host address width %d\n", dmar->width + 1);
674 	ret = dmar_walk_dmar_table(dmar, &cb);
675 	if (ret == 0 && drhd_count == 0)
676 		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
677 
678 	return ret;
679 }
680 
681 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
682 				 int cnt, struct pci_dev *dev)
683 {
684 	int index;
685 	struct device *tmp;
686 
687 	while (dev) {
688 		for_each_active_dev_scope(devices, cnt, index, tmp)
689 			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
690 				return 1;
691 
692 		/* Check our parent */
693 		dev = dev->bus->self;
694 	}
695 
696 	return 0;
697 }
698 
699 struct dmar_drhd_unit *
700 dmar_find_matched_drhd_unit(struct pci_dev *dev)
701 {
702 	struct dmar_drhd_unit *dmaru;
703 	struct acpi_dmar_hardware_unit *drhd;
704 
705 	dev = pci_physfn(dev);
706 
707 	rcu_read_lock();
708 	for_each_drhd_unit(dmaru) {
709 		drhd = container_of(dmaru->hdr,
710 				    struct acpi_dmar_hardware_unit,
711 				    header);
712 
713 		if (dmaru->include_all &&
714 		    drhd->segment == pci_domain_nr(dev->bus))
715 			goto out;
716 
717 		if (dmar_pci_device_match(dmaru->devices,
718 					  dmaru->devices_cnt, dev))
719 			goto out;
720 	}
721 	dmaru = NULL;
722 out:
723 	rcu_read_unlock();
724 
725 	return dmaru;
726 }
727 
728 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
729 					      struct acpi_device *adev)
730 {
731 	struct dmar_drhd_unit *dmaru;
732 	struct acpi_dmar_hardware_unit *drhd;
733 	struct acpi_dmar_device_scope *scope;
734 	struct device *tmp;
735 	int i;
736 	struct acpi_dmar_pci_path *path;
737 
738 	for_each_drhd_unit(dmaru) {
739 		drhd = container_of(dmaru->hdr,
740 				    struct acpi_dmar_hardware_unit,
741 				    header);
742 
743 		for (scope = (void *)(drhd + 1);
744 		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
745 		     scope = ((void *)scope) + scope->length) {
746 			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
747 				continue;
748 			if (scope->enumeration_id != device_number)
749 				continue;
750 
751 			path = (void *)(scope + 1);
752 			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
753 				dev_name(&adev->dev), dmaru->reg_base_addr,
754 				scope->bus, path->device, path->function);
755 			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
756 				if (tmp == NULL) {
757 					dmaru->devices[i].bus = scope->bus;
758 					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
759 									    path->function);
760 					rcu_assign_pointer(dmaru->devices[i].dev,
761 							   get_device(&adev->dev));
762 					return;
763 				}
764 			BUG_ON(i >= dmaru->devices_cnt);
765 		}
766 	}
767 	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
768 		device_number, dev_name(&adev->dev));
769 }
770 
771 static int __init dmar_acpi_dev_scope_init(void)
772 {
773 	struct acpi_dmar_andd *andd;
774 
775 	if (dmar_tbl == NULL)
776 		return -ENODEV;
777 
778 	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
779 	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
780 	     andd = ((void *)andd) + andd->header.length) {
781 		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
782 			acpi_handle h;
783 			struct acpi_device *adev;
784 
785 			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
786 							  andd->device_name,
787 							  &h))) {
788 				pr_err("Failed to find handle for ACPI object %s\n",
789 				       andd->device_name);
790 				continue;
791 			}
792 			adev = acpi_fetch_acpi_dev(h);
793 			if (!adev) {
794 				pr_err("Failed to get device for ACPI object %s\n",
795 				       andd->device_name);
796 				continue;
797 			}
798 			dmar_acpi_insert_dev_scope(andd->device_number, adev);
799 		}
800 	}
801 	return 0;
802 }
803 
804 int __init dmar_dev_scope_init(void)
805 {
806 	struct pci_dev *dev = NULL;
807 	struct dmar_pci_notify_info *info;
808 
809 	if (dmar_dev_scope_status != 1)
810 		return dmar_dev_scope_status;
811 
812 	if (list_empty(&dmar_drhd_units)) {
813 		dmar_dev_scope_status = -ENODEV;
814 	} else {
815 		dmar_dev_scope_status = 0;
816 
817 		dmar_acpi_dev_scope_init();
818 
819 		for_each_pci_dev(dev) {
820 			if (dev->is_virtfn)
821 				continue;
822 
823 			info = dmar_alloc_pci_notify_info(dev,
824 					BUS_NOTIFY_ADD_DEVICE);
825 			if (!info) {
826 				pci_dev_put(dev);
827 				return dmar_dev_scope_status;
828 			} else {
829 				dmar_pci_bus_add_dev(info);
830 				dmar_free_pci_notify_info(info);
831 			}
832 		}
833 	}
834 
835 	return dmar_dev_scope_status;
836 }
837 
838 void __init dmar_register_bus_notifier(void)
839 {
840 	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
841 }
842 
843 
844 int __init dmar_table_init(void)
845 {
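	/*
	 * Parse the table only once: dmar_table_initialized caches 1 on
	 * success or the negative errno on failure, and later callers get
	 * the cached outcome (0 or that errno) returned below.
	 */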
846 	static int dmar_table_initialized;
847 	int ret;
848 
849 	if (dmar_table_initialized == 0) {
850 		ret = parse_dmar_table();
851 		if (ret < 0) {
852 			if (ret != -ENODEV)
853 				pr_info("Parse DMAR table failure.\n");
854 		} else  if (list_empty(&dmar_drhd_units)) {
855 			pr_info("No DMAR devices found\n");
856 			ret = -ENODEV;
857 		}
858 
859 		if (ret < 0)
860 			dmar_table_initialized = ret;
861 		else
862 			dmar_table_initialized = 1;
863 	}
864 
865 	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
866 }
867 
868 static void warn_invalid_dmar(u64 addr, const char *message)
869 {
870 	pr_warn_once(FW_BUG
871 		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
872 		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
873 		addr, message,
874 		dmi_get_system_info(DMI_BIOS_VENDOR),
875 		dmi_get_system_info(DMI_BIOS_VERSION),
876 		dmi_get_system_info(DMI_PRODUCT_VERSION));
877 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
878 }
879 
880 static int __ref
881 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
882 {
883 	struct acpi_dmar_hardware_unit *drhd;
884 	void __iomem *addr;
885 	u64 cap, ecap;
886 
887 	drhd = (void *)entry;
888 	if (!drhd->address) {
889 		warn_invalid_dmar(0, "");
890 		return -EINVAL;
891 	}
892 
893 	if (arg)
894 		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
895 	else
896 		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
897 	if (!addr) {
898 		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
899 		return -EINVAL;
900 	}
901 
902 	cap = dmar_readq(addr + DMAR_CAP_REG);
903 	ecap = dmar_readq(addr + DMAR_ECAP_REG);
904 
905 	if (arg)
906 		iounmap(addr);
907 	else
908 		early_iounmap(addr, VTD_PAGE_SIZE);
909 
910 	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
911 		warn_invalid_dmar(drhd->address, " returns all ones");
912 		return -EINVAL;
913 	}
914 
915 	return 0;
916 }
917 
918 void __init detect_intel_iommu(void)
919 {
920 	int ret;
921 	struct dmar_res_callback validate_drhd_cb = {
922 		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
923 		.ignore_unhandled = true,
924 	};
925 
926 	down_write(&dmar_global_lock);
927 	ret = dmar_table_detect();
928 	if (!ret)
929 		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
930 					   &validate_drhd_cb);
931 	if (!ret && !no_iommu && !iommu_detected &&
932 	    (!dmar_disabled || dmar_platform_optin())) {
933 		iommu_detected = 1;
934 		/* Make sure ACS will be enabled */
935 		pci_request_acs();
936 	}
937 
938 #ifdef CONFIG_X86
939 	if (!ret) {
940 		x86_init.iommu.iommu_init = intel_iommu_init;
941 		x86_platform.iommu_shutdown = intel_iommu_shutdown;
942 	}
943 
944 #endif
945 
946 	if (dmar_tbl) {
947 		acpi_put_table(dmar_tbl);
948 		dmar_tbl = NULL;
949 	}
950 	up_write(&dmar_global_lock);
951 }
952 
953 static void unmap_iommu(struct intel_iommu *iommu)
954 {
955 	iounmap(iommu->reg);
956 	release_mem_region(iommu->reg_phys, iommu->reg_size);
957 }
958 
959 /**
960  * map_iommu: map the iommu's registers
961  * @iommu: the iommu to map
962  * @drhd: DMA remapping hardware definition structure
963  *
964  * Memory map the iommu's registers.  Start with a single page, and
965  * possibly expand if that turns out to be insufficient.
966  */
967 static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
968 {
969 	u64 phys_addr = drhd->reg_base_addr;
970 	int map_size, err=0;
971 
972 	iommu->reg_phys = phys_addr;
973 	iommu->reg_size = drhd->reg_size;
974 
975 	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
976 		pr_err("Can't reserve memory\n");
977 		err = -EBUSY;
978 		goto out;
979 	}
980 
981 	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
982 	if (!iommu->reg) {
983 		pr_err("Can't map the region\n");
984 		err = -ENOMEM;
985 		goto release;
986 	}
987 
988 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
989 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
990 
991 	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
992 		err = -EINVAL;
993 		warn_invalid_dmar(phys_addr, " returns all ones");
994 		goto unmap;
995 	}
996 
997 	/* the registers might be more than one page */
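	/*
	 * The IOTLB invalidation and fault recording registers live at
	 * implementation-specific offsets reported in ECAP/CAP; if the
	 * page-aligned end of either region exceeds the initial 4 KB
	 * mapping, the range is remapped below at its full size.
	 */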
998 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
999 			 cap_max_fault_reg_offset(iommu->cap));
1000 	map_size = VTD_PAGE_ALIGN(map_size);
1001 	if (map_size > iommu->reg_size) {
1002 		iounmap(iommu->reg);
1003 		release_mem_region(iommu->reg_phys, iommu->reg_size);
1004 		iommu->reg_size = map_size;
1005 		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
1006 					iommu->name)) {
1007 			pr_err("Can't reserve memory\n");
1008 			err = -EBUSY;
1009 			goto out;
1010 		}
1011 		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
1012 		if (!iommu->reg) {
1013 			pr_err("Can't map the region\n");
1014 			err = -ENOMEM;
1015 			goto release;
1016 		}
1017 	}
1018 
1019 	if (cap_ecmds(iommu->cap)) {
1020 		int i;
1021 
1022 		for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
1023 			iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
1024 						       i * DMA_ECMD_REG_STEP);
1025 		}
1026 	}
1027 
1028 	err = 0;
1029 	goto out;
1030 
1031 unmap:
1032 	iounmap(iommu->reg);
1033 release:
1034 	release_mem_region(iommu->reg_phys, iommu->reg_size);
1035 out:
1036 	return err;
1037 }
1038 
1039 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1040 {
1041 	struct intel_iommu *iommu;
1042 	u32 ver, sts;
1043 	int agaw = -1;
1044 	int msagaw = -1;
1045 	int err;
1046 
1047 	if (!drhd->reg_base_addr) {
1048 		warn_invalid_dmar(0, "");
1049 		return -EINVAL;
1050 	}
1051 
1052 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1053 	if (!iommu)
1054 		return -ENOMEM;
1055 
1056 	iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
1057 					DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
1058 	if (iommu->seq_id < 0) {
1059 		pr_err("Failed to allocate seq_id\n");
1060 		err = iommu->seq_id;
1061 		goto error;
1062 	}
1063 	snprintf(iommu->name, sizeof(iommu->name), "dmar%d", iommu->seq_id);
1064 
1065 	err = map_iommu(iommu, drhd);
1066 	if (err) {
1067 		pr_err("Failed to map %s\n", iommu->name);
1068 		goto error_free_seq_id;
1069 	}
1070 
1071 	if (!cap_sagaw(iommu->cap) &&
1072 	    (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
1073 		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
1074 			iommu->name);
1075 		drhd->ignored = 1;
1076 	}
1077 
1078 	if (!drhd->ignored) {
1079 		agaw = iommu_calculate_agaw(iommu);
1080 		if (agaw < 0) {
1081 			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1082 			       iommu->seq_id);
1083 			drhd->ignored = 1;
1084 		}
1085 	}
1086 	if (!drhd->ignored) {
1087 		msagaw = iommu_calculate_max_sagaw(iommu);
1088 		if (msagaw < 0) {
1089 			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1090 			       iommu->seq_id);
1091 			drhd->ignored = 1;
1092 			agaw = -1;
1093 		}
1094 	}
1095 	iommu->agaw = agaw;
1096 	iommu->msagaw = msagaw;
1097 	iommu->segment = drhd->segment;
1098 	iommu->device_rbtree = RB_ROOT;
1099 	spin_lock_init(&iommu->device_rbtree_lock);
1100 	mutex_init(&iommu->iopf_lock);
1101 	iommu->node = NUMA_NO_NODE;
1102 	spin_lock_init(&iommu->lock);
1103 	ida_init(&iommu->domain_ida);
1104 	mutex_init(&iommu->did_lock);
1105 
1106 	ver = readl(iommu->reg + DMAR_VER_REG);
1107 	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1108 		iommu->name,
1109 		(unsigned long long)drhd->reg_base_addr,
1110 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1111 		(unsigned long long)iommu->cap,
1112 		(unsigned long long)iommu->ecap);
1113 
1114 	/* Reflect status in gcmd */
1115 	sts = readl(iommu->reg + DMAR_GSTS_REG);
1116 	if (sts & DMA_GSTS_IRES)
1117 		iommu->gcmd |= DMA_GCMD_IRE;
1118 	if (sts & DMA_GSTS_TES)
1119 		iommu->gcmd |= DMA_GCMD_TE;
1120 	if (sts & DMA_GSTS_QIES)
1121 		iommu->gcmd |= DMA_GCMD_QIE;
1122 
1123 	if (alloc_iommu_pmu(iommu))
1124 		pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
1125 
1126 	raw_spin_lock_init(&iommu->register_lock);
1127 
1128 	/*
1129 	 * A value of N in the PSS field of the eCap register indicates that
1130 	 * the hardware supports a PASID field of N+1 bits.
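	 * For example (hypothetical value), PSS = 19 would mean 20-bit PASIDs,
	 * i.e. 2UL << 19 = 1 << 20 possible PASID values.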
1131 	 */
1132 	if (pasid_supported(iommu))
1133 		iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);
1134 
1135 	/*
1136 	 * This is only for hotplug; at boot time intel_iommu_enabled won't
1137 	 * be set yet. When intel_iommu_init() runs, it registers the units
1138 	 * present at boot time, then sets intel_iommu_enabled.
1139 	 */
1140 	if (intel_iommu_enabled && !drhd->ignored) {
1141 		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
1142 					     intel_iommu_groups,
1143 					     "%s", iommu->name);
1144 		if (err)
1145 			goto err_unmap;
1146 
1147 		err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
1148 		if (err)
1149 			goto err_sysfs;
1150 
1151 		iommu_pmu_register(iommu);
1152 	}
1153 
1154 	drhd->iommu = iommu;
1155 	iommu->drhd = drhd;
1156 
1157 	return 0;
1158 
1159 err_sysfs:
1160 	iommu_device_sysfs_remove(&iommu->iommu);
1161 err_unmap:
1162 	free_iommu_pmu(iommu);
1163 	unmap_iommu(iommu);
1164 error_free_seq_id:
1165 	ida_free(&dmar_seq_ids, iommu->seq_id);
1166 error:
1167 	kfree(iommu);
1168 	return err;
1169 }
1170 
1171 static void free_iommu(struct intel_iommu *iommu)
1172 {
1173 	if (intel_iommu_enabled && !iommu->drhd->ignored) {
1174 		iommu_pmu_unregister(iommu);
1175 		iommu_device_unregister(&iommu->iommu);
1176 		iommu_device_sysfs_remove(&iommu->iommu);
1177 	}
1178 
1179 	free_iommu_pmu(iommu);
1180 
1181 	if (iommu->irq) {
1182 		if (iommu->pr_irq) {
1183 			free_irq(iommu->pr_irq, iommu);
1184 			dmar_free_hwirq(iommu->pr_irq);
1185 			iommu->pr_irq = 0;
1186 		}
1187 		free_irq(iommu->irq, iommu);
1188 		dmar_free_hwirq(iommu->irq);
1189 		iommu->irq = 0;
1190 	}
1191 
1192 	if (iommu->qi) {
1193 		iommu_free_pages(iommu->qi->desc);
1194 		kfree(iommu->qi->desc_status);
1195 		kfree(iommu->qi);
1196 	}
1197 
1198 	if (iommu->reg)
1199 		unmap_iommu(iommu);
1200 
1201 	ida_destroy(&iommu->domain_ida);
1202 	ida_free(&dmar_seq_ids, iommu->seq_id);
1203 	kfree(iommu);
1204 }
1205 
1206 /*
1207  * Reclaim all the submitted descriptors which have completed their work.
1208  */
1209 static inline void reclaim_free_desc(struct q_inval *qi)
1210 {
1211 	while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) {
1212 		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1213 		qi->free_cnt++;
1214 	}
1215 }
1216 
1217 static const char *qi_type_string(u8 type)
1218 {
1219 	switch (type) {
1220 	case QI_CC_TYPE:
1221 		return "Context-cache Invalidation";
1222 	case QI_IOTLB_TYPE:
1223 		return "IOTLB Invalidation";
1224 	case QI_DIOTLB_TYPE:
1225 		return "Device-TLB Invalidation";
1226 	case QI_IEC_TYPE:
1227 		return "Interrupt Entry Cache Invalidation";
1228 	case QI_IWD_TYPE:
1229 		return "Invalidation Wait";
1230 	case QI_EIOTLB_TYPE:
1231 		return "PASID-based IOTLB Invalidation";
1232 	case QI_PC_TYPE:
1233 		return "PASID-cache Invalidation";
1234 	case QI_DEIOTLB_TYPE:
1235 		return "PASID-based Device-TLB Invalidation";
1236 	case QI_PGRP_RESP_TYPE:
1237 		return "Page Group Response";
1238 	default:
1239 		return "UNKNOWN";
1240 	}
1241 }
1242 
1243 static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
1244 {
1245 	unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
1246 	u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1247 	struct qi_desc *desc = iommu->qi->desc + head;
1248 
1249 	if (fault & DMA_FSTS_IQE)
1250 		pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
1251 		       DMAR_IQER_REG_IQEI(iqe_err));
1252 	if (fault & DMA_FSTS_ITE)
1253 		pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
1254 		       DMAR_IQER_REG_ITESID(iqe_err));
1255 	if (fault & DMA_FSTS_ICE)
1256 		pr_err("VT-d detected Invalidation Completion Error: SID %llx",
1257 		       DMAR_IQER_REG_ICESID(iqe_err));
1258 
1259 	pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1260 	       qi_type_string(desc->qw0 & 0xf),
1261 	       (unsigned long long)desc->qw0,
1262 	       (unsigned long long)desc->qw1);
1263 
1264 	head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
1265 	head <<= qi_shift(iommu);
1266 	desc = iommu->qi->desc + head;
1267 
1268 	pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
1269 	       qi_type_string(desc->qw0 & 0xf),
1270 	       (unsigned long long)desc->qw0,
1271 	       (unsigned long long)desc->qw1);
1272 }
1273 
1274 static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
1275 {
1276 	u32 fault;
1277 	int head, tail;
1278 	struct device *dev;
1279 	u64 iqe_err, ite_sid;
1280 	struct q_inval *qi = iommu->qi;
1281 	int shift = qi_shift(iommu);
1282 
1283 	if (qi->desc_status[wait_index] == QI_ABORT)
1284 		return -EAGAIN;
1285 
1286 	fault = readl(iommu->reg + DMAR_FSTS_REG);
1287 	if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
1288 		qi_dump_fault(iommu, fault);
1289 
1290 	/*
1291 	 * If IQE happens, the head points to the descriptor associated
1292 	 * with the error. No new descriptors are fetched until the IQE
1293 	 * is cleared.
1294 	 */
1295 	if (fault & DMA_FSTS_IQE) {
1296 		head = readl(iommu->reg + DMAR_IQH_REG);
1297 		if ((head >> shift) == index) {
1298 			struct qi_desc *desc = qi->desc + head;
1299 
1300 			/*
1301 			 * desc->qw2 and desc->qw3 are either reserved or
1302 			 * used by software as private data. We won't print
1303 			 * out these two qw's for security consideration.
1304 			 */
1305 			memcpy(desc, qi->desc + (wait_index << shift),
1306 			       1 << shift);
1307 			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1308 			pr_info("Invalidation Queue Error (IQE) cleared\n");
1309 			return -EINVAL;
1310 		}
1311 	}
1312 
1313 	/*
1314 	 * If ITE happens, all pending wait_desc commands are aborted.
1315 	 * No new descriptors are fetched until the ITE is cleared.
1316 	 */
1317 	if (fault & DMA_FSTS_ITE) {
1318 		head = readl(iommu->reg + DMAR_IQH_REG);
1319 		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1320 		head |= 1;
1321 		tail = readl(iommu->reg + DMAR_IQT_REG);
1322 		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
1323 
1324 		/*
1325 		 * SID field is valid only when the ITE field is Set in FSTS_REG
1326 		 * see Intel VT-d spec r4.1, section 11.4.9.9
1327 		 */
1328 		iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
1329 		ite_sid = DMAR_IQER_REG_ITESID(iqe_err);
1330 
1331 		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1332 		pr_info("Invalidation Time-out Error (ITE) cleared\n");
1333 
1334 		do {
1335 			if (qi->desc_status[head] == QI_IN_USE)
1336 				qi->desc_status[head] = QI_ABORT;
1337 			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1338 		} while (head != tail);
1339 
1340 		/*
1341 		 * If device was released or isn't present, no need to retry
1342 		 * the ATS invalidate request anymore.
1343 		 *
1344 		 * An ite_sid value of 0 means an old VT-d device that does not report a SID.
1345 		 * see Intel VT-d spec r4.1, section 11.4.9.9
1346 		 */
1347 		if (ite_sid) {
1348 			dev = device_rbtree_find(iommu, ite_sid);
1349 			if (!dev || !dev_is_pci(dev) ||
1350 			    !pci_device_is_present(to_pci_dev(dev)))
1351 				return -ETIMEDOUT;
1352 		}
1353 		if (qi->desc_status[wait_index] == QI_ABORT)
1354 			return -EAGAIN;
1355 	}
1356 
1357 	if (fault & DMA_FSTS_ICE) {
1358 		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1359 		pr_info("Invalidation Completion Error (ICE) cleared\n");
1360 	}
1361 
1362 	return 0;
1363 }
1364 
1365 /*
1366  * Function to submit invalidation descriptors of all types to the queued
1367  * invalidation interface (QI). Multiple descriptors can be submitted at a
1368  * time; a wait descriptor is appended to each submission to ensure that
1369  * hardware has completed the invalidation before returning. Wait descriptors
1370  * may be part of the submission themselves, but they are not polled for completion.
1371  */
1372 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1373 		   unsigned int count, unsigned long options)
1374 {
1375 	struct q_inval *qi = iommu->qi;
1376 	s64 devtlb_start_ktime = 0;
1377 	s64 iotlb_start_ktime = 0;
1378 	s64 iec_start_ktime = 0;
1379 	struct qi_desc wait_desc;
1380 	int wait_index, index;
1381 	unsigned long flags;
1382 	int offset, shift;
1383 	int rc, i;
1384 	u64 type;
1385 
1386 	if (!qi)
1387 		return 0;
1388 
1389 	type = desc->qw0 & GENMASK_ULL(3, 0);
1390 
1391 	if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) &&
1392 	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IOTLB))
1393 		iotlb_start_ktime = ktime_to_ns(ktime_get());
1394 
1395 	if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) &&
1396 	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_DEVTLB))
1397 		devtlb_start_ktime = ktime_to_ns(ktime_get());
1398 
1399 	if (type == QI_IEC_TYPE &&
1400 	    dmar_latency_enabled(iommu, DMAR_LATENCY_INV_IEC))
1401 		iec_start_ktime = ktime_to_ns(ktime_get());
1402 
1403 restart:
1404 	rc = 0;
1405 
1406 	raw_spin_lock_irqsave(&qi->q_lock, flags);
1407 	/*
1408 	 * Check if we have enough empty slots in the queue to submit,
1409 	 * the calculation is based on:
1410 	 * # of desc + 1 wait desc + 1 space between head and tail
1411 	 */
1412 	while (qi->free_cnt < count + 2) {
1413 		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1414 		cpu_relax();
1415 		raw_spin_lock_irqsave(&qi->q_lock, flags);
1416 	}
1417 
1418 	index = qi->free_head;
1419 	wait_index = (index + count) % QI_LENGTH;
1420 	shift = qi_shift(iommu);
1421 
1422 	for (i = 0; i < count; i++) {
1423 		offset = ((index + i) % QI_LENGTH) << shift;
1424 		memcpy(qi->desc + offset, &desc[i], 1 << shift);
1425 		qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
1426 		trace_qi_submit(iommu, desc[i].qw0, desc[i].qw1,
1427 				desc[i].qw2, desc[i].qw3);
1428 	}
1429 	qi->desc_status[wait_index] = QI_IN_USE;
1430 
1431 	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
1432 			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1433 	if (options & QI_OPT_WAIT_DRAIN)
1434 		wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
1435 	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
1436 	wait_desc.qw2 = 0;
1437 	wait_desc.qw3 = 0;
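	/*
	 * The Status Write bit asks the hardware to store QI_DONE to the
	 * physical address in qw1 (the desc_status slot for wait_index)
	 * once all preceding descriptors have completed, so the wait loop
	 * below polls that status word instead of a hardware register.
	 */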
1438 
1439 	offset = wait_index << shift;
1440 	memcpy(qi->desc + offset, &wait_desc, 1 << shift);
1441 
1442 	qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
1443 	qi->free_cnt -= count + 1;
1444 
1445 	/*
1446 	 * update the HW tail register indicating the presence of
1447 	 * new descriptors.
1448 	 */
1449 	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
1450 
1451 	while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) {
1452 		/*
1453 		 * We will leave the interrupts disabled, to prevent interrupt
1454 		 * context from queueing another cmd while a cmd is already submitted
1455 		 * and waiting for completion on this cpu. This is to avoid
1456 		 * a deadlock where the interrupt context can wait indefinitely
1457 		 * for free slots in the queue.
1458 		 */
1459 		rc = qi_check_fault(iommu, index, wait_index);
1460 		if (rc)
1461 			break;
1462 
1463 		raw_spin_unlock(&qi->q_lock);
1464 		cpu_relax();
1465 		raw_spin_lock(&qi->q_lock);
1466 	}
1467 
1468 	/*
1469 	 * The reclaim code can free descriptors from multiple submissions
1470 	 * starting from the tail of the queue. When count == 0, the
1471 	 * status of the standalone wait descriptor at the tail of the queue
1472 	 * must be set to QI_FREE to allow the reclaim code to proceed.
1473 	 * It is also possible that descriptors from one of the previous
1474 	 * submissions has to be reclaimed by a subsequent submission.
1475 	 */
1476 	for (i = 0; i <= count; i++)
1477 		qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE;
1478 
1479 	reclaim_free_desc(qi);
1480 	raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1481 
1482 	if (rc == -EAGAIN)
1483 		goto restart;
1484 
1485 	if (iotlb_start_ktime)
1486 		dmar_latency_update(iommu, DMAR_LATENCY_INV_IOTLB,
1487 				ktime_to_ns(ktime_get()) - iotlb_start_ktime);
1488 
1489 	if (devtlb_start_ktime)
1490 		dmar_latency_update(iommu, DMAR_LATENCY_INV_DEVTLB,
1491 				ktime_to_ns(ktime_get()) - devtlb_start_ktime);
1492 
1493 	if (iec_start_ktime)
1494 		dmar_latency_update(iommu, DMAR_LATENCY_INV_IEC,
1495 				ktime_to_ns(ktime_get()) - iec_start_ktime);
1496 
1497 	return rc;
1498 }
1499 
1500 /*
1501  * Flush the global interrupt entry cache.
1502  */
1503 void qi_global_iec(struct intel_iommu *iommu)
1504 {
1505 	struct qi_desc desc;
1506 
1507 	desc.qw0 = QI_IEC_TYPE;
1508 	desc.qw1 = 0;
1509 	desc.qw2 = 0;
1510 	desc.qw3 = 0;
1511 
1512 	/* should never fail */
1513 	qi_submit_sync(iommu, &desc, 1, 0);
1514 }
1515 
1516 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1517 		      u64 type)
1518 {
1519 	struct qi_desc desc;
1520 
1521 	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1522 			| QI_CC_GRAN(type) | QI_CC_TYPE;
1523 	desc.qw1 = 0;
1524 	desc.qw2 = 0;
1525 	desc.qw3 = 0;
1526 
1527 	qi_submit_sync(iommu, &desc, 1, 0);
1528 }
1529 
1530 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1531 		    unsigned int size_order, u64 type)
1532 {
1533 	struct qi_desc desc;
1534 
1535 	qi_desc_iotlb(iommu, did, addr, size_order, type, &desc);
1536 	qi_submit_sync(iommu, &desc, 1, 0);
1537 }
1538 
1539 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1540 			u16 qdep, u64 addr, unsigned mask)
1541 {
1542 	struct qi_desc desc;
1543 
1544 	/*
1545 	 * VT-d spec, section 4.3:
1546 	 *
1547 	 * Software is recommended to not submit any Device-TLB invalidation
1548 	 * requests while address remapping hardware is disabled.
1549 	 */
1550 	if (!(iommu->gcmd & DMA_GCMD_TE))
1551 		return;
1552 
1553 	qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &desc);
1554 	qi_submit_sync(iommu, &desc, 1, 0);
1555 }
1556 
1557 /* PASID-based IOTLB invalidation */
1558 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1559 		     unsigned long npages, bool ih)
1560 {
1561 	struct qi_desc desc = {.qw2 = 0, .qw3 = 0};
1562 
1563 	/*
1564 	 * npages == -1 means a PASID-selective invalidation, otherwise,
1565 	 * a positive value for Page-selective-within-PASID invalidation.
1566 	 * 0 is not a valid input.
1567 	 */
1568 	if (WARN_ON(!npages)) {
1569 		pr_err("Invalid input npages = %ld\n", npages);
1570 		return;
1571 	}
1572 
1573 	qi_desc_piotlb(did, pasid, addr, npages, ih, &desc);
1574 	qi_submit_sync(iommu, &desc, 1, 0);
1575 }
1576 
1577 /* PASID-based device IOTLB Invalidate */
1578 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1579 			      u32 pasid,  u16 qdep, u64 addr, unsigned int size_order)
1580 {
1581 	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1582 
1583 	/*
1584 	 * VT-d spec, section 4.3:
1585 	 *
1586 	 * Software is recommended to not submit any Device-TLB invalidation
1587 	 * requests while address remapping hardware is disabled.
1588 	 */
1589 	if (!(iommu->gcmd & DMA_GCMD_TE))
1590 		return;
1591 
1592 	qi_desc_dev_iotlb_pasid(sid, pfsid, pasid,
1593 				qdep, addr, size_order,
1594 				&desc);
1595 	qi_submit_sync(iommu, &desc, 1, 0);
1596 }
1597 
1598 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
1599 			  u64 granu, u32 pasid)
1600 {
1601 	struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
1602 
1603 	desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
1604 			QI_PC_GRAN(granu) | QI_PC_TYPE;
1605 	qi_submit_sync(iommu, &desc, 1, 0);
1606 }
1607 
1608 /*
1609  * Disable Queued Invalidation interface.
1610  */
1611 void dmar_disable_qi(struct intel_iommu *iommu)
1612 {
1613 	unsigned long flags;
1614 	u32 sts;
1615 	cycles_t start_time = get_cycles();
1616 
1617 	if (!ecap_qis(iommu->ecap))
1618 		return;
1619 
1620 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
1621 
1622 	sts =  readl(iommu->reg + DMAR_GSTS_REG);
1623 	if (!(sts & DMA_GSTS_QIES))
1624 		goto end;
1625 
1626 	/*
1627 	 * Give the hardware a chance to complete the pending invalidation requests.
1628 	 */
1629 	while ((readl(iommu->reg + DMAR_IQT_REG) !=
1630 		readl(iommu->reg + DMAR_IQH_REG)) &&
1631 		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1632 		cpu_relax();
1633 
1634 	iommu->gcmd &= ~DMA_GCMD_QIE;
1635 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1636 
1637 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1638 		      !(sts & DMA_GSTS_QIES), sts);
1639 end:
1640 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1641 }
1642 
1643 /*
1644  * Enable queued invalidation.
1645  */
1646 static void __dmar_enable_qi(struct intel_iommu *iommu)
1647 {
1648 	u32 sts;
1649 	unsigned long flags;
1650 	struct q_inval *qi = iommu->qi;
1651 	u64 val = virt_to_phys(qi->desc);
1652 
1653 	qi->free_head = qi->free_tail = 0;
1654 	qi->free_cnt = QI_LENGTH;
1655 
1656 	/*
1657 	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
1658 	 * is present.
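	 * (Per the VT-d IQA_REG layout: DW, bit 11, selects 256-bit descriptors,
	 * and QS = 1 sizes the queue at two 4 KB pages, so all QI_LENGTH (256)
	 * of the wider descriptors still fit.)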
1659 	 */
1660 	if (ecap_smts(iommu->ecap))
1661 		val |= BIT_ULL(11) | BIT_ULL(0);
1662 
1663 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
1664 
1665 	/* write zero to the tail reg */
1666 	writel(0, iommu->reg + DMAR_IQT_REG);
1667 
1668 	dmar_writeq(iommu->reg + DMAR_IQA_REG, val);
1669 
1670 	iommu->gcmd |= DMA_GCMD_QIE;
1671 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1672 
1673 	/* Make sure hardware complete it */
1674 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1675 
1676 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1677 }
1678 
1679 /*
1680  * Enable Queued Invalidation interface. This is a must to support
1681  * interrupt-remapping. Also used by DMA-remapping, which replaces
1682  * register based IOTLB invalidation.
1683  */
1684 int dmar_enable_qi(struct intel_iommu *iommu)
1685 {
1686 	struct q_inval *qi;
1687 	void *desc;
1688 
1689 	if (!ecap_qis(iommu->ecap))
1690 		return -ENOENT;
1691 
1692 	/*
1693 	 * Queued invalidation is already set up and enabled.
1694 	 */
1695 	if (iommu->qi)
1696 		return 0;
1697 
1698 	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1699 	if (!iommu->qi)
1700 		return -ENOMEM;
1701 
1702 	qi = iommu->qi;
1703 
1704 	/*
1705 	 * Need two pages to accommodate 256 descriptors of 256 bits each
1706 	 * if the remapping hardware supports scalable mode translation.
1707 	 */
1708 	desc = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
1709 					 ecap_smts(iommu->ecap) ? SZ_8K :
1710 								  SZ_4K);
1711 	if (!desc) {
1712 		kfree(qi);
1713 		iommu->qi = NULL;
1714 		return -ENOMEM;
1715 	}
1716 
1717 	qi->desc = desc;
1718 
1719 	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
1720 	if (!qi->desc_status) {
1721 		iommu_free_pages(qi->desc);
1722 		kfree(qi);
1723 		iommu->qi = NULL;
1724 		return -ENOMEM;
1725 	}
1726 
1727 	raw_spin_lock_init(&qi->q_lock);
1728 
1729 	__dmar_enable_qi(iommu);
1730 
1731 	return 0;
1732 }
1733 
1734 /* iommu interrupt handling. Most stuff are MSI-like. */
1735 
1736 enum faulttype {
1737 	DMA_REMAP,
1738 	INTR_REMAP,
1739 	UNKNOWN,
1740 };
1741 
1742 static const char *dma_remap_fault_reasons[] =
1743 {
1744 	"Software",
1745 	"Present bit in root entry is clear",
1746 	"Present bit in context entry is clear",
1747 	"Invalid context entry",
1748 	"Access beyond MGAW",
1749 	"PTE Write access is not set",
1750 	"PTE Read access is not set",
1751 	"Next page table ptr is invalid",
1752 	"Root table address invalid",
1753 	"Context table ptr is invalid",
1754 	"non-zero reserved fields in RTP",
1755 	"non-zero reserved fields in CTP",
1756 	"non-zero reserved fields in PTE",
1757 	"PCE for translation request specifies blocking",
1758 };
1759 
1760 static const char * const dma_remap_sm_fault_reasons[] = {
1761 	"SM: Invalid Root Table Address",
1762 	"SM: TTM 0 for request with PASID",
1763 	"SM: TTM 0 for page group request",
1764 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1765 	"SM: Error attempting to access Root Entry",
1766 	"SM: Present bit in Root Entry is clear",
1767 	"SM: Non-zero reserved field set in Root Entry",
1768 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1769 	"SM: Error attempting to access Context Entry",
1770 	"SM: Present bit in Context Entry is clear",
1771 	"SM: Non-zero reserved field set in the Context Entry",
1772 	"SM: Invalid Context Entry",
1773 	"SM: DTE field in Context Entry is clear",
1774 	"SM: PASID Enable field in Context Entry is clear",
1775 	"SM: PASID is larger than the max in Context Entry",
1776 	"SM: PRE field in Context-Entry is clear",
1777 	"SM: RID_PASID field error in Context-Entry",
1778 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1779 	"SM: Error attempting to access the PASID Directory Entry",
1780 	"SM: Present bit in Directory Entry is clear",
1781 	"SM: Non-zero reserved field set in PASID Directory Entry",
1782 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1783 	"SM: Error attempting to access PASID Table Entry",
1784 	"SM: Present bit in PASID Table Entry is clear",
1785 	"SM: Non-zero reserved field set in PASID Table Entry",
1786 	"SM: Invalid Scalable-Mode PASID Table Entry",
1787 	"SM: ERE field is clear in PASID Table Entry",
1788 	"SM: SRE field is clear in PASID Table Entry",
1789 	"Unknown", "Unknown",/* 0x5E-0x5F */
1790 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1791 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1792 	"SM: Error attempting to access first-level paging entry",
1793 	"SM: Present bit in first-level paging entry is clear",
1794 	"SM: Non-zero reserved field set in first-level paging entry",
1795 	"SM: Error attempting to access FL-PML4 entry",
1796 	"SM: First-level entry address beyond MGAW in Nested translation",
1797 	"SM: Read permission error in FL-PML4 entry in Nested translation",
1798 	"SM: Read permission error in first-level paging entry in Nested translation",
1799 	"SM: Write permission error in first-level paging entry in Nested translation",
1800 	"SM: Error attempting to access second-level paging entry",
1801 	"SM: Read/Write permission error in second-level paging entry",
1802 	"SM: Non-zero reserved field set in second-level paging entry",
1803 	"SM: Invalid second-level page table pointer",
1804 	"SM: A/D bit update needed in second-level entry when set up in no snoop",
1805 	"Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1806 	"SM: Address in first-level translation is not canonical",
1807 	"SM: U/S set 0 for first-level translation with user privilege",
1808 	"SM: No execute permission for request with PASID and ER=1",
1809 	"SM: Address beyond the DMA hardware max",
1810 	"SM: Second-level entry address beyond the max",
1811 	"SM: No write permission for Write/AtomicOp request",
1812 	"SM: No read permission for Read/AtomicOp request",
1813 	"SM: Invalid address-interrupt address",
1814 	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1815 	"SM: A/D bit update needed in first-level entry when set up in no snoop",
1816 };
1817 
1818 static const char *irq_remap_fault_reasons[] =
1819 {
1820 	"Detected reserved fields in the decoded interrupt-remapped request",
1821 	"Interrupt index exceeded the interrupt-remapping table size",
1822 	"Present field in the IRTE entry is clear",
1823 	"Error accessing interrupt-remapping table pointed by IRTA_REG",
1824 	"Detected reserved fields in the IRTE entry",
1825 	"Blocked a compatibility format interrupt request",
1826 	"Blocked an interrupt request due to source-id verification failure",
1827 };
1828 
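/*
 * Map a raw fault reason code from a fault recording register to a
 * human-readable string and its fault type: codes from 0x20 index the
 * interrupt-remapping reasons, codes from 0x30 the scalable-mode reasons,
 * low codes the legacy DMA-remapping reasons; everything else is "Unknown".
 */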
1829 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1830 {
1831 	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1832 					ARRAY_SIZE(irq_remap_fault_reasons))) {
1833 		*fault_type = INTR_REMAP;
1834 		return irq_remap_fault_reasons[fault_reason - 0x20];
1835 	} else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1836 			ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1837 		*fault_type = DMA_REMAP;
1838 		return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1839 	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1840 		*fault_type = DMA_REMAP;
1841 		return dma_remap_fault_reasons[fault_reason];
1842 	} else {
1843 		*fault_type = UNKNOWN;
1844 		return "Unknown";
1845 	}
1846 }
1847 
1848 
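/*
 * Translate an IRQ number back to the interrupt control register of the
 * event that owns it: fault events, page requests or performance monitoring.
 */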
1849 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1850 {
1851 	if (iommu->irq == irq)
1852 		return DMAR_FECTL_REG;
1853 	else if (iommu->pr_irq == irq)
1854 		return DMAR_PECTL_REG;
1855 	else if (iommu->perf_irq == irq)
1856 		return DMAR_PERFINTRCTL_REG;
1857 	else
1858 		BUG();
1859 }
1860 
1861 void dmar_msi_unmask(struct irq_data *data)
1862 {
1863 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1864 	int reg = dmar_msi_reg(iommu, data->irq);
1865 	unsigned long flag;
1866 
1867 	/* unmask it */
1868 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1869 	writel(0, iommu->reg + reg);
1870 	/* Read back the register to flush the posted write */
1871 	readl(iommu->reg + reg);
1872 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1873 }
1874 
1875 void dmar_msi_mask(struct irq_data *data)
1876 {
1877 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1878 	int reg = dmar_msi_reg(iommu, data->irq);
1879 	unsigned long flag;
1880 
1881 	/* mask it */
1882 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1883 	writel(DMA_FECTL_IM, iommu->reg + reg);
1884 	/* Read back the register to flush the posted write */
1885 	readl(iommu->reg + reg);
1886 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1887 }
1888 
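/*
 * Program the MSI message for one of the IOMMU event interrupts. The data,
 * address-low and address-high registers live at fixed offsets (+4, +8, +12)
 * from the control register returned by dmar_msi_reg().
 */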
1889 void dmar_msi_write(int irq, struct msi_msg *msg)
1890 {
1891 	struct intel_iommu *iommu = irq_get_handler_data(irq);
1892 	int reg = dmar_msi_reg(iommu, irq);
1893 	unsigned long flag;
1894 
1895 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1896 	writel(msg->data, iommu->reg + reg + 4);
1897 	writel(msg->address_lo, iommu->reg + reg + 8);
1898 	writel(msg->address_hi, iommu->reg + reg + 12);
1899 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1900 }
1901 
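/*
 * Log one decoded fault record. Interrupt-remapping faults only report a
 * fault index; DMA-remapping faults additionally have their page-table
 * entries dumped via dmar_fault_dump_ptes().
 */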
1902 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1903 		u8 fault_reason, u32 pasid, u16 source_id,
1904 		unsigned long long addr)
1905 {
1906 	const char *reason;
1907 	int fault_type;
1908 
1909 	reason = dmar_get_fault_reason(fault_reason, &fault_type);
1910 
1911 	if (fault_type == INTR_REMAP) {
1912 		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
1913 		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
1914 		       PCI_FUNC(source_id & 0xFF), addr >> 48,
1915 		       fault_reason, reason);
1916 
1917 		return 0;
1918 	}
1919 
1920 	if (pasid == IOMMU_PASID_INVALID)
1921 		pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1922 		       type ? "DMA Read" : "DMA Write",
1923 		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
1924 		       PCI_FUNC(source_id & 0xFF), addr,
1925 		       fault_reason, reason);
1926 	else
1927 		pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
1928 		       type ? "DMA Read" : "DMA Write", pasid,
1929 		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
1930 		       PCI_FUNC(source_id & 0xFF), addr,
1931 		       fault_reason, reason);
1932 
1933 	dmar_fault_dump_ptes(iommu, source_id, addr, pasid);
1934 
1935 	return 0;
1936 }
1937 
1938 #define PRIMARY_FAULT_REG_LEN (16)
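/*
 * Fault event interrupt handler: walk the primary fault recording registers
 * starting at the index reported in DMAR_FSTS_REG, log (rate limited) and
 * clear each valid record, then acknowledge the summary bits in the fault
 * status register.
 */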
1939 irqreturn_t dmar_fault(int irq, void *dev_id)
1940 {
1941 	struct intel_iommu *iommu = dev_id;
1942 	int reg, fault_index;
1943 	u32 fault_status;
1944 	unsigned long flag;
1945 	static DEFINE_RATELIMIT_STATE(rs,
1946 				      DEFAULT_RATELIMIT_INTERVAL,
1947 				      DEFAULT_RATELIMIT_BURST);
1948 
1949 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1950 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1951 	if (fault_status && __ratelimit(&rs))
1952 		pr_err("DRHD: handling fault status reg %x\n", fault_status);
1953 
1954 	/* TBD: the advanced fault log is currently ignored */
1955 	if (!(fault_status & DMA_FSTS_PPF))
1956 		goto unlock_exit;
1957 
1958 	fault_index = dma_fsts_fault_record_index(fault_status);
1959 	reg = cap_fault_reg_offset(iommu->cap);
1960 	while (1) {
1961 		/* When rate limited, skip printing and simply clear the fault */
1962 		bool ratelimited = !__ratelimit(&rs);
1963 		u8 fault_reason;
1964 		u16 source_id;
1965 		u64 guest_addr;
1966 		u32 pasid;
1967 		int type;
1968 		u32 data;
1969 		bool pasid_present;
1970 
1971 		/* highest 32 bits */
1972 		data = readl(iommu->reg + reg +
1973 				fault_index * PRIMARY_FAULT_REG_LEN + 12);
1974 		if (!(data & DMA_FRCD_F))
1975 			break;
1976 
1977 		if (!ratelimited) {
1978 			fault_reason = dma_frcd_fault_reason(data);
1979 			type = dma_frcd_type(data);
1980 
1981 			pasid = dma_frcd_pasid_value(data);
1982 			data = readl(iommu->reg + reg +
1983 				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
1984 			source_id = dma_frcd_source_id(data);
1985 
1986 			pasid_present = dma_frcd_pasid_present(data);
1987 			guest_addr = dmar_readq(iommu->reg + reg +
1988 					fault_index * PRIMARY_FAULT_REG_LEN);
1989 			guest_addr = dma_frcd_page_addr(guest_addr);
1990 		}
1991 
1992 		/* clear the fault */
1993 		writel(DMA_FRCD_F, iommu->reg + reg +
1994 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
1995 
1996 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1997 
1998 		if (!ratelimited)
1999 			/* Use IOMMU_PASID_INVALID if no PASID is present */
2000 			dmar_fault_do_one(iommu, type, fault_reason,
2001 					  pasid_present ? pasid : IOMMU_PASID_INVALID,
2002 					  source_id, guest_addr);
2003 
2004 		fault_index++;
2005 		if (fault_index >= cap_num_fault_regs(iommu->cap))
2006 			fault_index = 0;
2007 		raw_spin_lock_irqsave(&iommu->register_lock, flag);
2008 	}
2009 
2010 	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
2011 	       iommu->reg + DMAR_FSTS_REG);
2012 
2013 unlock_exit:
2014 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
2015 	return IRQ_HANDLED;
2016 }
2017 
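/*
 * Allocate a hardware IRQ for this IOMMU's fault event and install
 * dmar_fault() as its handler. Calling it again is a no-op once the
 * interrupt has been set up.
 */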
2018 int dmar_set_interrupt(struct intel_iommu *iommu)
2019 {
2020 	int irq, ret;
2021 
2022 	/*
2023 	 * Check if the fault interrupt is already initialized.
2024 	 */
2025 	if (iommu->irq)
2026 		return 0;
2027 
2028 	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
2029 	if (irq > 0) {
2030 		iommu->irq = irq;
2031 	} else {
2032 		pr_err("No free IRQ vectors\n");
2033 		return -EINVAL;
2034 	}
2035 
2036 	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
2037 	if (ret)
2038 		pr_err("Can't request irq\n");
2039 	return ret;
2040 }
2041 
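/*
 * Set up fault reporting for all DMAR units local to the given CPU's node:
 * register the fault interrupt if not already done, then drain and clear any
 * faults that are still pending.
 */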
2042 int enable_drhd_fault_handling(unsigned int cpu)
2043 {
2044 	struct dmar_drhd_unit *drhd;
2045 	struct intel_iommu *iommu;
2046 
2047 	/*
2048 	 * Enable fault control interrupt.
2049 	 */
2050 	guard(rwsem_read)(&dmar_global_lock);
2051 	for_each_iommu(iommu, drhd) {
2052 		u32 fault_status;
2053 		int ret;
2054 
2055 		if (iommu->irq || iommu->node != cpu_to_node(cpu))
2056 			continue;
2057 
2058 		ret = dmar_set_interrupt(iommu);
2059 
2060 		if (ret) {
2061 			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
2062 			       (unsigned long long)drhd->reg_base_addr, ret);
2063 			return -1;
2064 		}
2065 
2066 		/*
2067 		 * Clear any previous faults.
2068 		 */
2069 		dmar_fault(iommu->irq, iommu);
2070 		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
2071 		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
2072 	}
2073 
2074 	return 0;
2075 }
2076 
2077 /*
2078  * Re-enable Queued Invalidation interface.
2079  */
2080 int dmar_reenable_qi(struct intel_iommu *iommu)
2081 {
2082 	if (!ecap_qis(iommu->ecap))
2083 		return -ENOENT;
2084 
2085 	if (!iommu->qi)
2086 		return -ENOENT;
2087 
2088 	/*
2089 	 * First disable queued invalidation.
2090 	 */
2091 	dmar_disable_qi(iommu);
2092 	/*
2093 	 * Then enable queued invalidation again. Since there are no pending
2094 	 * invalidation requests now, it's safe to re-enable queued
2095 	 * invalidation.
2096 	 */
2097 	__dmar_enable_qi(iommu);
2098 
2099 	return 0;
2100 }
2101 
2102 /*
2103  * Check interrupt remapping support in DMAR table description.
2104  */
2105 int __init dmar_ir_support(void)
2106 {
2107 	struct acpi_table_dmar *dmar;
2108 	dmar = (struct acpi_table_dmar *)dmar_tbl;
2109 	if (!dmar)
2110 		return 0;
2111 	return dmar->flags & 0x1;
2112 }
2113 
2114 /* Check whether DMAR units are in use */
2115 static inline bool dmar_in_use(void)
2116 {
2117 	return irq_remapping_enabled || intel_iommu_enabled;
2118 }
2119 
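/*
 * Late initcall: if neither interrupt remapping nor the Intel IOMMU ended up
 * being used, release the DRHD structures parsed from the DMAR table and, if
 * it was registered, the PCI bus notifier.
 */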
2120 static int __init dmar_free_unused_resources(void)
2121 {
2122 	struct dmar_drhd_unit *dmaru, *dmaru_n;
2123 
2124 	if (dmar_in_use())
2125 		return 0;
2126 
2127 	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
2128 		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
2129 
2130 	down_write(&dmar_global_lock);
2131 	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
2132 		list_del(&dmaru->list);
2133 		dmar_free_drhd(dmaru);
2134 	}
2135 	up_write(&dmar_global_lock);
2136 
2137 	return 0;
2138 }
2139 
2140 late_initcall(dmar_free_unused_resources);
2141 
2142 /*
2143  * DMAR Hotplug Support
2144  * For more details, please refer to Intel(R) Virtualization Technology
2145  * for Directed I/O Architecture Specification, Rev 2.2, Section 8.8
2146  * "Remapping Hardware Unit Hot Plug".
2147  */
2148 static guid_t dmar_hp_guid =
2149 	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
2150 		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
2151 
2152 /*
2153  * Currently there is only one revision and the BIOS will not check the revision ID,
2154  * so use 0 for safety.
2155  */
2156 #define	DMAR_DSM_REV_ID			0
2157 #define	DMAR_DSM_FUNC_DRHD		1
2158 #define	DMAR_DSM_FUNC_ATSR		2
2159 #define	DMAR_DSM_FUNC_RHSA		3
2160 #define	DMAR_DSM_FUNC_SATC		4
2161 
2162 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
2163 {
2164 	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
2165 }
2166 
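/*
 * Evaluate DMAR _DSM function 'func' on an ACPI object and walk the buffer
 * of remapping structures it returns, dispatching entries of the matching
 * type to 'handler'.
 */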
2167 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
2168 				  dmar_res_handler_t handler, void *arg)
2169 {
2170 	int ret = -ENODEV;
2171 	union acpi_object *obj;
2172 	struct acpi_dmar_header *start;
2173 	struct dmar_res_callback callback;
2174 	static int res_type[] = {
2175 		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
2176 		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
2177 		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
2178 		[DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC,
2179 	};
2180 
2181 	if (!dmar_detect_dsm(handle, func))
2182 		return 0;
2183 
2184 	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
2185 				      func, NULL, ACPI_TYPE_BUFFER);
2186 	if (!obj)
2187 		return -ENODEV;
2188 
2189 	memset(&callback, 0, sizeof(callback));
2190 	callback.cb[res_type[func]] = handler;
2191 	callback.arg[res_type[func]] = arg;
2192 	start = (struct acpi_dmar_header *)obj->buffer.pointer;
2193 	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
2194 
2195 	ACPI_FREE(obj);
2196 
2197 	return ret;
2198 }
2199 
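/*
 * Per-DRHD hotplug callbacks: enable, disable or release the remapping
 * hardware unit described by one hardware unit definition structure from the
 * _DSM buffer.
 */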
2200 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
2201 {
2202 	int ret;
2203 	struct dmar_drhd_unit *dmaru;
2204 
2205 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2206 	if (!dmaru)
2207 		return -ENODEV;
2208 
2209 	ret = dmar_ir_hotplug(dmaru, true);
2210 	if (ret == 0)
2211 		ret = dmar_iommu_hotplug(dmaru, true);
2212 
2213 	return ret;
2214 }
2215 
2216 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
2217 {
2218 	int i, ret;
2219 	struct device *dev;
2220 	struct dmar_drhd_unit *dmaru;
2221 
2222 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2223 	if (!dmaru)
2224 		return 0;
2225 
2226 	/*
2227 	 * All PCI devices managed by this unit should have been destroyed.
2228 	 */
2229 	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
2230 		for_each_active_dev_scope(dmaru->devices,
2231 					  dmaru->devices_cnt, i, dev)
2232 			return -EBUSY;
2233 	}
2234 
2235 	ret = dmar_ir_hotplug(dmaru, false);
2236 	if (ret == 0)
2237 		ret = dmar_iommu_hotplug(dmaru, false);
2238 
2239 	return ret;
2240 }
2241 
2242 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
2243 {
2244 	struct dmar_drhd_unit *dmaru;
2245 
2246 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
2247 	if (dmaru) {
2248 		list_del_rcu(&dmaru->list);
2249 		synchronize_rcu();
2250 		dmar_free_drhd(dmaru);
2251 	}
2252 
2253 	return 0;
2254 }
2255 
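/*
 * Hot-add sequence: validate and parse the DRHD, RHSA and ATSR structures
 * reported by _DSM, then bring up interrupt remapping and DMA remapping on
 * the new units, rolling everything back if any step fails.
 */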
2256 static int dmar_hotplug_insert(acpi_handle handle)
2257 {
2258 	int ret;
2259 	int drhd_count = 0;
2260 
2261 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2262 				     &dmar_validate_one_drhd, (void *)1);
2263 	if (ret)
2264 		goto out;
2265 
2266 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2267 				     &dmar_parse_one_drhd, (void *)&drhd_count);
2268 	if (ret == 0 && drhd_count == 0) {
2269 		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
2270 		goto out;
2271 	} else if (ret) {
2272 		goto release_drhd;
2273 	}
2274 
2275 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
2276 				     &dmar_parse_one_rhsa, NULL);
2277 	if (ret)
2278 		goto release_drhd;
2279 
2280 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2281 				     &dmar_parse_one_atsr, NULL);
2282 	if (ret)
2283 		goto release_atsr;
2284 
2285 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2286 				     &dmar_hp_add_drhd, NULL);
2287 	if (!ret)
2288 		return 0;
2289 
2290 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2291 			       &dmar_hp_remove_drhd, NULL);
2292 release_atsr:
2293 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2294 			       &dmar_release_one_atsr, NULL);
2295 release_drhd:
2296 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2297 			       &dmar_hp_release_drhd, NULL);
2298 out:
2299 	return ret;
2300 }
2301 
2302 static int dmar_hotplug_remove(acpi_handle handle)
2303 {
2304 	int ret;
2305 
2306 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2307 				     &dmar_check_one_atsr, NULL);
2308 	if (ret)
2309 		return ret;
2310 
2311 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2312 				     &dmar_hp_remove_drhd, NULL);
2313 	if (ret == 0) {
2314 		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
2315 					       &dmar_release_one_atsr, NULL));
2316 		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2317 					       &dmar_hp_release_drhd, NULL));
2318 	} else {
2319 		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
2320 				       &dmar_hp_add_drhd, NULL);
2321 	}
2322 
2323 	return ret;
2324 }
2325 
2326 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
2327 				       void *context, void **retval)
2328 {
2329 	acpi_handle *phdl = retval;
2330 
2331 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2332 		*phdl = handle;
2333 		return AE_CTRL_TERMINATE;
2334 	}
2335 
2336 	return AE_OK;
2337 }
2338 
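/*
 * Common entry point for hot add/removal: locate the ACPI object that
 * implements the DMAR _DSM (the handle itself or one of its descendants) and
 * perform the operation under dmar_global_lock.
 */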
2339 static int dmar_device_hotplug(acpi_handle handle, bool insert)
2340 {
2341 	int ret;
2342 	acpi_handle tmp = NULL;
2343 	acpi_status status;
2344 
2345 	if (!dmar_in_use())
2346 		return 0;
2347 
2348 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
2349 		tmp = handle;
2350 	} else {
2351 		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
2352 					     ACPI_UINT32_MAX,
2353 					     dmar_get_dsm_handle,
2354 					     NULL, NULL, &tmp);
2355 		if (ACPI_FAILURE(status)) {
2356 			pr_warn("Failed to locate _DSM method.\n");
2357 			return -ENXIO;
2358 		}
2359 	}
2360 	if (tmp == NULL)
2361 		return 0;
2362 
2363 	down_write(&dmar_global_lock);
2364 	if (insert)
2365 		ret = dmar_hotplug_insert(tmp);
2366 	else
2367 		ret = dmar_hotplug_remove(tmp);
2368 	up_write(&dmar_global_lock);
2369 
2370 	return ret;
2371 }
2372 
2373 int dmar_device_add(acpi_handle handle)
2374 {
2375 	return dmar_device_hotplug(handle, true);
2376 }
2377 
2378 int dmar_device_remove(acpi_handle handle)
2379 {
2380 	return dmar_device_hotplug(handle, false);
2381 }
2382 
2383 /*
2384  * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
2385  *
2386  * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
2387  * the ACPI DMAR table. This means that the platform boot firmware has made
2388  * sure no device can issue DMA outside of RMRR regions.
2389  */
2390 bool dmar_platform_optin(void)
2391 {
2392 	struct acpi_table_dmar *dmar;
2393 	acpi_status status;
2394 	bool ret;
2395 
2396 	status = acpi_get_table(ACPI_SIG_DMAR, 0,
2397 				(struct acpi_table_header **)&dmar);
2398 	if (ACPI_FAILURE(status))
2399 		return false;
2400 
2401 	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
2402 	acpi_put_table((struct acpi_table_header *)dmar);
2403 
2404 	return ret;
2405 }
2406 EXPORT_SYMBOL_GPL(dmar_platform_optin);
2407