// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

#include "dma-iommu.h"

static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	struct zpci_iommu_ctrs	ctrs;
	unsigned long		*dma_table;
	spinlock_t		list_lock;
	struct rcu_head		rcu;
	u8			origin_type;
};

static struct iommu_domain blocking_domain;

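/*
 * Helpers to extract the index into each level of the I/O translation
 * table hierarchy (region first, region second, region third, segment
 * and page table) from an I/O virtual address.
 */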
static inline unsigned int calc_rfx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rsx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rtx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_sx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_px(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}

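/*
 * The helpers below operate on 64-bit table entries: they install the
 * next-level table origin or the page frame address, set the entry type,
 * length, valid and protection bits, and decode an entry back into a
 * virtual pointer to the next-level table.
 */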
static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
{
	*entry &= ZPCI_PTE_FLAG_MASK;
	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}

static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rso & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RFX;
}

static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RSX;
}

static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RTX;
}

static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
{
	*entry &= ZPCI_STE_FLAG_MASK;
	*entry |= (pto & ZPCI_STE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_SX;
}

static inline void validate_rf_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RFX;
}

static inline void validate_rs_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RSX;
}

static inline void validate_rt_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RTX;
}

static inline void validate_st_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry |= ZPCI_TABLE_VALID;
}

static inline void invalidate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_INVALID;
}

static inline void validate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_VALID;
}

static inline void entry_set_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_PROTECTED;
}

static inline void entry_clr_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_UNPROTECTED;
}

static inline int reg_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static inline int pt_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static inline unsigned long *get_rf_rso(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rs_rto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rt_sto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_st_pto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
	else
		return NULL;
}

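/*
 * Region tables and page tables have fixed sizes and alignment
 * requirements, so back them with dedicated kmem caches.
 */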
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
						   ZPCI_TABLE_SIZE,
						   ZPCI_TABLE_ALIGN,
						   0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
						 ZPCI_PT_SIZE,
						 ZPCI_PT_ALIGN,
						 0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_free_rt_table(unsigned long entry)
{
	unsigned long *rto = get_rs_rto(entry);
	int rtx;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(rto[rtx]))
			dma_free_seg_table(rto[rtx]);

	dma_free_cpu_table(rto);
}

static void dma_free_rs_table(unsigned long entry)
{
	unsigned long *rso = get_rf_rso(entry);
	int rsx;

	for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
		if (reg_entry_isvalid(rso[rsx]))
			dma_free_rt_table(rso[rsx]);

	dma_free_cpu_table(rso);
}

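/*
 * Free the whole translation table hierarchy of a domain, starting from
 * the topmost table indicated by origin_type and descending through all
 * valid entries.
 */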
static void dma_cleanup_tables(struct s390_domain *domain)
{
	int rtx, rsx, rfx;

	if (!domain->dma_table)
		return;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
			if (reg_entry_isvalid(domain->dma_table[rfx]))
				dma_free_rs_table(domain->dma_table[rfx]);
		break;
	case ZPCI_TABLE_TYPE_RSX:
		for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
			if (reg_entry_isvalid(domain->dma_table[rsx]))
				dma_free_rt_table(domain->dma_table[rsx]);
		break;
	case ZPCI_TABLE_TYPE_RTX:
		for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
			if (reg_entry_isvalid(domain->dma_table[rtx]))
				dma_free_seg_table(domain->dma_table[rtx]);
		break;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return;
	}

	dma_free_cpu_table(domain->dma_table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

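/*
 * The walk helpers below populate missing lower-level tables locklessly:
 * a new table is prepared and installed with cmpxchg(); if another CPU
 * raced and installed its own table first, the local one is freed and
 * the winner's table is used instead.
 */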
static unsigned long *dma_walk_rs_table(unsigned long *rso,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rsx = calc_rsx(dma_addr);
	unsigned long old_rse, rse;
	unsigned long *rsep, *rto;

	rsep = &rso[rsx];
	rse = READ_ONCE(*rsep);
	if (reg_entry_isvalid(rse)) {
		rto = get_rs_rto(rse);
	} else {
		rto = dma_alloc_cpu_table(gfp);
		if (!rto)
			return NULL;

		set_rs_rto(&rse, virt_to_phys(rto));
		validate_rs_entry(&rse);
		entry_clr_protected(&rse);

		old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
		if (old_rse != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rto);
			rto = get_rs_rto(old_rse);
		}
	}
	return rto;
}

static unsigned long *dma_walk_rf_table(unsigned long *rfo,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rfx = calc_rfx(dma_addr);
	unsigned long old_rfe, rfe;
	unsigned long *rfep, *rso;

	rfep = &rfo[rfx];
	rfe = READ_ONCE(*rfep);
	if (reg_entry_isvalid(rfe)) {
		rso = get_rf_rso(rfe);
	} else {
		rso = dma_alloc_cpu_table(gfp);
		if (!rso)
			return NULL;

		set_rf_rso(&rfe, virt_to_phys(rso));
		validate_rf_entry(&rfe);
		entry_clr_protected(&rfe);

		old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
		if (old_rfe != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rso);
			rso = get_rf_rso(old_rfe);
		}
	}

	if (!rso)
		return NULL;

	return dma_walk_rs_table(rso, dma_addr, gfp);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table(gfp);
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table(gfp);
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}

static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
					     dma_addr_t dma_addr, gfp_t gfp)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RSX:
		return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

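/*
 * Walk the translation tables for dma_addr down to the page table and
 * return a pointer to the page table entry, allocating any missing
 * intermediate tables along the way.
 */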
static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
					 dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned long *rto, *sto, *pto;
	unsigned int rtx, sx, px;

	rto = dma_walk_region_tables(domain, dma_addr, gfp);
	if (!rto)
		return NULL;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx], gfp);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

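/*
 * Build the new page table entry value from the current one and install
 * it with xchg() so that concurrent hardware table walks only ever see a
 * consistent entry.
 */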
static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return zdev->pft != PCI_FUNC_TYPE_ISM;
	default:
		return false;
	}
}

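/*
 * Highest IOVA that can be addressed with the domain's top-level table
 * type; used as the aperture end of the domain geometry.
 */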
static inline u64 max_tbl_size(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_TABLE_SIZE_RT - 1;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_TABLE_SIZE_RS - 1;
	case ZPCI_TABLE_TYPE_RFX:
		return U64_MAX;
	default:
		return 0;
	}
}

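/*
 * Allocate a paging domain and pick the smallest top-level table type
 * that covers the device's DMA aperture, limited to the designation
 * types the device reports as supported (zdev->dtsm). If neither a
 * region-second nor a region-first table is usable, fall back to a
 * region-third table and clamp the aperture accordingly.
 */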
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	u64 aperture_size;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	aperture_size = min(s390_iommu_aperture,
			    zdev->end_dma - zdev->start_dma + 1);
	if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
	} else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
		  (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
	} else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
	} else {
		/* Assume RTX available */
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
		aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
	}
	zdev->end_dma = zdev->start_dma + aperture_size - 1;

	s390_domain->domain.pgsize_bitmap = SZ_4K;
	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

	dma_cleanup_tables(s390_domain);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	rcu_read_lock();
	WARN_ON(!list_empty(&s390_domain->devices));
	rcu_read_unlock();

	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}

static void zdev_s390_domain_update(struct zpci_dev *zdev,
				    struct iommu_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->dom_lock, flags);
	zdev->s390_domain = domain;
	spin_unlock_irqrestore(&zdev->dom_lock, flags);
}

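/*
 * Designation-type flag matching the domain's top-level table; it is
 * OR'ed into the IOTA value passed to zpci_register_ioat().
 */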
static u64 get_iota_region_flag(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_IOTA_RTTO_FLAG;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_IOTA_RSTO_FLAG;
	case ZPCI_TABLE_TYPE_RFX:
		return ZPCI_IOTA_RFTO_FLAG;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return 0;
	}
}

static bool reg_ioat_propagate_error(int cc, u8 status)
{
	/*
	 * If the device is in the error state the reset routine
	 * will register the IOAT of the newly set domain on re-enable
	 */
	if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return false;
	/*
	 * If the device was removed treat registration as success
	 * and let the subsequent error event trigger tear down.
	 */
	if (cc == ZPCI_CC_INVAL_HANDLE)
		return false;
	return cc != ZPCI_CC_OK;
}

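/*
 * Register the I/O address translation (IOAT) parameters matching the
 * given domain type with the device: the identity domain registers
 * without a translation table (IOTA of 0), the blocked domain registers
 * nothing, and a paging domain registers its translation table origin
 * together with the region flag.
 */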
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
				      struct iommu_domain *domain, u8 *status)
{
	struct s390_domain *s390_domain;
	int rc = 0;
	u64 iota;

	switch (domain->type) {
	case IOMMU_DOMAIN_IDENTITY:
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, 0, status);
		break;
	case IOMMU_DOMAIN_BLOCKED:
		/* Nothing to do in this case */
		break;
	default:
		s390_domain = to_s390_domain(domain);
		iota = virt_to_phys(s390_domain->dma_table) |
		       get_iota_region_flag(s390_domain);
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, iota, status);
	}

	return rc;
}

int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&zdev->dom_lock, flags);

	rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);

	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	return rc;
}

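/*
 * Attaching the blocking domain removes the device from its current
 * paging domain's device list (if it was attached to one), unregisters
 * its IOAT and clears dma_table, leaving DMA blocked until another
 * domain is attached.
 */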
static int blocking_domain_attach_device(struct iommu_domain *domain,
					 struct device *dev,
					 struct iommu_domain *old)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	unsigned long flags;

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	s390_domain = to_s390_domain(zdev->s390_domain);
	if (zdev->dma_table) {
		spin_lock_irqsave(&s390_domain->list_lock, flags);
		list_del_rcu(&zdev->iommu_list);
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
	}

	zpci_unregister_ioat(zdev, 0);
	zdev->dma_table = NULL;
	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev,
				    struct iommu_domain *old)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;
	u8 status;
	int cc;

	if (!zdev)
		return -ENODEV;

	if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
		domain->geometry.aperture_end < zdev->start_dma))
		return -EINVAL;

	blocking_domain_attach_device(&blocking_domain, dev, old);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
	if (reg_ioat_propagate_error(cc, status))
		return -EIO;
	zdev->dma_table = s390_domain->dma_table;
	zdev_s390_domain_update(zdev, domain);

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}

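/*
 * Report the IOVA ranges the device cannot use as reserved regions:
 * everything below start_dma and, for paging domains, everything above
 * end_dma up to the highest IOVA the translation table could address.
 */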
static void s390_iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_resv_region *region;
	u64 max_size, end_resv;
	unsigned long flags;

	if (zdev->start_dma) {
		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
						 IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}

	spin_lock_irqsave(&zdev->dom_lock, flags);
	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
		spin_unlock_irqrestore(&zdev->dom_lock, flags);
		return;
	}

	max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	if (zdev->end_dma < max_size) {
		end_resv = max_size - zdev->end_dma;
		region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
						 0, IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}
}

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	if (zdev->start_dma > zdev->end_dma)
		return ERR_PTR(-EINVAL);

	if (zdev->tlb_refresh)
		dev->iommu->shadow_on_flush = 1;

	/* Start with DMA blocked */
	spin_lock_init(&zdev->dom_lock);
	zdev_s390_domain_update(zdev, &blocking_domain);

	return &zdev->iommu_dev;
}

static int zpci_refresh_all(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
				  zdev->end_dma - zdev->start_dma + 1);
}

static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.global_rpcits);
		zpci_refresh_all(zdev);
	}
	rcu_read_unlock();
}

static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = gather->end - gather->start + 1;
	struct zpci_dev *zdev;

	/* If nothing was added to the gather there is nothing to flush */
	if (!gather->end)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.sync_rpcits);
		zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
				   size);
	}
	rcu_read_unlock();
}

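/*
 * Devices with tlb_refresh set (typically when a hypervisor shadows the
 * I/O translation tables) need an RPCIT even when a previously invalid
 * entry becomes valid, so flush the newly mapped range for those
 * devices here.
 */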
static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		if (!zdev->tlb_refresh)
			continue;
		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
		ret = zpci_refresh_trans((u64)zdev->fh << 32,
					 iova, size);
		/*
		 * let the hypervisor discover invalidated entries
		 * allowing it to free IOVAs and unpin pages
		 */
		if (ret == -ENOMEM) {
			ret = zpci_refresh_all(zdev);
			if (ret)
				break;
		}
	}
	rcu_read_unlock();

	return ret;
}

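/*
 * Install translations for a contiguous range of pages. On failure any
 * entries installed so far are invalidated again so the range is left
 * unmapped.
 */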
static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
				     phys_addr_t pa, dma_addr_t dma_addr,
				     unsigned long nr_pages, int flags,
				     gfp_t gfp)
{
	phys_addr_t page_addr = pa & PAGE_MASK;
	unsigned long *entry;
	unsigned long i;
	int rc;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (unlikely(!entry)) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	return 0;

undo_cpu_trans:
	while (i-- > 0) {
		dma_addr -= PAGE_SIZE;
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (!entry)
			break;
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
	}

	return rc;
}

static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
				       dma_addr_t dma_addr, unsigned long nr_pages)
{
	unsigned long *entry;
	unsigned long i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
		if (unlikely(!entry)) {
			rc = -EINVAL;
			break;
		}
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
		dma_addr += PAGE_SIZE;
	}

	return rc;
}

static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount,
				int prot, gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
				     pgcount, flags, gfp);
	if (!rc) {
		*mapped = size;
		atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
	}

	return rc;
}

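/*
 * Read-only lookup helpers that descend from the domain's top-level
 * table to the region-second and region-third table covering an IOVA
 * without allocating anything; they return NULL if the path is not yet
 * populated.
 */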
static unsigned long *get_rso_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rfo;
	unsigned long rfe;
	unsigned int rfx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		rfo = domain->dma_table;
		rfx = calc_rfx(iova);
		rfe = READ_ONCE(rfo[rfx]);
		if (!reg_entry_isvalid(rfe))
			return NULL;
		return get_rf_rso(rfe);
	case ZPCI_TABLE_TYPE_RSX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static unsigned long *get_rto_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rso;
	unsigned long rse;
	unsigned int rsx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
	case ZPCI_TABLE_TYPE_RSX:
		rso = get_rso_from_iova(domain, iova);
		rsx = calc_rsx(iova);
		rse = READ_ONCE(rso[rsx]);
		if (!reg_entry_isvalid(rse))
			return NULL;
		return get_rs_rto(rse);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *rto, *sto, *pto;
	unsigned long ste, pte, rte;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rto = get_rto_from_iova(s390_domain, iova);
	if (!rto)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);

	rte = READ_ONCE(rto[rtx]);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
		ste = READ_ONCE(sto[sx]);
		if (reg_entry_isvalid(ste)) {
			pto = get_st_pto(ste);
			pte = READ_ONCE(pto[px]);
			if (pt_entry_isvalid(pte))
				phys = pte & ZPCI_PTE_ADDR_MASK;
		}
	}

	return phys;
}

static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	iommu_iotlb_gather_add_range(gather, iova, size);
	atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);

	return size;
}

struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain;

	lockdep_assert_held(&zdev->dom_lock);

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
		return NULL;

	s390_domain = to_s390_domain(zdev->s390_domain);
	return &s390_domain->ctrs;
}

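/*
 * Register the zPCI function with the IOMMU core. Devices that support
 * relaxed translation (rtr_avail) get the ops variant that additionally
 * offers an identity domain.
 */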
int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	if (zdev->rtr_avail) {
		rc = iommu_device_register(&zdev->iommu_dev,
					   &s390_iommu_rtr_ops, NULL);
	} else {
		rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops,
					   NULL);
	}
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict")) {
		pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
		iommu_set_dma_strict();
	}
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);

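/*
 * The default aperture is the end of usable memory (high_memory) scaled
 * by the s390_iommu_aperture= factor; a factor of 0 selects an
 * effectively unlimited aperture. iommu_dma_forcedac is set so the DMA
 * API does not try to allocate 32-bit IOVAs first.
 */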
static int __init s390_iommu_init(void)
{
	int rc;

	iommu_dma_forcedac = true;
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	rc = dma_alloc_cpu_table_caches();
	if (rc)
		return rc;

	return rc;
}
subsys_initcall(s390_iommu_init);

static int s390_attach_dev_identity(struct iommu_domain *domain,
				    struct device *dev,
				    struct iommu_domain *old)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	u8 status;
	int cc;

	blocking_domain_attach_device(&blocking_domain, dev, old);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
	if (reg_ioat_propagate_error(cc, status))
		return -EIO;

	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static const struct iommu_domain_ops s390_identity_ops = {
	.attach_dev = s390_attach_dev_identity,
};

static struct iommu_domain s390_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &s390_identity_ops,
};

static struct iommu_domain blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &(const struct iommu_domain_ops) {
		.attach_dev	= blocking_domain_attach_device,
	}
};

#define S390_IOMMU_COMMON_OPS() \
	.blocked_domain		= &blocking_domain, \
	.release_domain		= &blocking_domain, \
	.capable = s390_iommu_capable, \
	.domain_alloc_paging = s390_domain_alloc_paging, \
	.probe_device = s390_iommu_probe_device, \
	.device_group = generic_device_group, \
	.get_resv_regions = s390_iommu_get_resv_regions, \
	.default_domain_ops = &(const struct iommu_domain_ops) { \
		.attach_dev	= s390_iommu_attach_device, \
		.map_pages	= s390_iommu_map_pages, \
		.unmap_pages	= s390_iommu_unmap_pages, \
		.flush_iotlb_all = s390_iommu_flush_iotlb_all, \
		.iotlb_sync      = s390_iommu_iotlb_sync, \
		.iotlb_sync_map  = s390_iommu_iotlb_sync_map, \
		.iova_to_phys	= s390_iommu_iova_to_phys, \
		.free		= s390_domain_free, \
	}

static const struct iommu_ops s390_iommu_ops = {
	S390_IOMMU_COMMON_OPS()
};

static const struct iommu_ops s390_iommu_rtr_ops = {
	.identity_domain	= &s390_identity_domain,
	S390_IOMMU_COMMON_OPS()
};