// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for s390 PCI devices
 *
 * Copyright IBM Corp. 2015
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iommu-helper.h>
#include <linux/sizes.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <asm/pci_dma.h>

#include "dma-iommu.h"

static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

static u64 s390_iommu_aperture;
static u32 s390_iommu_aperture_factor = 1;

struct s390_domain {
	struct iommu_domain	domain;
	struct list_head	devices;
	struct zpci_iommu_ctrs	ctrs;
	unsigned long		*dma_table;
	spinlock_t		list_lock;
	struct rcu_head		rcu;
	u8			origin_type;
};

static struct iommu_domain blocking_domain;

static inline unsigned int calc_rfx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RF_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rsx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RS_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_rtx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_sx(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static inline unsigned int calc_px(dma_addr_t ptr)
{
	return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
}

static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
{
	*entry &= ZPCI_PTE_FLAG_MASK;
	*entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
}

static inline void set_rf_rso(unsigned long *entry, phys_addr_t rso)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rso & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RFX;
}

static inline void set_rs_rto(unsigned long *entry, phys_addr_t rto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (rto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RSX;
}

static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
{
	*entry &= ZPCI_RTE_FLAG_MASK;
	*entry |= (sto & ZPCI_RTE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_RTX;
}

static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
{
	*entry &= ZPCI_STE_FLAG_MASK;
	*entry |= (pto & ZPCI_STE_ADDR_MASK);
	*entry |= ZPCI_TABLE_TYPE_SX;
}

static inline void validate_rf_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RFX;
}

static inline void validate_rs_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RSX;
}

static inline void validate_rt_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry &= ~ZPCI_TABLE_OFFSET_MASK;
	*entry |= ZPCI_TABLE_VALID;
	*entry |= ZPCI_TABLE_LEN_RTX;
}

static inline void validate_st_entry(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_VALID_MASK;
	*entry |= ZPCI_TABLE_VALID;
}

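/*
 * Page table entry state helpers: switch a PTE between the invalid and
 * valid state. The WARN_ON_ONCE flags attempts to invalidate an already
 * invalid entry or to validate an already valid one.
 */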
static inline void invalidate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_INVALID;
}

static inline void validate_pt_entry(unsigned long *entry)
{
	WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
	*entry &= ~ZPCI_PTE_VALID_MASK;
	*entry |= ZPCI_PTE_VALID;
}

static inline void entry_set_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_PROTECTED;
}

static inline void entry_clr_protected(unsigned long *entry)
{
	*entry &= ~ZPCI_TABLE_PROT_MASK;
	*entry |= ZPCI_TABLE_UNPROTECTED;
}

static inline int reg_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static inline int pt_entry_isvalid(unsigned long entry)
{
	return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static inline unsigned long *get_rf_rso(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RFX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rs_rto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RSX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_rt_sto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
		return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
	else
		return NULL;
}

static inline unsigned long *get_st_pto(unsigned long entry)
{
	if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
		return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
	else
		return NULL;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
						   ZPCI_TABLE_SIZE,
						   ZPCI_TABLE_ALIGN,
						   0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
						 ZPCI_PT_SIZE,
						 ZPCI_PT_ALIGN,
						 0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_free_rt_table(unsigned long entry)
{
	unsigned long *rto = get_rs_rto(entry);
	int rtx;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(rto[rtx]))
			dma_free_seg_table(rto[rtx]);

	dma_free_cpu_table(rto);
}

static void dma_free_rs_table(unsigned long entry)
{
	unsigned long *rso = get_rf_rso(entry);
	int rsx;

	for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
		if (reg_entry_isvalid(rso[rsx]))
			dma_free_rt_table(rso[rsx]);

	dma_free_cpu_table(rso);
}

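/*
 * Free the complete translation table hierarchy of a domain, walking it
 * top down from the configured origin type (region-first, region-second
 * or region-third table).
 */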
static void dma_cleanup_tables(struct s390_domain *domain)
{
	int rtx, rsx, rfx;

	if (!domain->dma_table)
		return;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		for (rfx = 0; rfx < ZPCI_TABLE_ENTRIES; rfx++)
			if (reg_entry_isvalid(domain->dma_table[rfx]))
				dma_free_rs_table(domain->dma_table[rfx]);
		break;
	case ZPCI_TABLE_TYPE_RSX:
		for (rsx = 0; rsx < ZPCI_TABLE_ENTRIES; rsx++)
			if (reg_entry_isvalid(domain->dma_table[rsx]))
				dma_free_rt_table(domain->dma_table[rsx]);
		break;
	case ZPCI_TABLE_TYPE_RTX:
		for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
			if (reg_entry_isvalid(domain->dma_table[rtx]))
				dma_free_seg_table(domain->dma_table[rtx]);
		break;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return;
	}

	dma_free_cpu_table(domain->dma_table);
}

static unsigned long *dma_alloc_page_table(gfp_t gfp)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, gfp);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static unsigned long *dma_walk_rs_table(unsigned long *rso,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rsx = calc_rsx(dma_addr);
	unsigned long old_rse, rse;
	unsigned long *rsep, *rto;

	rsep = &rso[rsx];
	rse = READ_ONCE(*rsep);
	if (reg_entry_isvalid(rse)) {
		rto = get_rs_rto(rse);
	} else {
		rto = dma_alloc_cpu_table(gfp);
		if (!rto)
			return NULL;

		set_rs_rto(&rse, virt_to_phys(rto));
		validate_rs_entry(&rse);
		entry_clr_protected(&rse);

		old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
		if (old_rse != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rto);
			rto = get_rs_rto(old_rse);
		}
	}
	return rto;
}

static unsigned long *dma_walk_rf_table(unsigned long *rfo,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rfx = calc_rfx(dma_addr);
	unsigned long old_rfe, rfe;
	unsigned long *rfep, *rso;

	rfep = &rfo[rfx];
	rfe = READ_ONCE(*rfep);
	if (reg_entry_isvalid(rfe)) {
		rso = get_rf_rso(rfe);
	} else {
		rso = dma_alloc_cpu_table(gfp);
		if (!rso)
			return NULL;

		set_rf_rso(&rfe, virt_to_phys(rso));
		validate_rf_entry(&rfe);
		entry_clr_protected(&rfe);

		old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
		if (old_rfe != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rso);
			rso = get_rf_rso(old_rfe);
		}
	}

	if (!rso)
		return NULL;

	return dma_walk_rs_table(rso, dma_addr, gfp);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
{
	unsigned long old_rte, rte;
	unsigned long *sto;

	rte = READ_ONCE(*rtep);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
	} else {
		sto = dma_alloc_cpu_table(gfp);
		if (!sto)
			return NULL;

		set_rt_sto(&rte, virt_to_phys(sto));
		validate_rt_entry(&rte);
		entry_clr_protected(&rte);

		old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
		if (old_rte != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(sto);
			sto = get_rt_sto(old_rte);
		}
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
{
	unsigned long old_ste, ste;
	unsigned long *pto;

	ste = READ_ONCE(*step);
	if (reg_entry_isvalid(ste)) {
		pto = get_st_pto(ste);
	} else {
		pto = dma_alloc_page_table(gfp);
		if (!pto)
			return NULL;
		set_st_pto(&ste, virt_to_phys(pto));
		validate_st_entry(&ste);
		entry_clr_protected(&ste);

		old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
		if (old_ste != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_page_table(pto);
			pto = get_st_pto(old_ste);
		}
	}
	return pto;
}

static unsigned long *dma_walk_region_tables(struct s390_domain *domain,
					     dma_addr_t dma_addr, gfp_t gfp)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		return dma_walk_rf_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RSX:
		return dma_walk_rs_table(domain->dma_table, dma_addr, gfp);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

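/*
 * Walk the translation tables for an IOVA down to the page table entry,
 * allocating any missing intermediate tables on the way, and return a
 * pointer to the PTE. Returns NULL if an allocation fails.
 */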
static unsigned long *dma_walk_cpu_trans(struct s390_domain *domain,
					 dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned long *rto, *sto, *pto;
	unsigned int rtx, sx, px;

	rto = dma_walk_region_tables(domain, dma_addr, gfp);
	if (!rto)
		return NULL;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx], gfp);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx], gfp);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
{
	unsigned long pte;

	pte = READ_ONCE(*ptep);
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(&pte);
	} else {
		set_pt_pfaa(&pte, page_addr);
		validate_pt_entry(&pte);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(&pte);
	else
		entry_clr_protected(&pte);

	xchg(ptep, pte);
}

static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct s390_domain, domain);
}

static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DEFERRED_FLUSH:
		return zdev->pft != PCI_FUNC_TYPE_ISM;
	default:
		return false;
	}
}

static inline u64 max_tbl_size(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_TABLE_SIZE_RT - 1;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_TABLE_SIZE_RS - 1;
	case ZPCI_TABLE_TYPE_RFX:
		return U64_MAX;
	default:
		return 0;
	}
}

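/*
 * Allocate a new paging domain. The top-level table type is chosen so
 * that the device's DMA aperture fits: a region-third table if it is
 * large enough, otherwise a region-second or region-first table when the
 * device supports those types (zdev->dtsm).
 */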
static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	u64 aperture_size;

	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
	if (!s390_domain)
		return NULL;

	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
	if (!s390_domain->dma_table) {
		kfree(s390_domain);
		return NULL;
	}

	aperture_size = min(s390_iommu_aperture,
			    zdev->end_dma - zdev->start_dma + 1);
	if (aperture_size <= (ZPCI_TABLE_SIZE_RT - zdev->start_dma)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
	} else if (aperture_size <= (ZPCI_TABLE_SIZE_RS - zdev->start_dma) &&
		   (zdev->dtsm & ZPCI_IOTA_DT_RS)) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RSX;
	} else if (zdev->dtsm & ZPCI_IOTA_DT_RF) {
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RFX;
	} else {
		/* Assume RTX available */
		s390_domain->origin_type = ZPCI_TABLE_TYPE_RTX;
		aperture_size = ZPCI_TABLE_SIZE_RT - zdev->start_dma;
	}
	zdev->end_dma = zdev->start_dma + aperture_size - 1;

	s390_domain->domain.pgsize_bitmap = SZ_4K;
	s390_domain->domain.geometry.force_aperture = true;
	s390_domain->domain.geometry.aperture_start = 0;
	s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);

	spin_lock_init(&s390_domain->list_lock);
	INIT_LIST_HEAD_RCU(&s390_domain->devices);

	return &s390_domain->domain;
}

static void s390_iommu_rcu_free_domain(struct rcu_head *head)
{
	struct s390_domain *s390_domain = container_of(head, struct s390_domain, rcu);

	dma_cleanup_tables(s390_domain);
	kfree(s390_domain);
}

static void s390_domain_free(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);

	rcu_read_lock();
	WARN_ON(!list_empty(&s390_domain->devices));
	rcu_read_unlock();

	call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
}

static void zdev_s390_domain_update(struct zpci_dev *zdev,
				    struct iommu_domain *domain)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->dom_lock, flags);
	zdev->s390_domain = domain;
	spin_unlock_irqrestore(&zdev->dom_lock, flags);
}

static u64 get_iota_region_flag(struct s390_domain *domain)
{
	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RTX:
		return ZPCI_IOTA_RTTO_FLAG;
	case ZPCI_TABLE_TYPE_RSX:
		return ZPCI_IOTA_RSTO_FLAG;
	case ZPCI_TABLE_TYPE_RFX:
		return ZPCI_IOTA_RFTO_FLAG;
	default:
		WARN_ONCE(1, "Invalid IOMMU table (%x)\n", domain->origin_type);
		return 0;
	}
}

static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
				      struct iommu_domain *domain, u8 *status)
{
	struct s390_domain *s390_domain;
	int rc = 0;
	u64 iota;

	switch (domain->type) {
	case IOMMU_DOMAIN_IDENTITY:
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, 0, status);
		break;
	case IOMMU_DOMAIN_BLOCKED:
		/* Nothing to do in this case */
		break;
	default:
		s390_domain = to_s390_domain(domain);
		iota = virt_to_phys(s390_domain->dma_table) |
		       get_iota_region_flag(s390_domain);
		rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
					zdev->end_dma, iota, status);
	}

	return rc;
}

int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&zdev->dom_lock, flags);

	rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);

	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	return rc;
}

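/*
 * Detach the device from its current domain and leave DMA blocked:
 * remove it from the old domain's device list, unregister the I/O
 * address translation tables and record the blocking domain as the
 * device's current domain.
 */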
static int blocking_domain_attach_device(struct iommu_domain *domain,
					 struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct s390_domain *s390_domain;
	unsigned long flags;

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	s390_domain = to_s390_domain(zdev->s390_domain);
	if (zdev->dma_table) {
		spin_lock_irqsave(&s390_domain->list_lock, flags);
		list_del_rcu(&zdev->iommu_list);
		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
	}

	zpci_unregister_ioat(zdev, 0);
	zdev->dma_table = NULL;
	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static int s390_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev = to_zpci_dev(dev);
	unsigned long flags;
	u8 status;
	int cc;

	if (!zdev)
		return -ENODEV;

	if (WARN_ON(domain->geometry.aperture_start > zdev->end_dma ||
		    domain->geometry.aperture_end < zdev->start_dma))
		return -EINVAL;

	blocking_domain_attach_device(&blocking_domain, dev);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return -EIO;
	zdev->dma_table = s390_domain->dma_table;
	zdev_s390_domain_update(zdev, domain);

	spin_lock_irqsave(&s390_domain->list_lock, flags);
	list_add_rcu(&zdev->iommu_list, &s390_domain->devices);
	spin_unlock_irqrestore(&s390_domain->list_lock, flags);

	return 0;
}

static void s390_iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	struct iommu_resv_region *region;
	u64 max_size, end_resv;
	unsigned long flags;

	if (zdev->start_dma) {
		region = iommu_alloc_resv_region(0, zdev->start_dma, 0,
						 IOMMU_RESV_RESERVED, GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}

	spin_lock_irqsave(&zdev->dom_lock, flags);
	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
	    zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY) {
		spin_unlock_irqrestore(&zdev->dom_lock, flags);
		return;
	}

	max_size = max_tbl_size(to_s390_domain(zdev->s390_domain));
	spin_unlock_irqrestore(&zdev->dom_lock, flags);

	if (zdev->end_dma < max_size) {
		end_resv = max_size - zdev->end_dma;
		region = iommu_alloc_resv_region(zdev->end_dma + 1, end_resv,
						 0, IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		if (!region)
			return;
		list_add_tail(&region->list, list);
	}
}

static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
	struct zpci_dev *zdev;

	if (!dev_is_pci(dev))
		return ERR_PTR(-ENODEV);

	zdev = to_zpci_dev(dev);

	if (zdev->start_dma > zdev->end_dma)
		return ERR_PTR(-EINVAL);

	if (zdev->tlb_refresh)
		dev->iommu->shadow_on_flush = 1;

	/* Start with DMA blocked */
	spin_lock_init(&zdev->dom_lock);
	zdev_s390_domain_update(zdev, &blocking_domain);

	return &zdev->iommu_dev;
}

static int zpci_refresh_all(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
				  zdev->end_dma - zdev->start_dma + 1);
}

static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.global_rpcits);
		zpci_refresh_all(zdev);
	}
	rcu_read_unlock();
}

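/*
 * Flush the IOVA range collected in the IOTLB gather on every device
 * attached to the domain.
 */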
static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = gather->end - gather->start + 1;
	struct zpci_dev *zdev;

	/* If nothing was added to the gather there is nothing to flush */
	if (!gather->end)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		atomic64_inc(&s390_domain->ctrs.sync_rpcits);
		zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
				   size);
	}
	rcu_read_unlock();
}

static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
				     unsigned long iova, size_t size)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	struct zpci_dev *zdev;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
		if (!zdev->tlb_refresh)
			continue;
		atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
		ret = zpci_refresh_trans((u64)zdev->fh << 32,
					 iova, size);
		/*
		 * let the hypervisor discover invalidated entries
		 * allowing it to free IOVAs and unpin pages
		 */
		if (ret == -ENOMEM) {
			ret = zpci_refresh_all(zdev);
			if (ret)
				break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
				     phys_addr_t pa, dma_addr_t dma_addr,
				     unsigned long nr_pages, int flags,
				     gfp_t gfp)
{
	phys_addr_t page_addr = pa & PAGE_MASK;
	unsigned long *entry;
	unsigned long i;
	int rc;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (unlikely(!entry)) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	return 0;

undo_cpu_trans:
	while (i-- > 0) {
		dma_addr -= PAGE_SIZE;
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, gfp);
		if (!entry)
			break;
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
	}

	return rc;
}

static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
				       dma_addr_t dma_addr, unsigned long nr_pages)
{
	unsigned long *entry;
	unsigned long i;
	int rc = 0;

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(s390_domain, dma_addr, GFP_ATOMIC);
		if (unlikely(!entry)) {
			rc = -EINVAL;
			break;
		}
		dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
		dma_addr += PAGE_SIZE;
	}

	return rc;
}

static int s390_iommu_map_pages(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr,
				size_t pgsize, size_t pgcount,
				int prot, gfp_t gfp, size_t *mapped)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int flags = ZPCI_PTE_VALID, rc = 0;

	if (pgsize != SZ_4K)
		return -EINVAL;

	if (iova < s390_domain->domain.geometry.aperture_start ||
	    (iova + size - 1) > s390_domain->domain.geometry.aperture_end)
		return -EINVAL;

	if (!IS_ALIGNED(iova | paddr, pgsize))
		return -EINVAL;

	if (!(prot & IOMMU_WRITE))
		flags |= ZPCI_TABLE_PROTECTED;

	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
				       pgcount, flags, gfp);
	if (!rc) {
		*mapped = size;
		atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
	}

	return rc;
}

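/*
 * Lookup helpers for iova_to_phys: starting from the domain's top-level
 * table, return the region-second resp. region-third table covering the
 * given IOVA, or NULL if the relevant entry is not valid. No tables are
 * allocated on this path.
 */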
static unsigned long *get_rso_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rfo;
	unsigned long rfe;
	unsigned int rfx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
		rfo = domain->dma_table;
		rfx = calc_rfx(iova);
		rfe = READ_ONCE(rfo[rfx]);
		if (!reg_entry_isvalid(rfe))
			return NULL;
		return get_rf_rso(rfe);
	case ZPCI_TABLE_TYPE_RSX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static unsigned long *get_rto_from_iova(struct s390_domain *domain,
					dma_addr_t iova)
{
	unsigned long *rso;
	unsigned long rse;
	unsigned int rsx;

	switch (domain->origin_type) {
	case ZPCI_TABLE_TYPE_RFX:
	case ZPCI_TABLE_TYPE_RSX:
		rso = get_rso_from_iova(domain, iova);
		rsx = calc_rsx(iova);
		rse = READ_ONCE(rso[rsx]);
		if (!reg_entry_isvalid(rse))
			return NULL;
		return get_rs_rto(rse);
	case ZPCI_TABLE_TYPE_RTX:
		return domain->dma_table;
	default:
		return NULL;
	}
}

static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	unsigned long *rto, *sto, *pto;
	unsigned long ste, pte, rte;
	unsigned int rtx, sx, px;
	phys_addr_t phys = 0;

	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
		return 0;

	rto = get_rto_from_iova(s390_domain, iova);
	if (!rto)
		return 0;

	rtx = calc_rtx(iova);
	sx = calc_sx(iova);
	px = calc_px(iova);

	rte = READ_ONCE(rto[rtx]);
	if (reg_entry_isvalid(rte)) {
		sto = get_rt_sto(rte);
		ste = READ_ONCE(sto[sx]);
		if (reg_entry_isvalid(ste)) {
			pto = get_st_pto(ste);
			pte = READ_ONCE(pto[px]);
			if (pt_entry_isvalid(pte))
				phys = pte & ZPCI_PTE_ADDR_MASK;
		}
	}

	return phys;
}

static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
				     unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *gather)
{
	struct s390_domain *s390_domain = to_s390_domain(domain);
	size_t size = pgcount << __ffs(pgsize);
	int rc;

	if (WARN_ON(iova < s390_domain->domain.geometry.aperture_start ||
		    (iova + size - 1) > s390_domain->domain.geometry.aperture_end))
		return 0;

	rc = s390_iommu_invalidate_trans(s390_domain, iova, pgcount);
	if (rc)
		return 0;

	iommu_iotlb_gather_add_range(gather, iova, size);
	atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);

	return size;
}

struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
{
	struct s390_domain *s390_domain;

	lockdep_assert_held(&zdev->dom_lock);

	if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
		return NULL;

	s390_domain = to_s390_domain(zdev->s390_domain);
	return &s390_domain->ctrs;
}

int zpci_init_iommu(struct zpci_dev *zdev)
{
	int rc = 0;

	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
				    "s390-iommu.%08x", zdev->fid);
	if (rc)
		goto out_err;

	if (zdev->rtr_avail) {
		rc = iommu_device_register(&zdev->iommu_dev,
					   &s390_iommu_rtr_ops, NULL);
	} else {
		rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops,
					   NULL);
	}
	if (rc)
		goto out_sysfs;

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&zdev->iommu_dev);

out_err:
	return rc;
}

void zpci_destroy_iommu(struct zpci_dev *zdev)
{
	iommu_device_unregister(&zdev->iommu_dev);
	iommu_device_sysfs_remove(&zdev->iommu_dev);
}

static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict")) {
		pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
		iommu_set_dma_strict();
	}
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);

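/*
 * The default aperture covers all of memory (high_memory), optionally
 * scaled by the s390_iommu_aperture= factor; a factor of 0 means an
 * unrestricted aperture.
 */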
static int __init s390_iommu_init(void)
{
	int rc;

	iommu_dma_forcedac = true;
	s390_iommu_aperture = (u64)virt_to_phys(high_memory);
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	rc = dma_alloc_cpu_table_caches();
	if (rc)
		return rc;

	return rc;
}
subsys_initcall(s390_iommu_init);

static int s390_attach_dev_identity(struct iommu_domain *domain,
				    struct device *dev)
{
	struct zpci_dev *zdev = to_zpci_dev(dev);
	u8 status;
	int cc;

	blocking_domain_attach_device(&blocking_domain, dev);

	/* If we fail now DMA remains blocked via blocking domain */
	cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);

	/*
	 * If the device is undergoing error recovery the reset code
	 * will re-establish the new domain.
	 */
	if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
		return -EIO;

	zdev_s390_domain_update(zdev, domain);

	return 0;
}

static const struct iommu_domain_ops s390_identity_ops = {
	.attach_dev = s390_attach_dev_identity,
};

static struct iommu_domain s390_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &s390_identity_ops,
};

static struct iommu_domain blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &(const struct iommu_domain_ops) {
		.attach_dev	= blocking_domain_attach_device,
	}
};

#define S390_IOMMU_COMMON_OPS() \
	.blocked_domain = &blocking_domain, \
	.release_domain = &blocking_domain, \
	.capable = s390_iommu_capable, \
	.domain_alloc_paging = s390_domain_alloc_paging, \
	.probe_device = s390_iommu_probe_device, \
	.device_group = generic_device_group, \
	.get_resv_regions = s390_iommu_get_resv_regions, \
	.default_domain_ops = &(const struct iommu_domain_ops) { \
		.attach_dev	= s390_iommu_attach_device, \
		.map_pages	= s390_iommu_map_pages, \
		.unmap_pages	= s390_iommu_unmap_pages, \
		.flush_iotlb_all = s390_iommu_flush_iotlb_all, \
		.iotlb_sync	= s390_iommu_iotlb_sync, \
		.iotlb_sync_map	= s390_iommu_iotlb_sync_map, \
		.iova_to_phys	= s390_iommu_iova_to_phys, \
		.free		= s390_domain_free, \
	}

static const struct iommu_ops s390_iommu_ops = {
	S390_IOMMU_COMMON_OPS()
};

static const struct iommu_ops s390_iommu_rtr_ops = {
	.identity_domain = &s390_identity_domain,
	S390_IOMMU_COMMON_OPS()
};