// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mmu_context.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"


#define SPA_PASID_BITS		15
#define SPA_PASID_MAX		((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK		SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG	22 /* Each SPA is 4 MB */

#define SPA_CFG_SF		(1ull << (63-0))
#define SPA_CFG_TA		(1ull << (63-1))
#define SPA_CFG_HV		(1ull << (63-3))
#define SPA_CFG_UV		(1ull << (63-4))
#define SPA_CFG_XLAT_hpt	(0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh	(2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror	(3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR		(1ull << (63-49))
#define SPA_CFG_TC		(1ull << (63-54))
#define SPA_CFG_DR		(1ull << (63-59))

#define SPA_XSL_TF		(1ull << (63-3))  /* Translation fault */
#define SPA_XSL_S		(1ull << (63-38)) /* Store operation */

#define SPA_PE_VALID		0x80000000


struct pe_data {
	struct mm_struct *mm;
	/* callback to trigger when a translation fault occurs */
	void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
	/* opaque pointer to be passed to the above callback */
	void *xsl_err_data;
	struct rcu_head rcu;
};

struct spa {
	struct ocxl_process_element *spa_mem;
	int spa_order;
	struct mutex spa_lock;
	struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
	char *irq_name;
	int virq;
	void __iomem *reg_dsisr;
	void __iomem *reg_dar;
	void __iomem *reg_tfc;
	void __iomem *reg_pe_handle;
	/*
	 * The following fields are used by the memory fault
	 * interrupt handler. We can only have one interrupt at a
	 * time. The NPU won't raise another interrupt until the
	 * previous one has been ack'd by writing to the TFC register
	 */
	struct xsl_fault {
		struct work_struct fault_work;
		u64 pe;
		u64 dsisr;
		u64 dar;
		struct pe_data pe_data;
	} xsl_fault;
};
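/*
 * Sizing note: SPA_PASID_BITS = 15 allows 2^15 process elements, and
 * each ocxl_process_element is 128 bytes (enforced by the
 * BUILD_BUG_ON in ocxl_link_add_pe() below), so a SPA spans
 * 2^15 * 128 = 2^22 bytes, hence SPA_SPA_SIZE_LOG = 22.
 */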
/*
 * An opencapi link can be used by several PCI functions. We have
 * one link per device slot.
 *
 * A linked list of opencapi links should suffice, as there's a
 * limited number of opencapi slots on a system and lookup is only
 * done when the device is probed
 */
struct link {
	struct list_head list;
	struct kref ref;
	int domain;
	int bus;
	int dev;
	atomic_t irq_available;
	struct spa *spa;
	void *platform_data;
};
static struct list_head links_list = LIST_HEAD_INIT(links_list);
static DEFINE_MUTEX(links_list_lock);

enum xsl_response {
	CONTINUE,
	ADDRESS_ERROR,
	RESTART,
};


static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
	u64 reg;

	*dsisr = in_be64(spa->reg_dsisr);
	*dar = in_be64(spa->reg_dar);
	reg = in_be64(spa->reg_pe_handle);
	*pe = reg & SPA_PE_MASK;
}

static void ack_irq(struct spa *spa, enum xsl_response r)
{
	u64 reg = 0;

	/* continue is not supported */
	if (r == RESTART)
		reg = PPC_BIT(31);
	else if (r == ADDRESS_ERROR)
		reg = PPC_BIT(30);
	else
		WARN(1, "Invalid irq response %d\n", r);

	if (reg) {
		trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
				spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
		out_be64(spa->reg_tfc, reg);
	}
}

static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
	unsigned int flt = 0;
	unsigned long access, flags, inv_flags = 0;
	enum xsl_response r;
	struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
					fault_work);
	struct spa *spa = container_of(fault, struct spa, xsl_fault);
	int rc;

	/*
	 * We must release a reference on mm_users whenever exiting this
	 * function (taken in the memory fault interrupt handler)
	 */
	rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
				&flt);
	if (rc) {
		pr_debug("copro_handle_mm_fault failed: %d\n", rc);
		if (fault->pe_data.xsl_err_cb) {
			fault->pe_data.xsl_err_cb(
				fault->pe_data.xsl_err_data,
				fault->dar, fault->dsisr);
		}
		r = ADDRESS_ERROR;
		goto ack;
	}

	if (!radix_enabled()) {
		/*
		 * update_mmu_cache() will not have loaded the hash
		 * since current->trap is not a 0x400 or 0x300, so
		 * just call hash_page_mm() here.
		 */
		access = _PAGE_PRESENT | _PAGE_READ;
		if (fault->dsisr & SPA_XSL_S)
			access |= _PAGE_WRITE;

		if (REGION_ID(fault->dar) != USER_REGION_ID)
			access |= _PAGE_PRIVILEGED;

		local_irq_save(flags);
		hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
			inv_flags);
		local_irq_restore(flags);
	}
	r = RESTART;
ack:
	mmput(fault->pe_data.mm);
	ack_irq(spa, r);
}
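/*
 * Fault handling overview: xsl_fault_handler() below runs in hard
 * interrupt context. It reads the fault registers, looks up the
 * faulting context and takes a reference on its mm, then defers the
 * actual resolution to xsl_fault_handler_bh() above, which runs in
 * process context and acks the NPU through the TFC register
 * (RESTART if the fault was resolved, ADDRESS_ERROR otherwise, after
 * notifying the AFU driver through its xsl_err_cb callback).
 */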
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
	struct link *link = (struct link *) data;
	struct spa *spa = link->spa;
	u64 dsisr, dar, pe_handle;
	struct pe_data *pe_data;
	struct ocxl_process_element *pe;
	int lpid, pid, tid;
	bool schedule = false;

	read_irq(spa, &dsisr, &dar, &pe_handle);
	trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);

	WARN_ON(pe_handle > SPA_PE_MASK);
	pe = spa->spa_mem + pe_handle;
	lpid = be32_to_cpu(pe->lpid);
	pid = be32_to_cpu(pe->pid);
	tid = be32_to_cpu(pe->tid);
	/* We could be reading all null values here if the PE is being
	 * removed while an interrupt kicks in. It's not supposed to
	 * happen if the driver notified the AFU to terminate the
	 * PASID, and the AFU waited for pending operations before
	 * acknowledging. But even if it happens, we won't find a
	 * memory context below and will fail silently, so it should
	 * be ok.
	 */
	if (!(dsisr & SPA_XSL_TF)) {
		WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}

	rcu_read_lock();
	pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		/*
		 * Could only happen if the driver didn't notify the
		 * AFU about PASID termination before removing the PE,
		 * or the AFU didn't wait for all memory access to
		 * have completed.
		 *
		 * Either way, we fail early, but we shouldn't log an
		 * error message, as it is a valid (if unexpected)
		 * scenario
		 */
		rcu_read_unlock();
		pr_debug("Unknown mm context for xsl interrupt\n");
		ack_irq(spa, ADDRESS_ERROR);
		return IRQ_HANDLED;
	}
	WARN_ON(pe_data->mm->context.id != pid);

	if (mmget_not_zero(pe_data->mm)) {
		spa->xsl_fault.pe = pe_handle;
		spa->xsl_fault.dar = dar;
		spa->xsl_fault.dsisr = dsisr;
		spa->xsl_fault.pe_data = *pe_data;
		schedule = true;
		/* mm_users count released by bottom half */
	}
	rcu_read_unlock();
	if (schedule)
		schedule_work(&spa->xsl_fault.fault_work);
	else
		ack_irq(spa, ADDRESS_ERROR);
	return IRQ_HANDLED;
}

static void unmap_irq_registers(struct spa *spa)
{
	pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
				spa->reg_pe_handle);
}

static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
	return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
				&spa->reg_tfc, &spa->reg_pe_handle);
}
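/*
 * Setting up the translation fault interrupt follows a fixed
 * sequence: get the XSL hardware interrupt from the platform, map
 * the fault registers, create the virq mapping, then install
 * xsl_fault_handler(). release_xsl_irq() undoes it in reverse order.
 */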
static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
{
	struct spa *spa = link->spa;
	int rc;
	int hwirq;

	rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
	if (rc)
		return rc;

	rc = map_irq_registers(dev, spa);
	if (rc)
		return rc;

	spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
				link->domain, link->bus, link->dev);
	if (!spa->irq_name) {
		unmap_irq_registers(spa);
		dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
		return -ENOMEM;
	}
	/*
	 * At some point, we'll need to look into allowing a higher
	 * number of interrupts. Could we have an IRQ domain per link?
	 */
	spa->virq = irq_create_mapping(NULL, hwirq);
	if (!spa->virq) {
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"irq_create_mapping failed for translation interrupt\n");
		return -EINVAL;
	}

	dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);

	rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
			link);
	if (rc) {
		irq_dispose_mapping(spa->virq);
		kfree(spa->irq_name);
		unmap_irq_registers(spa);
		dev_err(&dev->dev,
			"request_irq failed for translation interrupt: %d\n",
			rc);
		return -EINVAL;
	}
	return 0;
}

static void release_xsl_irq(struct link *link)
{
	struct spa *spa = link->spa;

	if (spa->virq) {
		free_irq(spa->virq, link);
		irq_dispose_mapping(spa->virq);
	}
	kfree(spa->irq_name);
	unmap_irq_registers(spa);
}

static int alloc_spa(struct pci_dev *dev, struct link *link)
{
	struct spa *spa;

	spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
	if (!spa)
		return -ENOMEM;

	mutex_init(&spa->spa_lock);
	INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
	INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);

	spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
	spa->spa_mem = (struct ocxl_process_element *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
	if (!spa->spa_mem) {
		dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
		kfree(spa);
		return -ENOMEM;
	}
	pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
		link->dev, spa->spa_mem);

	link->spa = spa;
	return 0;
}

static void free_spa(struct link *link)
{
	struct spa *spa = link->spa;

	pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
		link->dev);

	if (spa && spa->spa_mem) {
		free_pages((unsigned long) spa->spa_mem, spa->spa_order);
		kfree(spa);
		link->spa = NULL;
	}
}

static int alloc_link(struct pci_dev *dev, int PE_mask, struct link **out_link)
{
	struct link *link;
	int rc;

	link = kzalloc(sizeof(struct link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	kref_init(&link->ref);
	link->domain = pci_domain_nr(dev->bus);
	link->bus = dev->bus->number;
	link->dev = PCI_SLOT(dev->devfn);
	atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);

	rc = alloc_spa(dev, link);
	if (rc)
		goto err_free;

	rc = setup_xsl_irq(dev, link);
	if (rc)
		goto err_spa;

	/* platform specific hook */
	rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
				&link->platform_data);
	if (rc)
		goto err_xsl_irq;

	*out_link = link;
	return 0;

err_xsl_irq:
	release_xsl_irq(link);
err_spa:
	free_spa(link);
err_free:
	kfree(link);
	return rc;
}

static void free_link(struct link *link)
{
	release_xsl_irq(link);
	free_spa(link);
	kfree(link);
}
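/*
 * Typical link lifecycle from a calling driver, as a hedged sketch
 * (the caller and the PE_mask value are illustrative, not defined in
 * this file):
 *
 *	void *link_handle;
 *	int rc;
 *
 *	rc = ocxl_link_setup(pci_dev, 0, &link_handle);
 *	if (rc)
 *		return rc;
 *	...
 *	ocxl_link_release(pci_dev, link_handle);
 *
 * All PCI functions of a slot share the same refcounted link, so
 * repeated calls for the same slot return the existing link.
 */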
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
	int rc = 0;
	struct link *link;

	mutex_lock(&links_list_lock);
	list_for_each_entry(link, &links_list, list) {
		/* The functions of a device all share the same link */
		if (link->domain == pci_domain_nr(dev->bus) &&
			link->bus == dev->bus->number &&
			link->dev == PCI_SLOT(dev->devfn)) {
			kref_get(&link->ref);
			*link_handle = link;
			goto unlock;
		}
	}
	rc = alloc_link(dev, PE_mask, &link);
	if (rc)
		goto unlock;

	list_add(&link->list, &links_list);
	*link_handle = link;
unlock:
	mutex_unlock(&links_list_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);

static void release_xsl(struct kref *ref)
{
	struct link *link = container_of(ref, struct link, ref);

	list_del(&link->list);
	/* call platform code before releasing data */
	pnv_ocxl_spa_release(link->platform_data);
	free_link(link);
}

void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
	struct link *link = (struct link *) link_handle;

	mutex_lock(&links_list_lock);
	kref_put(&link->ref, release_xsl);
	mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);

static u64 calculate_cfg_state(bool kernel)
{
	u64 state;

	state = SPA_CFG_DR;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		state |= SPA_CFG_TC;
	if (radix_enabled())
		state |= SPA_CFG_XLAT_ror;
	else
		state |= SPA_CFG_XLAT_hpt;
	state |= SPA_CFG_HV;
	if (kernel) {
		if (mfmsr() & MSR_SF)
			state |= SPA_CFG_SF;
	} else {
		state |= SPA_CFG_PR;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			state |= SPA_CFG_SF;
	}
	return state;
}
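/*
 * Worked example: for a 64-bit user process on a radix host,
 * calculate_cfg_state(false) returns
 *
 *	SPA_CFG_DR | SPA_CFG_XLAT_ror | SPA_CFG_HV | SPA_CFG_PR |
 *	SPA_CFG_SF
 *
 * with SPA_CFG_TC added if LPCR[TC] is set.
 */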
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
		u64 amr, struct mm_struct *mm,
		void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
		void *xsl_err_data)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc = 0;
	struct pe_data *pe_data;

	BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	mutex_lock(&spa->spa_lock);
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	if (pe->software_state) {
		rc = -EBUSY;
		goto unlock;
	}

	pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
	if (!pe_data) {
		rc = -ENOMEM;
		goto unlock;
	}

	pe_data->mm = mm;
	pe_data->xsl_err_cb = xsl_err_cb;
	pe_data->xsl_err_data = xsl_err_data;

	memset(pe, 0, sizeof(struct ocxl_process_element));
	pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
	pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	pe->pid = cpu_to_be32(pidr);
	pe->tid = cpu_to_be32(tidr);
	pe->amr = cpu_to_be64(amr);
	pe->software_state = cpu_to_be32(SPA_PE_VALID);

	mm_context_add_copro(mm);
	/*
	 * Barrier is to make sure PE is visible in the SPA before it
	 * is used by the device. It also helps with the global TLBI
	 * invalidation
	 */
	mb();
	radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);

	/*
	 * The mm must stay valid for as long as the device uses it. We
	 * lower the count when the context is removed from the SPA.
	 *
	 * We grab mm_count (and not mm_users), as we don't want to
	 * end up in a circular dependency if a process mmaps its
	 * mmio, therefore incrementing the file ref count when
	 * calling mmap(), and forgets to unmap before exiting. In
	 * that scenario, when the kernel handles the death of the
	 * process, the file is not cleaned because unmap was not
	 * called, and the mm wouldn't be freed because we would still
	 * have a reference on mm_users. Incrementing mm_count solves
	 * the problem.
	 */
	mmgrab(mm);
	trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);

int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	pe->tid = cpu_to_be32(tid);

	/*
	 * The barrier makes sure the PE is updated
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	mutex_unlock(&spa->spa_lock);
	return rc;
}
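/*
 * A hedged sketch of the PE add/remove pairing expected from
 * callers (local names and the error callback are illustrative):
 *
 *	static void my_xsl_err(void *data, u64 addr, u64 dsisr) { }
 *
 *	rc = ocxl_link_add_pe(link_handle, pasid, pidr, tidr, amr,
 *			current->mm, my_xsl_err, my_data);
 *	...
 *	rc = ocxl_link_remove_pe(link_handle, pasid);
 */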
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
	struct link *link = (struct link *) link_handle;
	struct spa *spa = link->spa;
	struct ocxl_process_element *pe;
	struct pe_data *pe_data;
	int pe_handle, rc;

	if (pasid > SPA_PASID_MAX)
		return -EINVAL;

	/*
	 * About synchronization with our memory fault handler:
	 *
	 * Before removing the PE, the driver is supposed to have
	 * notified the AFU, which should have cleaned up and made
	 * sure the PASID is no longer in use, including pending
	 * interrupts. However, there's no way to be sure...
	 *
	 * We clear the PE and remove the context from our radix
	 * tree. From that point on, any new interrupt for that
	 * context will fail silently, which is ok. As mentioned
	 * above, that's not expected, but it could happen if the
	 * driver or AFU didn't do the right thing.
	 *
	 * There could still be a bottom half running, but we don't
	 * need to wait/flush, as it is managing a reference count on
	 * the mm it reads from the radix tree.
	 */
	pe_handle = pasid & SPA_PE_MASK;
	pe = spa->spa_mem + pe_handle;

	mutex_lock(&spa->spa_lock);

	if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
		rc = -EINVAL;
		goto unlock;
	}

	trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
				be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));

	memset(pe, 0, sizeof(struct ocxl_process_element));
	/*
	 * The barrier makes sure the PE is removed from the SPA
	 * before we clear the NPU context cache below, so that the
	 * old PE cannot be reloaded erroneously.
	 */
	mb();

	/*
	 * hook to platform code
	 * On powerpc, the entry needs to be cleared from the context
	 * cache of the NPU.
	 */
	rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
	WARN_ON(rc);

	pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
	if (!pe_data) {
		WARN(1, "Couldn't find pe data when removing PE\n");
	} else {
		mm_context_remove_copro(pe_data->mm);
		mmdrop(pe_data->mm);
		kfree_rcu(pe_data, rcu);
	}
unlock:
	mutex_unlock(&spa->spa_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);

int ocxl_link_irq_alloc(void *link_handle, int *hw_irq, u64 *trigger_addr)
{
	struct link *link = (struct link *) link_handle;
	int rc, irq;
	u64 addr;

	if (atomic_dec_if_positive(&link->irq_available) < 0)
		return -ENOSPC;

	rc = pnv_ocxl_alloc_xive_irq(&irq, &addr);
	if (rc) {
		atomic_inc(&link->irq_available);
		return rc;
	}

	*hw_irq = irq;
	*trigger_addr = addr;
	return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);

void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
	struct link *link = (struct link *) link_handle;

	pnv_ocxl_free_xive_irq(hw_irq);
	atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);
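/*
 * AFU interrupt allocation, as a hedged sketch (the caller is
 * illustrative; the trigger address is assumed to be what the AFU
 * writes to raise the interrupt):
 *
 *	int hw_irq;
 *	u64 trigger_addr;
 *
 *	rc = ocxl_link_irq_alloc(link_handle, &hw_irq, &trigger_addr);
 *	...
 *	ocxl_link_free_irq(link_handle, hw_irq);
 *
 * Allocations are bounded per link by MAX_IRQ_PER_LINK through the
 * irq_available counter, so -ENOSPC is expected once the link's
 * budget is exhausted.
 */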