/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT	1
#define CXL_SLOT_RESET_EVENT		2
#define CXL_RESUME_EVENT		3

static void pci_error_handlers(struct cxl_afu *afu,
				int bus_error_event,
				pci_channel_state_t state)
{
	struct pci_dev *afu_dev;

	if (afu->phb == NULL)
		return;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		switch (bus_error_event) {
		case CXL_ERROR_DETECTED_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->error_detected)
				afu_dev->driver->err_handler->error_detected(afu_dev, state);
			break;
		case CXL_SLOT_RESET_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_dev->driver->err_handler->slot_reset(afu_dev);
			break;
		case CXL_RESUME_EVENT:
			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
			break;
		}
	}
}

static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
						u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}
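
/*
 * In the guest, per-interrupt fault information cannot be read from PSL
 * MMIO registers. It is collected from the hypervisor with an hcall
 * (cxl_h_collect_int_info) and then handed to the common cxl_irq() path.
 */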
static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq(irq, ctx, &irq_info);
	return rc;
}

static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
	u64 state;
	int rc = 0;

	rc = cxl_h_read_error_state(afu->guest->handle, &state);
	if (!rc) {
		WARN_ON(state != H_STATE_NORMAL &&
			state != H_STATE_DISABLE &&
			state != H_STATE_TEMP_UNAVAILABLE &&
			state != H_STATE_PERM_UNAVAILABLE);
		*state_out = state & 0xffffffff;
	}
	return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}


static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				*irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
			(irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}

static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	return rc;
}
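
/*
 * Hardware IRQ numbers are handed out from the adapter's irq_avail
 * ranges. All of the allocators below take irq_alloc_lock around the
 * bitmap operations done by irq_alloc_range()/irq_free_range().
 */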
static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}

static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}
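
/*
 * AFU configuration record reads are bounced through a zeroed page:
 * cxl_h_get_config() copies 'sz' bytes of the record into it and the
 * value is read back in little-endian byte order, as in PCI config
 * space. The records are not writable from the guest, hence the
 * *_write stubs further down.
 */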
static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
			u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}
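
/*
 * attach_afu_directed() builds a process element in a zeroed page (it
 * must be 8-byte aligned and cannot cross a 4K boundary): translation
 * and privilege flags, PID, segment table pointers and the interrupt
 * sources of the context. The element is passed to the attach hcall;
 * on success the MMIO range returned by the hypervisor becomes the
 * context's problem-state area, unless the context is a master or the
 * AFU has no per-process problem-state area.
 */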
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	__be64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags = cpu_to_be64(flags);
	elem->common.tid = cpu_to_be32(0); /* Unused */
	elem->common.pid = cpu_to_be32(pid);
	elem->common.csrp = cpu_to_be64(0); /* disable */
	elem->common.aurp0 = cpu_to_be64(0); /* disable */
	elem->common.aurp1 = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
			ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

	free_page((u64)elem);
	return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}
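
/*
 * The AFU error buffer is read through a bounce page filled by the
 * cxl_h_get_afu_err() hcall; at most ERR_BUFF_MAX_COPY_SIZE bytes are
 * copied back to the caller.
 */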
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			off & 0x7,
			virt_to_phys(tbuf),
			count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);

	return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}

static bool guest_support_attributes(const char *attr_name,
				enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
			(strcmp(attr_name, "load_image_on_perst") == 0) ||
			(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
			(strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	case CXL_AFU_ATTRS:
		break;
	default:
		break;
	}

	return true;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}
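
/*
 * AFU error recovery: afu_update_state() reads the error state reported
 * by the hypervisor and drives the matching PCI error callbacks on the
 * virtual PHB. It returns 1 when the AFU is (back to) normal, 0 when
 * there is nothing more to do, and a negative errno on failure.
 */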
static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		rc = 1;
		break;

	case H_STATE_DISABLE:
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
			rc = 1;
		}
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
			afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}

static int afu_do_recovery(struct cxl_afu *afu)
{
	int rc;

	/* many threads can arrive here, in case of detach_all for example.
	 * Only one needs to drive the recovery
	 */
	if (mutex_trylock(&afu->guest->recovery_lock)) {
		rc = afu_update_state(afu);
		mutex_unlock(&afu->guest->recovery_lock);
		return rc;
	}
	return 0;
}

static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
	int state;

	if (afu) {
		if (afu_read_error_state(afu, &state) ||
			state != H_STATE_NORMAL) {
			if (afu_do_recovery(afu) > 0) {
				/* check again in case we've just fixed it */
				if (!afu_read_error_state(afu, &state) &&
					state == H_STATE_NORMAL)
					return true;
			}
			return false;
		}
	}

	return true;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	if (afu->crs_len < 0) {
		dev_err(&afu->dev, "Unexpected configuration record size value\n");
		return -EINVAL;
	}

	return 0;
}
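
/*
 * AFU initialization from its device tree node: read the handle and
 * properties, map the slice registers, then register the error
 * interrupt, the device and its sysfs attributes. Once
 * cxl_register_afu() has been called, cleanup must go through
 * device_unregister() (guest_release_afu() frees the structures)
 * rather than freeing the afu directly.
 */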
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	mutex_init(&afu->guest->recovery_lock);

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
					adapter->adapter_num,
					slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	afu->enabled = true;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	/* check the pointer before dereferencing it in the trace message */
	if (!afu)
		return;

	pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest->irq_avail) {
		for (i = 0; i < adapter->guest->irq_nranges; i++) {
			cur = &adapter->guest->irq_avail[i];
			kfree(cur->bitmap);
		}
		kfree(adapter->guest->irq_avail);
	}
	kfree(adapter->guest->status);
	cxl_remove_adapter_nr(adapter);
	kfree(adapter->guest);
	kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
		strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING:Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}
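
/*
 * Adapter initialization mirrors the AFU path: allocate the adapter,
 * read its handle and properties from the device tree, then register
 * the character device, the device itself and its sysfs attributes.
 * The device release callback (release_adapter -> free_adapter) owns
 * the final free, so once cxl_register_adapter() has been called the
 * error path uses device_unregister() instead of freeing directly.
 */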
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}

void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}

const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.setup_irq = NULL,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};