// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/eventfd.h>
#include <linux/uaccess.h>
#include <uapi/misc/ocxl.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include "ocxl_internal.h"

#define OCXL_NUM_MINORS 256 /* Total to reserve */

static dev_t ocxl_dev;
static DEFINE_MUTEX(minors_idr_lock);
static struct idr minors_idr;

static struct ocxl_file_info *find_and_get_file_info(dev_t devno)
{
        struct ocxl_file_info *info;

        mutex_lock(&minors_idr_lock);
        info = idr_find(&minors_idr, MINOR(devno));
        if (info)
                get_device(&info->dev);
        mutex_unlock(&minors_idr_lock);
        return info;
}

static int allocate_minor(struct ocxl_file_info *info)
{
        int minor;

        mutex_lock(&minors_idr_lock);
        minor = idr_alloc(&minors_idr, info, 0, OCXL_NUM_MINORS, GFP_KERNEL);
        mutex_unlock(&minors_idr_lock);
        return minor;
}

static void free_minor(struct ocxl_file_info *info)
{
        mutex_lock(&minors_idr_lock);
        idr_remove(&minors_idr, MINOR(info->dev.devt));
        mutex_unlock(&minors_idr_lock);
}

static int afu_open(struct inode *inode, struct file *file)
{
        struct ocxl_file_info *info;
        struct ocxl_context *ctx;
        int rc;

        pr_debug("%s for device %x\n", __func__, inode->i_rdev);

        info = find_and_get_file_info(inode->i_rdev);
        if (!info)
                return -ENODEV;

        rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping);
        if (rc) {
                put_device(&info->dev);
                return rc;
        }
        put_device(&info->dev);
        file->private_data = ctx;
        return 0;
}

static long afu_ioctl_attach(struct ocxl_context *ctx,
                        struct ocxl_ioctl_attach __user *uarg)
{
        struct ocxl_ioctl_attach arg;
        u64 amr = 0;

        pr_debug("%s for context %d\n", __func__, ctx->pasid);

        if (copy_from_user(&arg, uarg, sizeof(arg)))
                return -EFAULT;

        /* Make sure reserved fields are not set for forward compatibility */
        if (arg.reserved1 || arg.reserved2 || arg.reserved3)
                return -EINVAL;

        amr = arg.amr & mfspr(SPRN_UAMOR);
        return ocxl_context_attach(ctx, amr, current->mm);
}

static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
                        struct ocxl_ioctl_metadata __user *uarg)
{
        struct ocxl_ioctl_metadata arg;

        memset(&arg, 0, sizeof(arg));

        arg.version = 0;

        arg.afu_version_major = ctx->afu->config.version_major;
        arg.afu_version_minor = ctx->afu->config.version_minor;
        arg.pasid = ctx->pasid;
        arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride;
        arg.global_mmio_size = ctx->afu->config.global_mmio_size;

        if (copy_to_user(uarg, &arg, sizeof(arg)))
                return -EFAULT;

        return 0;
}
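
/*
 * Illustrative userspace sketch (not part of this driver): opening an AFU
 * device node, querying it with OCXL_IOCTL_GET_METADATA and attaching the
 * current process with OCXL_IOCTL_ATTACH, to mirror the handlers above.
 * The ioctl numbers and structures come from the uapi header <misc/ocxl.h>;
 * the device path is only an example, built as "/dev/ocxl/" followed by
 * "<afu_name>.<pci_address>.<idx>" (see ocxl_file_register_afu() below).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <misc/ocxl.h>
 *
 *	int attach_afu(const char *path)
 *	{
 *		struct ocxl_ioctl_metadata metadata;
 *		struct ocxl_ioctl_attach attach;
 *		int fd = open(path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		// Reserved fields must stay zero (checked by afu_ioctl_attach());
 *		// amr left at 0 means no authority mask restriction is requested.
 *		memset(&attach, 0, sizeof(attach));
 *		if (ioctl(fd, OCXL_IOCTL_GET_METADATA, &metadata) ||
 *		    ioctl(fd, OCXL_IOCTL_ATTACH, &attach)) {
 *			close(fd);
 *			return -1;
 *		}
 *		printf("attached, PASID %u\n", metadata.pasid);
 *		return fd;
 *	}
 */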

#ifdef CONFIG_PPC64
static long afu_ioctl_enable_p9_wait(struct ocxl_context *ctx,
                        struct ocxl_ioctl_p9_wait __user *uarg)
{
        struct ocxl_ioctl_p9_wait arg;

        memset(&arg, 0, sizeof(arg));

        if (cpu_has_feature(CPU_FTR_P9_TIDR)) {
                enum ocxl_context_status status;

                // Locks both status & tidr
                mutex_lock(&ctx->status_mutex);
                if (!ctx->tidr) {
                        if (set_thread_tidr(current)) {
                                mutex_unlock(&ctx->status_mutex);
                                return -ENOENT;
                        }

                        ctx->tidr = current->thread.tidr;
                }

                status = ctx->status;
                mutex_unlock(&ctx->status_mutex);

                if (status == ATTACHED) {
                        int rc = ocxl_link_update_pe(ctx->afu->fn->link,
                                                ctx->pasid, ctx->tidr);

                        if (rc)
                                return rc;
                }

                arg.thread_id = ctx->tidr;
        } else
                return -ENOENT;

        if (copy_to_user(uarg, &arg, sizeof(arg)))
                return -EFAULT;

        return 0;
}
#endif

static long afu_ioctl_get_features(struct ocxl_context *ctx,
                        struct ocxl_ioctl_features __user *uarg)
{
        struct ocxl_ioctl_features arg;

        memset(&arg, 0, sizeof(arg));

#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_P9_TIDR))
                arg.flags[0] |= OCXL_IOCTL_FEATURES_FLAGS0_P9_WAIT;
#endif

        if (copy_to_user(uarg, &arg, sizeof(arg)))
                return -EFAULT;

        return 0;
}

#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" :                 \
                        x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" :       \
                        x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" :         \
                        x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" :     \
                        x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \
                        x == OCXL_IOCTL_ENABLE_P9_WAIT ? "ENABLE_P9_WAIT" : \
                        x == OCXL_IOCTL_GET_FEATURES ? "GET_FEATURES" : \
                        "UNKNOWN")

static irqreturn_t irq_handler(void *private)
{
        struct eventfd_ctx *ev_ctx = private;

        eventfd_signal(ev_ctx);
        return IRQ_HANDLED;
}

static void irq_free(void *private)
{
        struct eventfd_ctx *ev_ctx = private;

        eventfd_ctx_put(ev_ctx);
}

static long afu_ioctl(struct file *file, unsigned int cmd,
                unsigned long args)
{
        struct ocxl_context *ctx = file->private_data;
        struct ocxl_ioctl_irq_fd irq_fd;
        struct eventfd_ctx *ev_ctx;
        int irq_id;
        u64 irq_offset;
        long rc;
        bool closed;

        pr_debug("%s for context %d, command %s\n", __func__, ctx->pasid,
                CMD_STR(cmd));

        mutex_lock(&ctx->status_mutex);
        closed = (ctx->status == CLOSED);
        mutex_unlock(&ctx->status_mutex);

        if (closed)
                return -EIO;

        switch (cmd) {
        case OCXL_IOCTL_ATTACH:
                rc = afu_ioctl_attach(ctx,
                                (struct ocxl_ioctl_attach __user *) args);
                break;

        case OCXL_IOCTL_IRQ_ALLOC:
                rc = ocxl_afu_irq_alloc(ctx, &irq_id);
                if (!rc) {
                        irq_offset = ocxl_irq_id_to_offset(ctx, irq_id);
                        rc = copy_to_user((u64 __user *) args, &irq_offset,
                                        sizeof(irq_offset));
                        if (rc) {
                                ocxl_afu_irq_free(ctx, irq_id);
                                return -EFAULT;
                        }
                }
                break;

        case OCXL_IOCTL_IRQ_FREE:
                rc = copy_from_user(&irq_offset, (u64 __user *) args,
                                sizeof(irq_offset));
                if (rc)
                        return -EFAULT;
                irq_id = ocxl_irq_offset_to_id(ctx, irq_offset);
                rc = ocxl_afu_irq_free(ctx, irq_id);
                break;

        case OCXL_IOCTL_IRQ_SET_FD:
                rc = copy_from_user(&irq_fd, (u64 __user *) args,
                                sizeof(irq_fd));
                if (rc)
                        return -EFAULT;
                if (irq_fd.reserved)
                        return -EINVAL;
                irq_id = ocxl_irq_offset_to_id(ctx, irq_fd.irq_offset);
                ev_ctx = eventfd_ctx_fdget(irq_fd.eventfd);
                if (IS_ERR(ev_ctx))
                        return PTR_ERR(ev_ctx);
                rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx);
                if (rc)
                        eventfd_ctx_put(ev_ctx);
                break;

        case OCXL_IOCTL_GET_METADATA:
                rc = afu_ioctl_get_metadata(ctx,
                                (struct ocxl_ioctl_metadata __user *) args);
                break;

#ifdef CONFIG_PPC64
        case OCXL_IOCTL_ENABLE_P9_WAIT:
                rc = afu_ioctl_enable_p9_wait(ctx,
                                (struct ocxl_ioctl_p9_wait __user *) args);
                break;
#endif

        case OCXL_IOCTL_GET_FEATURES:
                rc = afu_ioctl_get_features(ctx,
                                (struct ocxl_ioctl_features __user *) args);
                break;

        default:
                rc = -EINVAL;
        }
        return rc;
}
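
/*
 * Illustrative userspace sketch (not part of this driver): allocating an AFU
 * interrupt and wiring it to an eventfd, mirroring the OCXL_IOCTL_IRQ_ALLOC
 * and OCXL_IOCTL_IRQ_SET_FD cases handled above. OCXL_IOCTL_IRQ_ALLOC returns
 * a u64 MMIO offset that identifies the interrupt; the structure layout comes
 * from <misc/ocxl.h>.
 *
 *	#include <stdint.h>
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <misc/ocxl.h>
 *
 *	// 'fd' is an attached AFU context file descriptor
 *	int setup_afu_irq(int fd, uint64_t *irq_offset, int *event_fd)
 *	{
 *		struct ocxl_ioctl_irq_fd irq_fd = { 0 };	// reserved must stay 0
 *
 *		if (ioctl(fd, OCXL_IOCTL_IRQ_ALLOC, irq_offset))
 *			return -1;
 *
 *		*event_fd = eventfd(0, 0);
 *		if (*event_fd < 0)
 *			goto err_free;
 *
 *		irq_fd.irq_offset = *irq_offset;	// identifies the interrupt
 *		irq_fd.eventfd = *event_fd;		// signalled by irq_handler() above
 *		if (ioctl(fd, OCXL_IOCTL_IRQ_SET_FD, &irq_fd))
 *			goto err_free;
 *
 *		// A read() on *event_fd now blocks until the AFU raises the interrupt
 *		return 0;
 *
 *	err_free:
 *		ioctl(fd, OCXL_IOCTL_IRQ_FREE, irq_offset);
 *		return -1;
 *	}
 */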

static long afu_compat_ioctl(struct file *file, unsigned int cmd,
                unsigned long args)
{
        return afu_ioctl(file, cmd, args);
}

static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ocxl_context *ctx = file->private_data;

        pr_debug("%s for context %d\n", __func__, ctx->pasid);
        return ocxl_context_mmap(ctx, vma);
}

static bool has_xsl_error(struct ocxl_context *ctx)
{
        bool ret;

        mutex_lock(&ctx->xsl_error_lock);
        ret = !!ctx->xsl_error.addr;
        mutex_unlock(&ctx->xsl_error_lock);

        return ret;
}

/*
 * Are there any events pending on the AFU
 * ctx: The AFU context
 * Returns: true if there are events pending
 */
static bool afu_events_pending(struct ocxl_context *ctx)
{
        if (has_xsl_error(ctx))
                return true;
        return false;
}

static unsigned int afu_poll(struct file *file, struct poll_table_struct *wait)
{
        struct ocxl_context *ctx = file->private_data;
        unsigned int mask = 0;
        bool closed;

        pr_debug("%s for context %d\n", __func__, ctx->pasid);

        poll_wait(file, &ctx->events_wq, wait);

        mutex_lock(&ctx->status_mutex);
        closed = (ctx->status == CLOSED);
        mutex_unlock(&ctx->status_mutex);

        if (afu_events_pending(ctx))
                mask = EPOLLIN | EPOLLRDNORM;
        else if (closed)
                mask = EPOLLERR;

        return mask;
}

/*
 * Populate the supplied buffer with a single XSL error
 * ctx: The AFU context to report the error from
 * header: the event header to populate
 * buf: The buffer to write the body into (should be at least
 *      AFU_EVENT_BODY_MAX_SIZE)
 * Return: the amount of buffer that was populated
 */
static ssize_t append_xsl_error(struct ocxl_context *ctx,
                        struct ocxl_kernel_event_header *header,
                        char __user *buf)
{
        struct ocxl_kernel_event_xsl_fault_error body;

        memset(&body, 0, sizeof(body));

        mutex_lock(&ctx->xsl_error_lock);
        if (!ctx->xsl_error.addr) {
                mutex_unlock(&ctx->xsl_error_lock);
                return 0;
        }

        body.addr = ctx->xsl_error.addr;
        body.dsisr = ctx->xsl_error.dsisr;
        body.count = ctx->xsl_error.count;

        ctx->xsl_error.addr = 0;
        ctx->xsl_error.dsisr = 0;
        ctx->xsl_error.count = 0;

        mutex_unlock(&ctx->xsl_error_lock);

        header->type = OCXL_AFU_EVENT_XSL_FAULT_ERROR;

        if (copy_to_user(buf, &body, sizeof(body)))
                return -EFAULT;

        return sizeof(body);
}

#define AFU_EVENT_BODY_MAX_SIZE sizeof(struct ocxl_kernel_event_xsl_fault_error)

/*
 * Reports events on the AFU
 * Format:
 *	Header (struct ocxl_kernel_event_header)
 *	Body (struct ocxl_kernel_event_*)
 *	Header...
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
                        loff_t *off)
{
        struct ocxl_context *ctx = file->private_data;
        struct ocxl_kernel_event_header header;
        ssize_t rc;
        ssize_t used = 0;
        DEFINE_WAIT(event_wait);

        memset(&header, 0, sizeof(header));

        /* Require offset to be 0 */
        if (*off != 0)
                return -EINVAL;

        if (count < (sizeof(struct ocxl_kernel_event_header) +
                        AFU_EVENT_BODY_MAX_SIZE))
                return -EINVAL;

        for (;;) {
                prepare_to_wait(&ctx->events_wq, &event_wait,
                                TASK_INTERRUPTIBLE);

                if (afu_events_pending(ctx))
                        break;

                if (ctx->status == CLOSED)
                        break;

                if (file->f_flags & O_NONBLOCK) {
                        finish_wait(&ctx->events_wq, &event_wait);
                        return -EAGAIN;
                }

                if (signal_pending(current)) {
                        finish_wait(&ctx->events_wq, &event_wait);
                        return -ERESTARTSYS;
                }

                schedule();
        }

        finish_wait(&ctx->events_wq, &event_wait);

        if (has_xsl_error(ctx)) {
                used = append_xsl_error(ctx, &header, buf + sizeof(header));
                if (used < 0)
                        return used;
        }

        if (!afu_events_pending(ctx))
                header.flags |= OCXL_KERNEL_EVENT_FLAG_LAST;

        if (copy_to_user(buf, &header, sizeof(header)))
                return -EFAULT;

        used += sizeof(header);

        rc = used;
        return rc;
}
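
/*
 * Illustrative userspace sketch (not part of this driver): consuming the
 * event stream produced by afu_read() above. Each read() returns a
 * struct ocxl_kernel_event_header followed by the matching body; the header
 * layout and event types come from <misc/ocxl.h>. Looping until
 * OCXL_KERNEL_EVENT_FLAG_LAST is one way to drain all pending events.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <misc/ocxl.h>
 *
 *	// 'fd' is an attached AFU context file descriptor
 *	int drain_afu_events(int fd)
 *	{
 *		char buf[sizeof(struct ocxl_kernel_event_header) +
 *			 sizeof(struct ocxl_kernel_event_xsl_fault_error)];
 *		struct ocxl_kernel_event_header header;
 *		struct ocxl_kernel_event_xsl_fault_error fault;
 *
 *		do {
 *			if (read(fd, buf, sizeof(buf)) < (ssize_t) sizeof(header))
 *				return -1;
 *			memcpy(&header, buf, sizeof(header));
 *			if (header.type == OCXL_AFU_EVENT_XSL_FAULT_ERROR) {
 *				memcpy(&fault, buf + sizeof(header), sizeof(fault));
 *				fprintf(stderr, "XSL fault at %#llx, dsisr %#llx\n",
 *					(unsigned long long) fault.addr,
 *					(unsigned long long) fault.dsisr);
 *			}
 *		} while (!(header.flags & OCXL_KERNEL_EVENT_FLAG_LAST));
 *		return 0;
 *	}
 */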

static int afu_release(struct inode *inode, struct file *file)
{
        struct ocxl_context *ctx = file->private_data;
        int rc;

        pr_debug("%s for device %x\n", __func__, inode->i_rdev);
        rc = ocxl_context_detach(ctx);
        mutex_lock(&ctx->mapping_lock);
        ctx->mapping = NULL;
        mutex_unlock(&ctx->mapping_lock);
        wake_up_all(&ctx->events_wq);
        /*
         * Don't free the context if the detach didn't complete (-EBUSY):
         * the AFU may still reference it, so it is leaked rather than
         * risking a use-after-free.
         */
        if (rc != -EBUSY)
                ocxl_context_free(ctx);
        return 0;
}

static const struct file_operations ocxl_afu_fops = {
        .owner = THIS_MODULE,
        .open = afu_open,
        .unlocked_ioctl = afu_ioctl,
        .compat_ioctl = afu_compat_ioctl,
        .mmap = afu_mmap,
        .poll = afu_poll,
        .read = afu_read,
        .release = afu_release,
};

// Free the info struct
static void info_release(struct device *dev)
{
        struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev);

        ocxl_afu_put(info->afu);
        kfree(info);
}

static int ocxl_file_make_visible(struct ocxl_file_info *info)
{
        int rc;

        cdev_init(&info->cdev, &ocxl_afu_fops);
        rc = cdev_add(&info->cdev, info->dev.devt, 1);
        if (rc) {
                dev_err(&info->dev, "Unable to add afu char device: %d\n", rc);
                return rc;
        }

        return 0;
}

static void ocxl_file_make_invisible(struct ocxl_file_info *info)
{
        cdev_del(&info->cdev);
}

static char *ocxl_devnode(const struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev));
}

static const struct class ocxl_class = {
        .name = "ocxl",
        .devnode = ocxl_devnode,
};

int ocxl_file_register_afu(struct ocxl_afu *afu)
{
        int minor;
        int rc;
        struct ocxl_file_info *info;
        struct ocxl_fn *fn = afu->fn;
        struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                return -ENOMEM;

        minor = allocate_minor(info);
        if (minor < 0) {
                kfree(info);
                return minor;
        }

        info->dev.parent = &fn->dev;
        info->dev.devt = MKDEV(MAJOR(ocxl_dev), minor);
        info->dev.class = &ocxl_class;
        info->dev.release = info_release;

        info->afu = afu;
        ocxl_afu_get(afu);

        rc = dev_set_name(&info->dev, "%s.%s.%hhu",
                afu->config.name, dev_name(&pci_dev->dev), afu->config.idx);
        if (rc)
                goto err_put;

        rc = device_register(&info->dev);
        if (rc) {
                free_minor(info);
                put_device(&info->dev);
                return rc;
        }

        rc = ocxl_sysfs_register_afu(info);
        if (rc)
                goto err_unregister;

        rc = ocxl_file_make_visible(info);
        if (rc)
                goto err_unregister;

        ocxl_afu_set_private(afu, info);

        return 0;

err_unregister:
        ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
        free_minor(info);
        device_unregister(&info->dev);
        return rc;
err_put:
        ocxl_afu_put(afu);
        free_minor(info);
        kfree(info);
        return rc;
}

void ocxl_file_unregister_afu(struct ocxl_afu *afu)
{
        struct ocxl_file_info *info = ocxl_afu_get_private(afu);

        if (!info)
                return;

        ocxl_file_make_invisible(info);
        ocxl_sysfs_unregister_afu(info);
        free_minor(info);
        device_unregister(&info->dev);
}

int ocxl_file_init(void)
{
        int rc;

        idr_init(&minors_idr);

        rc = alloc_chrdev_region(&ocxl_dev, 0, OCXL_NUM_MINORS, "ocxl");
        if (rc) {
                pr_err("Unable to allocate ocxl major number: %d\n", rc);
                return rc;
        }

        rc = class_register(&ocxl_class);
        if (rc) {
                pr_err("Unable to create ocxl class\n");
                unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
                return rc;
        }

        return 0;
}

void ocxl_file_exit(void)
{
        class_unregister(&ocxl_class);
        unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS);
        idr_destroy(&minors_idr);
}