/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
#include <linux/fs.h>

#include "cxl.h"

struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
	struct address_space *mapping;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int rc;

	afu = cxl_pci_to_afu(dev);

	ctx = cxl_context_alloc();
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_dev;
	}

	ctx->kernelapi = true;

	/*
	 * Make our own address space since we won't have one from the
	 * filesystem like the user api has, and even if we do associate a file
	 * with this context we don't want to use the global anonymous inode's
	 * address space as that can invalidate unrelated users:
	 */
	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
	if (!mapping) {
		rc = -ENOMEM;
		goto err_ctx;
	}
	address_space_init_once(mapping);

	/* Make it a slave context.  We can promote it later? */
	rc = cxl_context_init(ctx, afu, false, mapping);
	if (rc)
		goto err_mapping;

	return ctx;

err_mapping:
	kfree(mapping);
err_ctx:
	kfree(ctx);
err_dev:
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);

struct cxl_context *cxl_get_context(struct pci_dev *dev)
{
	return dev->dev.archdata.cxl_ctx;
}
EXPORT_SYMBOL_GPL(cxl_get_context);

int cxl_release_context(struct cxl_context *ctx)
{
	if (ctx->status >= STARTED)
		return -EBUSY;

	cxl_context_free(ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_release_context);

static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
	__u16 range;
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		range = ctx->irqs.range[r];
		if (num < range) {
			return ctx->irqs.offset[r] + num;
		}
		num -= range;
	}
	return 0;
}

int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
	if (!ctx)
		return -EINVAL;

	ctx->priv = priv;

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_priv);

void *cxl_get_priv(struct cxl_context *ctx)
{
	if (!ctx)
		return ERR_PTR(-EINVAL);

	return ctx->priv;
}
EXPORT_SYMBOL_GPL(cxl_get_priv);
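
/*
 * Illustrative sketch (not part of this driver): how an AFU driver's probe
 * path might use the helpers above to obtain a kernel API context for its
 * cxl-attached PCI function, hang its own state off it, and release it on
 * failure.  struct my_afu_state and my_afu_probe() are hypothetical names.
 *
 *	static int my_afu_probe(struct pci_dev *dev)
 *	{
 *		struct my_afu_state *state;
 *		struct cxl_context *ctx;
 *
 *		ctx = cxl_dev_context_init(dev);
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state) {
 *			cxl_release_context(ctx);
 *			return -ENOMEM;
 *		}
 *
 *		state->ctx = ctx;
 *		cxl_set_priv(ctx, state);
 *		return 0;
 *	}
 *
 * The stored pointer is recovered later with cxl_get_priv(ctx), and the
 * context is freed with cxl_release_context() once it is no longer started.
 */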

int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
{
	int res;
	irq_hw_number_t hwirq;

	if (num == 0)
		num = ctx->afu->pp_irqs;
	res = afu_allocate_irqs(ctx, num);
	if (res)
		return res;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		/* In a guest, the PSL interrupt is not multiplexed.  It was
		 * allocated above, and we need to set its handler
		 */
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq)
			cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
	}

	if (ctx->status == STARTED) {
		if (cxl_ops->update_ivtes)
			cxl_ops->update_ivtes(ctx);
		else
			WARN(1, "BUG: cxl_allocate_afu_irqs must be called prior to starting the context on this platform\n");
	}

	return res;
}
EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);

void cxl_free_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	if (!cpu_has_feature(CPU_FTR_HVMODE)) {
		hwirq = cxl_find_afu_irq(ctx, 0);
		if (hwirq) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}
	afu_irq_name_free(ctx);
	cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);

int cxl_map_afu_irq(struct cxl_context *ctx, int num,
		    irq_handler_t handler, void *cookie, char *name)
{
	irq_hw_number_t hwirq;

	/*
	 * Find interrupt we are to register.
	 */
	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return -ENOENT;

	return cxl_map_irq(ctx->afu->adapter, hwirq, handler, cookie, name);
}
EXPORT_SYMBOL_GPL(cxl_map_afu_irq);

void cxl_unmap_afu_irq(struct cxl_context *ctx, int num, void *cookie)
{
	irq_hw_number_t hwirq;
	unsigned int virq;

	hwirq = cxl_find_afu_irq(ctx, num);
	if (!hwirq)
		return;

	virq = irq_find_mapping(NULL, hwirq);
	if (virq)
		cxl_unmap_irq(virq, cookie);
}
EXPORT_SYMBOL_GPL(cxl_unmap_afu_irq);

/*
 * Start a context
 * Code here similar to afu_ioctl_start_work().
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task)
{
	int rc = 0;
	bool kernel = true;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status == STARTED)
		goto out; /* already started */

	if (task) {
		ctx->pid = get_task_pid(task, PIDTYPE_PID);
		ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
		kernel = false;
		ctx->real_mode = false;
	}

	cxl_ctx_get();

	if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
		put_pid(ctx->pid);
		cxl_ctx_put();
		goto out;
	}

	ctx->status = STARTED;
out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(cxl_start_context);

int cxl_process_element(struct cxl_context *ctx)
{
	return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);
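
/*
 * Illustrative sketch (not part of this driver): typical use of the interrupt
 * and start helpers above.  my_afu_irq_handler(), my_afu_start() and struct
 * my_afu_state are hypothetical names.  AFU interrupt 0 is used by cxl itself
 * for the PSL interrupt (see cxl_allocate_afu_irqs() above), so the sketch
 * assumes the first driver-usable interrupt is number 1.
 *
 *	static irqreturn_t my_afu_irq_handler(int irq, void *cookie)
 *	{
 *		struct my_afu_state *state = cookie;
 *
 *		complete(&state->irq_done);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_afu_start(struct cxl_context *ctx,
 *				struct my_afu_state *state, u64 wed)
 *	{
 *		int rc;
 *
 *		rc = cxl_allocate_afu_irqs(ctx, 0);
 *		if (rc)
 *			return rc;
 *
 *		rc = cxl_map_afu_irq(ctx, 1, my_afu_irq_handler, state, "my_afu");
 *		if (rc <= 0) {
 *			rc = rc ? rc : -ENOMEM;
 *			goto out_free_irqs;
 *		}
 *
 *		rc = cxl_start_context(ctx, wed, NULL);
 *		if (rc)
 *			goto out_unmap;
 *		return 0;
 *
 *	out_unmap:
 *		cxl_unmap_afu_irq(ctx, 1, state);
 *	out_free_irqs:
 *		cxl_free_afu_irqs(ctx);
 *		return rc;
 *	}
 *
 * Passing 0 to cxl_allocate_afu_irqs() requests the AFU's default number of
 * per-process interrupts, and passing a NULL task to cxl_start_context()
 * attaches the context as a kernel context.  The non-positive return check on
 * cxl_map_afu_irq() is an assumption that a failed mapping never yields a
 * valid (positive) virq.
 */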

/* Stop a context.  Returns 0 on success, otherwise -Errno */
int cxl_stop_context(struct cxl_context *ctx)
{
	return __detach_context(ctx);
}
EXPORT_SYMBOL_GPL(cxl_stop_context);

void cxl_set_master(struct cxl_context *ctx)
{
	ctx->master = true;
}
EXPORT_SYMBOL_GPL(cxl_set_master);

int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
{
	if (ctx->status == STARTED) {
		/*
		 * We could potentially update the PE and issue an update LLCMD
		 * to support this, but it doesn't seem to have a good use case
		 * since it's trivial to just create a second kernel context
		 * with different translation modes, so until someone convinces
		 * me otherwise:
		 */
		return -EBUSY;
	}

	ctx->real_mode = real_mode;
	return 0;
}
EXPORT_SYMBOL_GPL(cxl_set_translation_mode);

/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
	return afu_open(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_open);
int cxl_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}
EXPORT_SYMBOL_GPL(cxl_fd_release);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
EXPORT_SYMBOL_GPL(cxl_fd_ioctl);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
	return afu_poll(file, poll);
}
EXPORT_SYMBOL_GPL(cxl_fd_poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
		    loff_t *off)
{
	return afu_read(file, buf, count, off);
}
EXPORT_SYMBOL_GPL(cxl_fd_read);

#define PATCH_FOPS(NAME) if (!fops->NAME) fops->NAME = afu_fops.NAME

/* Get a struct file and fd for a context and attach the ops */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd)
{
	struct file *file;
	int rc, flags, fdtmp;

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (rc < 0)
		return ERR_PTR(rc);
	fdtmp = rc;

	/*
	 * Patch the file ops.  Need to be careful that this is reentrant safe.
	 */
	if (fops) {
		PATCH_FOPS(open);
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(unlocked_ioctl);
		PATCH_FOPS(compat_ioctl);
		PATCH_FOPS(mmap);
	} else /* use default ops */
		fops = (struct file_operations *)&afu_fops;

	file = anon_inode_getfile("cxl", fops, ctx, flags);
	if (IS_ERR(file))
		goto err_fd;

	file->f_mapping = ctx->mapping;

	*fd = fdtmp;
	return file;

err_fd:
	put_unused_fd(fdtmp);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxl_get_fd);

struct cxl_context *cxl_fops_get_context(struct file *file)
{
	return file->private_data;
}
EXPORT_SYMBOL_GPL(cxl_fops_get_context);

void cxl_set_driver_ops(struct cxl_context *ctx,
			struct cxl_afu_driver_ops *ops)
{
	WARN_ON(!ops->fetch_event || !ops->event_delivered);
	atomic_set(&ctx->afu_driver_events, 0);
	ctx->afu_driver_ops = ops;
}
EXPORT_SYMBOL_GPL(cxl_set_driver_ops);

void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events)
{
	atomic_add(new_events, &ctx->afu_driver_events);
	wake_up_all(&ctx->wq);
}
EXPORT_SYMBOL_GPL(cxl_context_events_pending);

int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work)
{
	int rc;

	/* code taken from afu_ioctl_start_work */
	if (!(work->flags & CXL_START_WORK_NUM_IRQS))
		work->num_interrupts = ctx->afu->pp_irqs;
	else if ((work->num_interrupts < ctx->afu->pp_irqs) ||
		 (work->num_interrupts > ctx->afu->irqs_max)) {
		return -EINVAL;
	}

	rc = afu_register_irqs(ctx, work->num_interrupts);
	if (rc)
		return rc;

	rc = cxl_start_context(ctx, work->work_element_descriptor, current);
	if (rc < 0) {
		afu_release_irqs(ctx, ctx);
		return rc;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxl_start_work);

void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
	if (ctx->status != STARTED)
		return NULL;

	pr_devel("%s: psn_phys:%llx size:%llx\n",
		 __func__, ctx->psn_phys, ctx->psn_size);
	return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);

void cxl_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL_GPL(cxl_psa_unmap);

int cxl_afu_reset(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;
	int rc;

	rc = cxl_ops->afu_reset(afu);
	if (rc)
		return rc;

	return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);

void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image)
{
	afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);

ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
{
	struct cxl_afu *afu = cxl_pci_to_afu(dev);

	return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
}
EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
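
/*
 * Illustrative sketch (not part of this driver): exporting a kernel API
 * context to userspace with cxl_get_fd().  The driver supplies its own
 * file_operations and fills in only the entries it wants to override;
 * cxl_get_fd() patches the common entries that were left NULL (open, poll,
 * read, release, the ioctls and mmap) with the default afu_fops handlers.
 * my_afu_fops, my_afu_ioctl() and my_afu_export() are hypothetical names.
 *
 *	static struct file_operations my_afu_fops = {
 *		.owner		= THIS_MODULE,
 *		.unlocked_ioctl	= my_afu_ioctl,
 *	};
 *
 *	static int my_afu_export(struct cxl_context *ctx)
 *	{
 *		struct file *file;
 *		int fd;
 *
 *		file = cxl_get_fd(ctx, &my_afu_fops, &fd);
 *		if (IS_ERR_OR_NULL(file))
 *			return file ? PTR_ERR(file) : -ENOMEM;
 *
 *		fd_install(fd, file);
 *		return fd;
 *	}
 *
 * Inside my_afu_ioctl() the context is recovered from the struct file with
 * cxl_fops_get_context(file).  Note that cxl_get_fd() only reserves the fd;
 * it is published to userspace with fd_install() once nothing else can fail.
 */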