// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	genctrl.halt_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	genctrl.halt_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

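	/*
	 * Size the descriptor pool to what the WQ can actually accept: a
	 * dedicated WQ may use its full depth, while a shared WQ is limited
	 * to its configured threshold.
	 */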
	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * idxd->data->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	if (reset_config)
		idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

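/*
 * Reset the WQ in hardware and drop the driver's shadow of its
 * configuration; unlike idxd_wq_disable(), the cleanup here is
 * unconditional.
 */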
void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->threshold = 0;
	wq->priority = 0;
	wq->ats_dis = 0;
	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
	wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);

	wq->size = 0;
	wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
	struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

	complete(&wq->wq_dead);
}

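/*
 * Set up the wq_active percpu reference that tracks in-flight users of
 * the WQ. idxd_wq_ref_release() completes wq_dead once the reference is
 * killed and drops to zero, which is what __idxd_wq_quiesce() waits on.
 */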
int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
	int rc;

	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	reinit_completion(&wq->wq_resurrect);
	return 0;
}

void __idxd_wq_quiesce(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);
	reinit_completion(&wq->wq_resurrect);
	percpu_ref_kill(&wq->wq_active);
	complete_all(&wq->wq_resurrect);
	wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	mutex_unlock(&wq->wq_lock);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);
	return 0;
}

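/*
 * Submit a device command and sleep until the command interrupt signals
 * completion. idxd->cmd_lock plus the IDXD_FLAG_CMD_RUNNING flag ensure
 * only one command is outstanding at a time; the raw command status is
 * returned through *status when the caller asks for it.
 */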
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 stat;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock(&idxd->cmd_lock);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock(&idxd->cmd_lock);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	idxd_unmask_error_interrupts(idxd);
	spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

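/*
 * Ask the device for an interrupt handle for the given vector index.
 * This is only available when CMDCAP advertises the Request Interrupt
 * Handle command; otherwise callers fall back to using the MSI-X index
 * directly (see idxd_wq_request_irq()).
 */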
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "get int handle, idx %d\n", idx);

	operand = idx & GENMASK(15, 0);
	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

	idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "request int handle failed: %#x\n", status);
		return -ENXIO;
	}

	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

	dev_dbg(dev, "int handle acquired: %u\n", *handle);
	return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;
	union idxd_command_reg cmd;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "release int handle, handle %d\n", handle);

	memset(&cmd, 0, sizeof(cmd));
	operand = handle & GENMASK(15, 0);

	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
	cmd.operand = operand;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "release int handle failed: %#x\n", status);
		return -ENXIO;
	}

	dev_dbg(dev, "int handle released.\n");
	return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_rdbuf_limit = false;
		group->rdbufs_allowed = 0;
		group->rdbufs_reserved = 0;
		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
	}
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
		idxd_wq_device_reset_cleanup(wq);
	}
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return;

	idxd_groups_clear_state(idxd);
	idxd_engines_clear_state(idxd);
	idxd_device_wqs_clear_state(idxd);
}

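/*
 * Push one group's shadow configuration (WQ membership, engine
 * membership, and flags) into its GRPWQCFG/GRPENGCFG/GRPFLGCFG
 * registers, reading each back for the debug log.
 */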
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth rdbuf limit */
	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.rdbuf_limit = idxd->rdbuf_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memsetting the entire shadow copy of WQCFG, copy it back
	 * from the hardware after wq reset. This preserves the sticky values
	 * that are present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
	}

	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
		wq->size = WQ_DEFAULT_QUEUE_DEPTH;

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	if (device_pasid_enabled(idxd)) {
		wq->wqcfg->pasid_en = 1;
		if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
			wq->wqcfg->pasid = idxd->pasid;
	}

	/*
	 * Here the priv bit is set depending on the WQ type. priv = 1 if the
	 * WQ type is kernel, to indicate privileged access. This setting only
	 * matters for a dedicated WQ. According to the DSA spec:
	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
	 * Privileged Mode Enable field of the PCI Express PASID capability
	 * is 0, this field must be 0.
	 *
	 * If a dedicated kernel WQ cannot support the PASID capability,
	 * the configuration is rejected.
	 */
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = wq->ats_dis;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
		if (group->rdbufs_allowed)
			group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
		else
			group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;

		if (wq_shared(wq) && !device_swq_supported(idxd)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}

	return 0;
}

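/*
 * Validate and program the full device configuration: build the per-group
 * WQ and engine bitmaps, fill in the group flag defaults, then write the
 * WQ and group configuration registers. Called with dev_lock held.
 */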
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int wqcfg_offset;
	int i;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;

	/* The driver does not support shared WQ mode in read-only config yet */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

	return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, j, grpcfg_offset;

	/*
	 * Load WQS bit fields
	 * Iterate through all 256 bits 64 bits at a time
	 */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		struct idxd_wq *wq;

		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits and check for wq set */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			/* No need to check beyond max wqs */
			if (id >= idxd->max_wqs)
				break;

			/* Set group assignment for wq if wq bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through all 64 bits to check engines set */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;

		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

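/*
 * Rebuild the driver's shadow state from whatever configuration is
 * already programmed in the registers, e.g. when the device's
 * configuration is read-only rather than driver-writable.
 */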
int idxd_device_load_config(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i, rc;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->rdbuf_limit = reg.rdbuf_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;
	LIST_HEAD(flist);
	enum idxd_complete_type ctype;

	spin_lock(&ie->list_lock);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(desc, itr, head, llnode)
			list_add_tail(&desc->list, &ie->work_list);
	}

	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
		list_move_tail(&desc->list, &flist);
	spin_unlock(&ie->list_lock);

	list_for_each_entry_safe(desc, itr, &flist, list) {
		list_del(&desc->list);
		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
		idxd_dma_complete_txd(desc, ctype, true);
	}
}

static void idxd_device_set_perm_entry(struct idxd_device *idxd,
				       struct idxd_irq_entry *ie)
{
	union msix_perm mperm;

	if (ie->pasid == INVALID_IOASID)
		return;

	mperm.bits = 0;
	mperm.pasid = ie->pasid;
	mperm.pasid_en = 1;
	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
					 struct idxd_irq_entry *ie)
{
	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = &wq->ie;

	synchronize_irq(ie->vector);
	free_irq(ie->vector, ie);
	idxd_flush_pending_descs(ie);
	if (idxd->request_int_handles)
		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
	idxd_device_clear_perm_entry(idxd, ie);
	ie->vector = -1;
	ie->int_handle = INVALID_INT_HANDLE;
	ie->pasid = INVALID_IOASID;
}

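/*
 * Wire up the WQ's completion interrupt: resolve the MSI-X vector,
 * program the MSIX permission entry with the PASID when PASID is
 * enabled, and obtain an interrupt handle from the device when it
 * requires one (otherwise the MSI-X index is used as the handle).
 */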
int idxd_wq_request_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int rc;

	ie = &wq->ie;
	ie->vector = pci_irq_vector(pdev, ie->id);
	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
	idxd_device_set_perm_entry(idxd, ie);

	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
		goto err_irq;
	}

	if (idxd->request_int_handles) {
		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
						    IDXD_IRQ_MSIX);
		if (rc < 0)
			goto err_int_handle;
	} else {
		ie->int_handle = ie->id;
	}

	return 0;

err_int_handle:
	ie->int_handle = INVALID_INT_HANDLE;
	free_irq(ie->vector, ie);
err_irq:
	idxd_device_clear_perm_entry(idxd, ie);
	ie->pasid = INVALID_IOASID;
	return rc;
}

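/*
 * Enable a WQ on behalf of a wq driver: sanity-check the device and WQ
 * state, group attachment, name, and shared-WQ threshold, then write the
 * device configuration (if configurable), enable the WQ, and map its
 * portal. Caller must hold wq->wq_lock.
 */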
int __drv_enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc = -ENXIO;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
		goto err;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		dev_dbg(dev, "wq %d already enabled.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
		rc = -EBUSY;
		goto err;
	}

	if (!wq->group) {
		dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
		goto err;
	}

	if (strlen(wq->name) == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
		dev_dbg(dev, "wq %d name not set.\n", wq->id);
		goto err;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!device_swq_supported(idxd)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			dev_dbg(dev, "PASID not enabled and shared wq.\n");
			goto err;
		}
		/*
		 * A shared wq with the threshold set to 0 means the user did
		 * not set the threshold, or transitioned from a dedicated wq
		 * without setting one. A value of 0 would effectively disable
		 * the shared wq, and the driver does not allow 0 to be set
		 * for the threshold via sysfs.
		 */
		if (wq->threshold == 0) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
			dev_dbg(dev, "Shared wq and threshold 0.\n");
			goto err;
		}
	}

	rc = 0;
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0) {
		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
		goto err_map_portal;
	}

	wq->client_count = 0;
	return 0;

err_map_portal:
	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
	return rc;
}

int drv_enable_wq(struct idxd_wq *wq)
{
	int rc;

	mutex_lock(&wq->wq_lock);
	rc = __drv_enable_wq(wq);
	mutex_unlock(&wq->wq_lock);
	return rc;
}

void __drv_disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	wq->client_count = 0;
}

void drv_disable_wq(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int rc = 0;

	/*
	 * The device should be in the disabled state for the idxd_drv to load.
	 * If it is in the enabled state, then the device was altered outside
	 * of the driver's control. If the device is halted, don't proceed.
	 */
	if (idxd->state != IDXD_DEV_DISABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
		return -ENXIO;
	}

	/* Device configuration */
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0)
		return -ENXIO;

	/* Start device */
	rc = idxd_device_enable(idxd);
	if (rc < 0)
		return rc;

	/* Setup DMA device without channels */
	rc = idxd_register_dma_device(idxd);
	if (rc < 0) {
		idxd_device_disable(idxd);
		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
		return rc;
	}

	idxd->cmd_status = 0;
	return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];
		struct device *wq_dev = wq_confdev(wq);

		if (wq->state == IDXD_WQ_DISABLED)
			continue;
		dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
		device_release_driver(wq_dev);
	}

	idxd_unregister_dma_device(idxd);
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_DSA,
	IDXD_DEV_IAX,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
	.type = dev_types,
	.probe = idxd_device_drv_probe,
	.remove = idxd_device_drv_remove,
	.name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);