// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	genctrl.halt_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	genctrl.halt_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

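/*
 * Note on the allocators above: both descriptor arrays are sized per WQ
 * and allocated NUMA-local to the device via dev_to_node(), so the
 * descriptors live on the same node as the device that touches them.
 * A minimal sketch of the same pattern (hypothetical "foo" objects,
 * for illustration only):
 *
 *	int node = dev_to_node(dev);
 *	struct foo **arr = kcalloc_node(num, sizeof(*arr), GFP_KERNEL, node);
 *
 *	for (i = 0; i < num; i++)
 *		arr[i] = kzalloc_node(sizeof(*arr[i]), GFP_KERNEL, node);
 *
 * On a partial failure the free_* helpers walk all wq->num_descs slots
 * (set by the caller beforehand); kcalloc() zeroes the array and
 * kfree(NULL) is a no-op, so the walk is safe unconditionally.
 */
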
/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * idxd->data->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

fail_sbitmap_init:
	free_descs(wq);
fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	if (reset_config)
		idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

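/*
 * The WQ commands above (disable/drain, and reset below) appear to
 * encode their operand as a one-hot WQ mask in bits 15:0 plus a 16-WQ
 * bank index in the upper bits. Worked example: for wq->id == 18,
 *
 *	operand = BIT(18 % 16) | ((18 / 16) << 16)
 *		= BIT(2) | (1 << 16)
 *		= 0x10004;
 *
 * i.e. "WQ 2 of bank 1". The arithmetic is taken from the code itself;
 * see the DSA specification for the authoritative operand layout.
 */
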
void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->threshold = 0;
	wq->priority = 0;
	wq->ats_dis = 0;
	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
	wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);

	idxd_wq_disable_cleanup(wq);
	wq->size = 0;
	wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
	struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

	complete(&wq->wq_dead);
}

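/*
 * Lifecycle of wq->wq_active (a sketch of the pattern, not a spec):
 *
 *	idxd_wq_init_percpu_ref(wq);        // ref live, wq accepts work
 *	...
 *	percpu_ref_kill(&wq->wq_active);    // switch to atomic, drain refs
 *	wait_for_completion(&wq->wq_dead);  // idxd_wq_ref_release() fired
 *
 * idxd_wq_ref_release() runs once the last reference is dropped, so
 * waiting on wq_dead guarantees no submitter still holds the wq.
 * PERCPU_REF_ALLOW_REINIT (below) lets the same ref be revived on the
 * next enable.
 */
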
int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
	int rc;

	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	reinit_completion(&wq->wq_resurrect);
	return 0;
}

void __idxd_wq_quiesce(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);
	reinit_completion(&wq->wq_resurrect);
	percpu_ref_kill(&wq->wq_active);
	complete_all(&wq->wq_resurrect);
	wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	mutex_unlock(&wq->wq_lock);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);
	return 0;
}

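/*
 * idxd_cmd_exec() below is the interrupt-driven counterpart to the
 * polled reset above. The shape of the protocol (sketch):
 *
 *	spin_lock(&idxd->cmd_lock);
 *	// wait until no other command is in flight
 *	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
 *	spin_unlock(&idxd->cmd_lock);
 *	wait_for_completion(&done);	// completed from the irq handler
 *	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
 *
 * Only one command register exists per device, hence the
 * IDXD_FLAG_CMD_RUNNING gate and the cmd_waitq for queued callers.
 */
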
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 stat;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock(&idxd->cmd_lock);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock(&idxd->cmd_lock);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	idxd_unmask_error_interrupts(idxd);
	spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "get int handle, idx %d\n", idx);

	operand = idx & GENMASK(15, 0);
	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

	idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "request int handle failed: %#x\n", status);
		return -ENXIO;
	}

	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

	dev_dbg(dev, "int handle acquired: %u\n", *handle);
	return 0;
}

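/*
 * Unlike the request path above, idxd_device_release_int_handle() below
 * submits its command by polling CMDSTS under cmd_lock (the same
 * technique as idxd_device_init_reset()) rather than going through
 * idxd_cmd_exec(). Presumably this is because release runs on the
 * interrupt teardown path, where relying on a command-completion
 * interrupt would be fragile; treat that rationale as an inference from
 * the code, not a documented requirement.
 */
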
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;
	union idxd_command_reg cmd;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "release int handle, handle %d\n", handle);

	memset(&cmd, 0, sizeof(cmd));
	operand = handle & GENMASK(15, 0);

	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
	cmd.operand = operand;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "release int handle failed: %#x\n", status);
		return -ENXIO;
	}

	dev_dbg(dev, "int handle released.\n");
	return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_rdbuf_limit = false;
		group->rdbufs_allowed = 0;
		group->rdbufs_reserved = 0;
		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
	}
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			idxd_wq_device_reset_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
	idxd_groups_clear_state(idxd);
	idxd_engines_clear_state(idxd);
	idxd_device_wqs_clear_state(idxd);
}

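/*
 * Each group is described by three register sets written below:
 * GRPWQCFG (a 256-bit WQ bitmap, written as GRPWQCFG_STRIDES 64-bit
 * words), GRPENGCFG (a 64-bit engine bitmap) and GRPFLGCFG (traffic
 * class and read-buffer flags). The offsets come from the
 * GRP*CFG_OFFSET() macros in registers.h. The dev_dbg() read-back after
 * each write is for debug logging and incidentally flushes the posted
 * MMIO write.
 */
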
static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth rdbuf limit */
	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.rdbuf_limit = idxd->rdbuf_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memset()ing the entire shadow copy of WQCFG, copy from
	 * the hardware after wq reset. This copies back the sticky values
	 * that are present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
	}

	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
		wq->size = WQ_DEFAULT_QUEUE_DEPTH;

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	if (device_pasid_enabled(idxd)) {
		wq->wqcfg->pasid_en = 1;
		if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
			wq->wqcfg->pasid = idxd->pasid;
	}

	/*
	 * Here the priv bit is set depending on the WQ type. priv = 1 if the
	 * WQ type is kernel to indicate privileged access. This setting only
	 * matters for dedicated WQ. According to the DSA spec:
	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
	 * Privileged Mode Enable field of the PCI Express PASID capability
	 * is 0, this field must be 0.
	 *
	 * In the case of a dedicated kernel WQ that is not able to support
	 * the PASID cap, the configuration will be rejected.
	 */
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = wq->ats_dis;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
		if (group->rdbufs_allowed)
			group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
		else
			group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;

		if (wq_shared(wq) && !device_swq_supported(idxd)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}

	return 0;
}

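/*
 * idxd_device_config() below drives the whole soft-config sequence.
 * In order (a roadmap derived from the calls themselves):
 *
 *	idxd_wqs_setup(idxd);           // build per-group WQ bitmaps
 *	idxd_engines_setup(idxd);       // build per-group engine bitmaps
 *	idxd_group_flags_setup(idxd);   // resolve TC and rdbuf defaults
 *	idxd_wqs_config_write(idxd);    // push WQCFG to hardware
 *	idxd_groups_config_write(idxd); // push GRPCFG/GENCFG to hardware
 *
 * Shadow state is fully computed before any register write, so a
 * validation failure (e.g. -EINVAL from the setup steps) leaves the
 * hardware untouched.
 */
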
int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int wqcfg_offset;
	int i;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;

	/* The driver does not support shared WQ mode in read-only config yet */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

	return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, j, grpcfg_offset;

	/*
	 * Load WQS bit fields
	 * Iterate through all 256 bits 64 bits at a time
	 */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		struct idxd_wq *wq;

		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits and check for wq set */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			/* No need to check beyond max wqs */
			if (id >= idxd->max_wqs)
				break;

			/* Set group assignment for wq if wq bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through all 64 bits to check engines set */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;

		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

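/*
 * idxd_device_load_config() below is the inverse of idxd_device_config():
 * on devices without a writable configuration the registers arrive
 * pre-programmed (e.g. by firmware or a virtualization host), and the
 * driver reads them back to reconstruct its shadow state. Rough flow:
 *
 *	reg.bits = ioread32(... IDXD_GENCFG_OFFSET);   // rdbuf limit
 *	for each group:  idxd_group_load_config(group);
 *	for each wq:     idxd_wq_load_config(wq);      // may -EOPNOTSUPP
 */
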
int idxd_device_load_config(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i, rc;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->rdbuf_limit = reg.rdbuf_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;
	LIST_HEAD(flist);
	enum idxd_complete_type ctype;

	spin_lock(&ie->list_lock);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(desc, itr, head, llnode)
			list_add_tail(&desc->list, &ie->work_list);
	}

	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
		list_move_tail(&desc->list, &flist);
	spin_unlock(&ie->list_lock);

	list_for_each_entry_safe(desc, itr, &flist, list) {
		list_del(&desc->list);
		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
		idxd_dma_complete_txd(desc, ctype, true);
	}
}

static void idxd_device_set_perm_entry(struct idxd_device *idxd,
				       struct idxd_irq_entry *ie)
{
	union msix_perm mperm;

	if (ie->pasid == INVALID_IOASID)
		return;

	mperm.bits = 0;
	mperm.pasid = ie->pasid;
	mperm.pasid_en = 1;
	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
					 struct idxd_irq_entry *ie)
{
	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = &wq->ie;

	synchronize_irq(ie->vector);
	free_irq(ie->vector, ie);
	idxd_flush_pending_descs(ie);
	if (idxd->request_int_handles)
		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
	idxd_device_clear_perm_entry(idxd, ie);
	ie->vector = -1;
	ie->int_handle = INVALID_INT_HANDLE;
	ie->pasid = INVALID_IOASID;
}

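/*
 * idxd_wq_request_irq() below is the mirror image of idxd_wq_free_irq()
 * above; the two walk the same steps in opposite order:
 *
 *	request: perm entry -> request_threaded_irq() -> int handle
 *	free:    synchronize/free irq -> flush descs -> release handle
 *	         -> clear perm entry
 *
 * The error unwind labels in the request path undo exactly the steps
 * that succeeded, in reverse.
 */
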
int idxd_wq_request_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int rc;

	ie = &wq->ie;
	ie->vector = pci_irq_vector(pdev, ie->id);
	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
	idxd_device_set_perm_entry(idxd, ie);

	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
		goto err_irq;
	}

	if (idxd->request_int_handles) {
		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
						    IDXD_IRQ_MSIX);
		if (rc < 0)
			goto err_int_handle;
	} else {
		ie->int_handle = ie->id;
	}

	return 0;

err_int_handle:
	ie->int_handle = INVALID_INT_HANDLE;
	free_irq(ie->vector, ie);
err_irq:
	idxd_device_clear_perm_entry(idxd, ie);
	ie->pasid = INVALID_IOASID;
	return rc;
}

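/*
 * __drv_enable_wq() below performs the validation-then-commit dance for
 * a single WQ. Abridged (error handling elided; see the function body):
 *
 *	// validate: device enabled, wq disabled, group + name set,
 *	// shared-wq support and nonzero threshold if wq_shared(wq)
 *	idxd_device_config(idxd);   // only if IDXD_FLAG_CONFIGURABLE
 *	idxd_wq_enable(wq);
 *	idxd_wq_map_portal(wq);
 *
 * Each failure records a specific IDXD_SCMD_* code in idxd->cmd_status
 * so user space can distinguish why an enable was refused.
 */
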
1258 */ 1259 if (wq->threshold == 0) { 1260 idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH; 1261 dev_dbg(dev, "Shared wq and threshold 0.\n"); 1262 goto err; 1263 } 1264 } 1265 1266 rc = 0; 1267 spin_lock(&idxd->dev_lock); 1268 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1269 rc = idxd_device_config(idxd); 1270 spin_unlock(&idxd->dev_lock); 1271 if (rc < 0) { 1272 dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc); 1273 goto err; 1274 } 1275 1276 rc = idxd_wq_enable(wq); 1277 if (rc < 0) { 1278 dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc); 1279 goto err; 1280 } 1281 1282 rc = idxd_wq_map_portal(wq); 1283 if (rc < 0) { 1284 idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR; 1285 dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc); 1286 goto err_map_portal; 1287 } 1288 1289 wq->client_count = 0; 1290 return 0; 1291 1292 err_map_portal: 1293 rc = idxd_wq_disable(wq, false); 1294 if (rc < 0) 1295 dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq))); 1296 err: 1297 return rc; 1298 } 1299 1300 int drv_enable_wq(struct idxd_wq *wq) 1301 { 1302 int rc; 1303 1304 mutex_lock(&wq->wq_lock); 1305 rc = __drv_enable_wq(wq); 1306 mutex_unlock(&wq->wq_lock); 1307 return rc; 1308 } 1309 1310 void __drv_disable_wq(struct idxd_wq *wq) 1311 { 1312 struct idxd_device *idxd = wq->idxd; 1313 struct device *dev = &idxd->pdev->dev; 1314 1315 lockdep_assert_held(&wq->wq_lock); 1316 1317 if (idxd_wq_refcount(wq)) 1318 dev_warn(dev, "Clients has claim on wq %d: %d\n", 1319 wq->id, idxd_wq_refcount(wq)); 1320 1321 idxd_wq_unmap_portal(wq); 1322 1323 idxd_wq_drain(wq); 1324 idxd_wq_reset(wq); 1325 1326 wq->client_count = 0; 1327 } 1328 1329 void drv_disable_wq(struct idxd_wq *wq) 1330 { 1331 mutex_lock(&wq->wq_lock); 1332 __drv_disable_wq(wq); 1333 mutex_unlock(&wq->wq_lock); 1334 } 1335 1336 int idxd_device_drv_probe(struct idxd_dev *idxd_dev) 1337 { 1338 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); 1339 int rc = 0; 1340 1341 /* 1342 * Device should be in disabled state for the idxd_drv to load. If it's in 1343 * enabled state, then the device was altered outside of driver's control. 1344 * If the state is in halted state, then we don't want to proceed. 
1345 */ 1346 if (idxd->state != IDXD_DEV_DISABLED) { 1347 idxd->cmd_status = IDXD_SCMD_DEV_ENABLED; 1348 return -ENXIO; 1349 } 1350 1351 /* Device configuration */ 1352 spin_lock(&idxd->dev_lock); 1353 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1354 rc = idxd_device_config(idxd); 1355 spin_unlock(&idxd->dev_lock); 1356 if (rc < 0) 1357 return -ENXIO; 1358 1359 /* Start device */ 1360 rc = idxd_device_enable(idxd); 1361 if (rc < 0) 1362 return rc; 1363 1364 /* Setup DMA device without channels */ 1365 rc = idxd_register_dma_device(idxd); 1366 if (rc < 0) { 1367 idxd_device_disable(idxd); 1368 idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR; 1369 return rc; 1370 } 1371 1372 idxd->cmd_status = 0; 1373 return 0; 1374 } 1375 1376 void idxd_device_drv_remove(struct idxd_dev *idxd_dev) 1377 { 1378 struct device *dev = &idxd_dev->conf_dev; 1379 struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); 1380 int i; 1381 1382 for (i = 0; i < idxd->max_wqs; i++) { 1383 struct idxd_wq *wq = idxd->wqs[i]; 1384 struct device *wq_dev = wq_confdev(wq); 1385 1386 if (wq->state == IDXD_WQ_DISABLED) 1387 continue; 1388 dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev)); 1389 device_release_driver(wq_dev); 1390 } 1391 1392 idxd_unregister_dma_device(idxd); 1393 idxd_device_disable(idxd); 1394 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1395 idxd_device_reset(idxd); 1396 } 1397 1398 static enum idxd_dev_type dev_types[] = { 1399 IDXD_DEV_DSA, 1400 IDXD_DEV_IAX, 1401 IDXD_DEV_NONE, 1402 }; 1403 1404 struct idxd_device_driver idxd_drv = { 1405 .type = dev_types, 1406 .probe = idxd_device_drv_probe, 1407 .remove = idxd_device_drv_remove, 1408 .name = "idxd", 1409 }; 1410 EXPORT_SYMBOL_GPL(idxd_drv); 1411