// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

#define NO_CLIENT		0xff		/* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS];	/* use an array rather than */
						/* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
static bool ism_v2_capable;
struct ism_dev_list {
	struct list_head list;
	struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
	.list = LIST_HEAD_INIT(ism_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};

static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
	unsigned long flags;

	spin_lock_irqsave(&ism->lock, flags);
	ism->subs[client->id] = client;
	spin_unlock_irqrestore(&ism->lock, flags);
}

int ism_register_client(struct ism_client *client)
{
	struct ism_dev *ism;
	int i, rc = -ENOSPC;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < MAX_CLIENTS; ++i) {
		if (!clients[i]) {
			clients[i] = client;
			client->id = i;
			if (i == max_client)
				max_client++;
			rc = 0;
			break;
		}
	}
	mutex_unlock(&clients_lock);

	if (i < MAX_CLIENTS) {
		/* initialize with all devices that we got so far */
		list_for_each_entry(ism, &ism_dev_list.list, list) {
			ism->priv[i] = NULL;
			client->add(ism);
			ism_setup_forwarding(client, ism);
		}
	}
	mutex_unlock(&ism_dev_list.mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);

int ism_unregister_client(struct ism_client *client)
{
	struct ism_dev *ism;
	unsigned long flags;
	int rc = 0;

	mutex_lock(&ism_dev_list.mutex);
	list_for_each_entry(ism, &ism_dev_list.list, list) {
		spin_lock_irqsave(&ism->lock, flags);
		/* Stop forwarding IRQs and events */
		ism->subs[client->id] = NULL;
		for (int i = 0; i < ISM_NR_DMBS; ++i) {
			if (ism->sba_client_arr[i] == client->id) {
				WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
				     __func__, client->name);
				rc = -EBUSY;
				goto err_reg_dmb;
			}
		}
		spin_unlock_irqrestore(&ism->lock, flags);
	}
	mutex_unlock(&ism_dev_list.mutex);

	mutex_lock(&clients_lock);
	clients[client->id] = NULL;
	if (client->id + 1 == max_client)
		max_client--;
	mutex_unlock(&clients_lock);
	return rc;

err_reg_dmb:
	spin_unlock_irqrestore(&ism->lock, flags);
	mutex_unlock(&ism_dev_list.mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);

static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	spin_lock(&ism->cmd_lock);
	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

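	/*
	 * Preset the return code so that a response that cannot be
	 * read back from the device still reads as failed (ISM_ERROR).
	 */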
	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	spin_unlock(&ism->cmd_lock);
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->local_gid = cmd.response.gid;
out:
	return ret;
}

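/*
 * DMB helpers: receive buffers come from the page allocator as
 * physically contiguous folios and are DMA-mapped per DMB;
 * sba_bitmap tracks which SBA slots are in use.
 */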
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
		       DMA_FROM_DEVICE);
	folio_put(virt_to_folio(dmb->cpu_addr));
}

static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	struct folio *folio;
	unsigned long bit;
	int rc;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
			    __GFP_NORETRY, get_order(dmb->dmb_len));

	if (!folio) {
		rc = -ENOMEM;
		goto out_bit;
	}

	dmb->cpu_addr = folio_address(folio);
	dmb->dma_addr = dma_map_page(&ism->pdev->dev,
				     virt_to_page(dmb->cpu_addr), 0,
				     dmb->dmb_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
		rc = -ENOMEM;
		goto out_free;
	}

	return 0;

out_free:
	/* the buffer was allocated as a folio, not with kmalloc() */
	folio_put(folio);
out_bit:
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	return rc;
}

int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
		     struct ism_client *client)
{
	union ism_reg_dmb cmd;
	unsigned long flags;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
	spin_unlock_irqrestore(&ism->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);

int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	union ism_unreg_dmb cmd;
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
	spin_unlock_irqrestore(&ism->lock, flags);

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);

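/*
 * VLAN registration: both commands simply pass the VLAN ID on to the
 * device. The reserved VLAN ID doubles as a v2-capability probe in
 * ism_dev_init() below.
 */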
static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
	     unsigned int offset, void *data, unsigned int size)
{
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		/* never cross a page boundary in a single request */
		bytes = max_bytes(offset, size, PAGE_SIZE);
		/* raise the signal flag only on the final chunk */
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ism_move);

static void ism_handle_event(struct ism_dev *ism)
{
	struct ism_event *entry;
	struct ism_client *clt;
	int i;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		for (i = 0; i < max_client; ++i) {
			clt = ism->subs[i];
			if (clt)
				clt->handle_event(ism, entry);
		}
	}
}

static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;
	u8 client_id;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	/* reset the summary indicator before scanning the DMB bits */
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		client_id = ism->sba_client_arr[bit];
		if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
			continue;
		ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

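/*
 * Bring-up for one ISM device: allocate the MSI vector and the per-DMB
 * client map, register SBA and IEQ with the device, read the local GID,
 * probe for v2 capability via the reserved VLAN ID, then notify all
 * registered clients.
 */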
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int i, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
	if (!ism->sba_client_arr) {
		ret = -ENOMEM;
		goto free_vectors;
	}
	memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_client_arr;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_v2_capable = true;
	else
		ism_v2_capable = false;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i]) {
			clients[i]->add(ism);
			ism_setup_forwarding(clients[i], ism);
		}
	}
	mutex_unlock(&clients_lock);

	list_add(&ism->list, &ism_dev_list.list);
	mutex_unlock(&ism_dev_list.mutex);

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
	kfree(ism->sba_client_arr);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

static void ism_dev_release(struct device *dev)
{
	struct ism_dev *ism;

	ism = container_of(dev, struct ism_dev, dev);

	kfree(ism);
}

static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	spin_lock_init(&ism->cmd_lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;
	ism->dev.parent = &pdev->dev;
	ism->dev.release = ism_dev_release;
	device_initialize(&ism->dev);
	dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
	ret = device_add(&ism->dev);
	if (ret)
		goto err_dev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ret = ism_dev_init(ism);
	if (ret)
		goto err_resource;

	return 0;

err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	device_del(&ism->dev);
err_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	put_device(&ism->dev);

	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ism->lock, flags);
	for (i = 0; i < max_client; ++i)
		ism->subs[i] = NULL;
	spin_unlock_irqrestore(&ism->lock, flags);

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i])
			clients[i]->remove(ism);
	}
	mutex_unlock(&clients_lock);

	if (ism_v2_capable)
		ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	kfree(ism->sba_client_arr);
	pci_free_irq_vectors(pdev);
	list_del_init(&ism->list);
	mutex_unlock(&ism_dev_list.mutex);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	device_del(&ism->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	put_device(&ism->dev);
}

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};

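/*
 * The s390 debug facility is registered before the PCI driver so that
 * command failures during probe can already be traced; module exit
 * tears things down in reverse order.
 */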
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	memset(clients, 0, sizeof(clients));
	max_client = 0;
	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);

/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 vid_valid, u32 vid)
{
	return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
}

static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
			     void *client)
{
	return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}

static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}

static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_del_vlan_id(smcd->priv, vlan_id);
}

static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 trigger_irq, u32 event_code, u64 info)
{
	return ism_signal_ieq(smcd->priv, rgid->gid,
			      trigger_irq, event_code, info);
}

static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		     bool sf, unsigned int offset, void *data,
		     unsigned int size)
{
	return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}

static int smcd_supports_v2(void)
{
	return ism_v2_capable;
}

static u64 ism_get_local_gid(struct ism_dev *ism)
{
	return ism->local_gid;
}

static void smcd_get_local_gid(struct smcd_dev *smcd,
			       struct smcd_gid *smcd_gid)
{
	smcd_gid->gid = ism_get_local_gid(smcd->priv);
	smcd_gid->gid_ext = 0;
}

static u16 ism_get_chid(struct ism_dev *ism)
{
	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

static u16 smcd_get_chid(struct smcd_dev *smcd)
{
	return ism_get_chid(smcd->priv);
}

static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
	struct ism_dev *ism = dev->priv;

	return &ism->dev;
}

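/*
 * Dispatch table wiring the generic SMC-D operations to their ISM
 * implementations; smcd->priv carries the struct ism_dev.
 */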
static const struct smcd_ops ism_ops = {
	.query_remote_gid = smcd_query_rgid,
	.register_dmb = smcd_register_dmb,
	.unregister_dmb = smcd_unregister_dmb,
	.add_vlan_id = smcd_add_vlan_id,
	.del_vlan_id = smcd_del_vlan_id,
	.set_vlan_required = smcd_set_vlan_required,
	.reset_vlan_required = smcd_reset_vlan_required,
	.signal_event = smcd_signal_ieq,
	.move_data = smcd_move,
	.supports_v2 = smcd_supports_v2,
	.get_local_gid = smcd_get_local_gid,
	.get_chid = smcd_get_chid,
	.get_dev = smcd_get_dev,
};

const struct smcd_ops *ism_get_smcd_ops(void)
{
	return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif