// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

#define NO_CLIENT		0xff		/* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS];	/* use an array rather than */
						/* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
static bool ism_v2_capable;
struct ism_dev_list {
	struct list_head list;
	struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
	.list = LIST_HEAD_INIT(ism_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};

static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
	unsigned long flags;

	spin_lock_irqsave(&ism->lock, flags);
	ism->subs[client->id] = client;
	spin_unlock_irqrestore(&ism->lock, flags);
}

int ism_register_client(struct ism_client *client)
{
	struct ism_dev *ism;
	int i, rc = -ENOSPC;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < MAX_CLIENTS; ++i) {
		if (!clients[i]) {
			clients[i] = client;
			client->id = i;
			if (i == max_client)
				max_client++;
			rc = 0;
			break;
		}
	}
	mutex_unlock(&clients_lock);

	if (i < MAX_CLIENTS) {
		/* initialize with all devices that we got so far */
		list_for_each_entry(ism, &ism_dev_list.list, list) {
			ism->priv[i] = NULL;
			client->add(ism);
			ism_setup_forwarding(client, ism);
		}
	}
	mutex_unlock(&ism_dev_list.mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);

int ism_unregister_client(struct ism_client *client)
{
	struct ism_dev *ism;
	unsigned long flags;
	int rc = 0;

	mutex_lock(&ism_dev_list.mutex);
	list_for_each_entry(ism, &ism_dev_list.list, list) {
		spin_lock_irqsave(&ism->lock, flags);
		/* Stop forwarding IRQs and events */
		ism->subs[client->id] = NULL;
		for (int i = 0; i < ISM_NR_DMBS; ++i) {
			if (ism->sba_client_arr[i] == client->id) {
				WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
				     __func__, client->name);
				rc = -EBUSY;
				goto err_reg_dmb;
			}
		}
		spin_unlock_irqrestore(&ism->lock, flags);
	}
	mutex_unlock(&ism_dev_list.mutex);

	mutex_lock(&clients_lock);
	clients[client->id] = NULL;
	if (client->id + 1 == max_client)
		max_client--;
	mutex_unlock(&clients_lock);
	return rc;

err_reg_dmb:
	spin_unlock_irqrestore(&ism->lock, flags);
	mutex_unlock(&ism_dev_list.mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);

static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	spin_lock(&ism->cmd_lock);
	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
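	/* the request header at offset 0 is written last, after the payload */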
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	spin_unlock(&ism->cmd_lock);
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->local_gid = cmd.response.gid;
out:
	return ret;
}
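
/*
 * DMB handling: ism_alloc_dmb() reserves a slot in the SBA bitmap and backs
 * the buffer with a DMA-mapped folio; ism_free_dmb() releases the mapping,
 * the folio and the bitmap slot again.
 */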
static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
		       DMA_FROM_DEVICE);
	folio_put(virt_to_folio(dmb->cpu_addr));
}

static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	struct folio *folio;
	unsigned long bit;
	int rc;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
			    __GFP_NORETRY, get_order(dmb->dmb_len));

	if (!folio) {
		rc = -ENOMEM;
		goto out_bit;
	}

	dmb->cpu_addr = folio_address(folio);
	dmb->dma_addr = dma_map_page(&ism->pdev->dev,
				     virt_to_page(dmb->cpu_addr), 0,
				     dmb->dmb_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
		rc = -ENOMEM;
		goto out_free;
	}

	return 0;

out_free:
	/* the buffer came from folio_alloc(), not kmalloc(): drop the folio */
	folio_put(folio);
out_bit:
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	return rc;
}

int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
		     struct ism_client *client)
{
	union ism_reg_dmb cmd;
	unsigned long flags;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
	spin_unlock_irqrestore(&ism->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);

int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	union ism_unreg_dmb cmd;
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
	spin_unlock_irqrestore(&ism->lock, flags);

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);

static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
	     unsigned int offset, void *data, unsigned int size)
{
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ism_move);

static void ism_handle_event(struct ism_dev *ism)
{
	struct ism_event *entry;
	struct ism_client *clt;
	int i;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		for (i = 0; i < max_client; ++i) {
			clt = ism->subs[i];
			if (clt)
				clt->handle_event(ism, entry);
		}
	}
}

static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;
	u8 client_id;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		client_id = ism->sba_client_arr[bit];
		if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
			continue;
		ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int i, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
	if (!ism->sba_client_arr) {
		/* don't leak the positive vector count as a return value */
		ret = -ENOMEM;
		goto free_vectors;
	}
	memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_client_arr;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_v2_capable = true;
	else
		ism_v2_capable = false;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i]) {
			clients[i]->add(ism);
			ism_setup_forwarding(clients[i], ism);
		}
	}
	mutex_unlock(&clients_lock);

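	/* publish the device so subsequent ism_register_client() calls see it */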
	list_add(&ism->list, &ism_dev_list.list);
	mutex_unlock(&ism_dev_list.mutex);

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
	kfree(ism->sba_client_arr);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

static void ism_dev_release(struct device *dev)
{
	struct ism_dev *ism;

	ism = container_of(dev, struct ism_dev, dev);

	kfree(ism);
}

static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	spin_lock_init(&ism->cmd_lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;
	ism->dev.parent = &pdev->dev;
	ism->dev.release = ism_dev_release;
	device_initialize(&ism->dev);
	dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
	ret = device_add(&ism->dev);
	if (ret)
		goto err_dev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ret = ism_dev_init(ism);
	if (ret)
		goto err_resource;

	return 0;

err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	device_del(&ism->dev);
err_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	put_device(&ism->dev);

	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ism->lock, flags);
	for (i = 0; i < max_client; ++i)
		ism->subs[i] = NULL;
	spin_unlock_irqrestore(&ism->lock, flags);

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i])
			clients[i]->remove(ism);
	}
	mutex_unlock(&clients_lock);

	if (ism_v2_capable)
		ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	kfree(ism->sba_client_arr);
	pci_free_irq_vectors(pdev);
	list_del_init(&ism->list);
	mutex_unlock(&ism_dev_list.mutex);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	device_del(&ism->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	put_device(&ism->dev);
}

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	memset(clients, 0, sizeof(clients));
	max_client = 0;
	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

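/* tear down in reverse order of ism_init() */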
static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);

/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 vid_valid, u32 vid)
{
	return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
}

static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
			     void *client)
{
	return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}

static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}

static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_del_vlan_id(smcd->priv, vlan_id);
}

static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 trigger_irq, u32 event_code, u64 info)
{
	return ism_signal_ieq(smcd->priv, rgid->gid,
			      trigger_irq, event_code, info);
}

static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		     bool sf, unsigned int offset, void *data,
		     unsigned int size)
{
	return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}

static int smcd_supports_v2(void)
{
	return ism_v2_capable;
}

static u64 ism_get_local_gid(struct ism_dev *ism)
{
	return ism->local_gid;
}

static void smcd_get_local_gid(struct smcd_dev *smcd,
			       struct smcd_gid *smcd_gid)
{
	smcd_gid->gid = ism_get_local_gid(smcd->priv);
	smcd_gid->gid_ext = 0;
}

static u16 ism_get_chid(struct ism_dev *ism)
{
	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

static u16 smcd_get_chid(struct smcd_dev *smcd)
{
	return ism_get_chid(smcd->priv);
}

static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
	struct ism_dev *ism = dev->priv;

	return &ism->dev;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = smcd_query_rgid,
	.register_dmb = smcd_register_dmb,
	.unregister_dmb = smcd_unregister_dmb,
	.add_vlan_id = smcd_add_vlan_id,
	.del_vlan_id = smcd_del_vlan_id,
	.set_vlan_required = smcd_set_vlan_required,
	.reset_vlan_required = smcd_reset_vlan_required,
	.signal_event = smcd_signal_ieq,
	.move_data = smcd_move,
	.supports_v2 = smcd_supports_v2,
	.get_local_gid = smcd_get_local_gid,
	.get_chid = smcd_get_chid,
	.get_dev = smcd_get_dev,
};

const struct smcd_ops *ism_get_smcd_ops(void)
{
	return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif