// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

#define NO_CLIENT	0xff	/* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS];	/* use an array rather than */
						/* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
static bool ism_v2_capable;
struct ism_dev_list {
	struct list_head list;
	struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
	.list = LIST_HEAD_INIT(ism_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};

static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
	unsigned long flags;

	spin_lock_irqsave(&ism->lock, flags);
	ism->subs[client->id] = client;
	spin_unlock_irqrestore(&ism->lock, flags);
}

int ism_register_client(struct ism_client *client)
{
	struct ism_dev *ism;
	int i, rc = -ENOSPC;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < MAX_CLIENTS; ++i) {
		if (!clients[i]) {
			clients[i] = client;
			client->id = i;
			if (i == max_client)
				max_client++;
			rc = 0;
			break;
		}
	}
	mutex_unlock(&clients_lock);

	if (i < MAX_CLIENTS) {
		/* initialize with all devices that we got so far */
		list_for_each_entry(ism, &ism_dev_list.list, list) {
			ism->priv[i] = NULL;
			client->add(ism);
			ism_setup_forwarding(client, ism);
		}
	}
	mutex_unlock(&ism_dev_list.mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);

int ism_unregister_client(struct ism_client *client)
{
	struct ism_dev *ism;
	unsigned long flags;
	int rc = 0;

	mutex_lock(&ism_dev_list.mutex);
	list_for_each_entry(ism, &ism_dev_list.list, list) {
		spin_lock_irqsave(&ism->lock, flags);
		/* Stop forwarding IRQs and events */
		ism->subs[client->id] = NULL;
		for (int i = 0; i < ISM_NR_DMBS; ++i) {
			if (ism->sba_client_arr[i] == client->id) {
				WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
				     __func__, client->name);
				rc = -EBUSY;
				goto err_reg_dmb;
			}
		}
		spin_unlock_irqrestore(&ism->lock, flags);
	}
	mutex_unlock(&ism_dev_list.mutex);

	mutex_lock(&clients_lock);
	clients[client->id] = NULL;
	if (client->id + 1 == max_client)
		max_client--;
	mutex_unlock(&clients_lock);
	return rc;

err_reg_dmb:
	spin_unlock_irqrestore(&ism->lock, flags);
	mutex_unlock(&ism_dev_list.mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);
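
/*
 * Example: a minimal ISM client (illustration only; the "example_*"
 * names are hypothetical). The callback set matches struct ism_client
 * as used throughout this file:
 *
 *	static void example_add(struct ism_dev *ism) { }
 *	static void example_remove(struct ism_dev *ism) { }
 *	static void example_handle_event(struct ism_dev *ism,
 *					 struct ism_event *event) { }
 *	static void example_handle_irq(struct ism_dev *ism, unsigned int bit,
 *				       u16 dmbemask) { }
 *
 *	static struct ism_client example_client = {
 *		.name	      = "example",
 *		.add	      = example_add,
 *		.remove	      = example_remove,
 *		.handle_event = example_handle_event,
 *		.handle_irq   = example_handle_irq,
 *	};
 *
 * ism_register_client(&example_client) then invokes ->add() for every
 * device already known and forwards future IRQs and events; a client
 * must unregister all of its DMBs before ism_unregister_client().
 */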

static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	/* Write the request payload first, then the header; the device must
	 * see the complete payload by the time the header lands at offset 0.
	 */
	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	/* Fetch the response payload only after a successful return code */
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->local_gid = cmd.response.gid;
out:
	return ret;
}

static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
		       DMA_FROM_DEVICE);
	folio_put(virt_to_folio(dmb->cpu_addr));
}

static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	struct folio *folio;
	unsigned long bit;
	int rc;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
			    __GFP_NORETRY, get_order(dmb->dmb_len));

	if (!folio) {
		rc = -ENOMEM;
		goto out_bit;
	}

	dmb->cpu_addr = folio_address(folio);
	dmb->dma_addr = dma_map_page(&ism->pdev->dev,
				     virt_to_page(dmb->cpu_addr), 0,
				     dmb->dmb_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
		rc = -ENOMEM;
		goto out_free;
	}

	return 0;

out_free:
	/* the buffer came from folio_alloc(), so drop the folio reference */
	folio_put(folio);
out_bit:
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	return rc;
}

int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
		     struct ism_client *client)
{
	union ism_reg_dmb cmd;
	unsigned long flags;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
	spin_unlock_irqrestore(&ism->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);

int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	union ism_unreg_dmb cmd;
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
	spin_unlock_irqrestore(&ism->lock, flags);

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);
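
/*
 * Example DMB lifecycle (illustration only; error handling elided, and
 * "ism", "remote_gid" and "example_client" are hypothetical):
 *
 *	struct ism_dmb dmb = {};
 *
 *	dmb.dmb_len = PAGE_SIZE;
 *	dmb.rgid = remote_gid;
 *	if (!ism_register_dmb(ism, &dmb, &example_client)) {
 *		// the peer writes via dmb.dmb_tok; IRQs for this DMB are
 *		// routed to example_client through sba_client_arr[]
 *		ism_unregister_dmb(ism, &dmb);
 *	}
 */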

static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
	     unsigned int offset, void *data, unsigned int size)
{
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ism_move);
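
/*
 * Worked example for the chunking in ism_move() (illustrative numbers,
 * assuming PAGE_SIZE == 4096): a move of size = 6000 at offset = 3000 is
 * issued as three requests of max_bytes() = 1096, 4096 and 808 bytes, so
 * that no single request crosses a page boundary in the target DMB; the
 * signal flag 'sf' is applied only to the final chunk (size == bytes).
 */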

static void ism_handle_event(struct ism_dev *ism)
{
	struct ism_event *entry;
	struct ism_client *clt;
	int i;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		for (i = 0; i < max_client; ++i) {
			clt = ism->subs[i];
			if (clt)
				clt->handle_event(ism, entry);
		}
	}
}

static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;
	u8 client_id;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		client_id = ism->sba_client_arr[bit];
		if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
			continue;
		ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int i, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
	if (!ism->sba_client_arr) {
		ret = -ENOMEM;
		goto free_vectors;
	}
	memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_client_arr;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_v2_capable = true;
	else
		ism_v2_capable = false;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i]) {
			clients[i]->add(ism);
			ism_setup_forwarding(clients[i], ism);
		}
	}
	mutex_unlock(&clients_lock);

	list_add(&ism->list, &ism_dev_list.list);
	mutex_unlock(&ism_dev_list.mutex);

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
	kfree(ism->sba_client_arr);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

static void ism_dev_release(struct device *dev)
{
	struct ism_dev *ism;

	ism = container_of(dev, struct ism_dev, dev);

	kfree(ism);
}

static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;
	ism->dev.parent = &pdev->dev;
	ism->dev.release = ism_dev_release;
	device_initialize(&ism->dev);
	dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
	ret = device_add(&ism->dev);
	if (ret)
		goto err_dev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ret = ism_dev_init(ism);
	if (ret)
		goto err_resource;

	return 0;

err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	device_del(&ism->dev);
err_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	put_device(&ism->dev);

	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ism->lock, flags);
	for (i = 0; i < max_client; ++i)
		ism->subs[i] = NULL;
	spin_unlock_irqrestore(&ism->lock, flags);

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i])
			clients[i]->remove(ism);
	}
	mutex_unlock(&clients_lock);

	if (ism_v2_capable)
		ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	kfree(ism->sba_client_arr);
	pci_free_irq_vectors(pdev);
	list_del_init(&ism->list);
	mutex_unlock(&ism_dev_list.mutex);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	device_del(&ism->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	put_device(&ism->dev);
}

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	memset(clients, 0, sizeof(clients));
	max_client = 0;
	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}
module_init(ism_init);
module_exit(ism_exit);

/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 vid_valid, u32 vid)
{
	return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
}

static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
			     void *client)
{
	return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}

static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}

static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_del_vlan_id(smcd->priv, vlan_id);
}

static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
			   u32 trigger_irq, u32 event_code, u64 info)
{
	return ism_signal_ieq(smcd->priv, rgid->gid,
			      trigger_irq, event_code, info);
}

static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		     bool sf, unsigned int offset, void *data,
		     unsigned int size)
{
	return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}

static int smcd_supports_v2(void)
{
	return ism_v2_capable;
}

static u64 ism_get_local_gid(struct ism_dev *ism)
{
	return ism->local_gid;
}

static void smcd_get_local_gid(struct smcd_dev *smcd,
			       struct smcd_gid *smcd_gid)
{
	smcd_gid->gid = ism_get_local_gid(smcd->priv);
	smcd_gid->gid_ext = 0;
}

static u16 ism_get_chid(struct ism_dev *ism)
{
	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

static u16 smcd_get_chid(struct smcd_dev *smcd)
{
	return ism_get_chid(smcd->priv);
}

static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
	struct ism_dev *ism = dev->priv;

	return &ism->dev;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = smcd_query_rgid,
	.register_dmb = smcd_register_dmb,
	.unregister_dmb = smcd_unregister_dmb,
	.add_vlan_id = smcd_add_vlan_id,
	.del_vlan_id = smcd_del_vlan_id,
	.set_vlan_required = smcd_set_vlan_required,
	.reset_vlan_required = smcd_reset_vlan_required,
	.signal_event = smcd_signal_ieq,
	.move_data = smcd_move,
	.supports_v2 = smcd_supports_v2,
	.get_local_gid = smcd_get_local_gid,
	.get_chid = smcd_get_chid,
	.get_dev = smcd_get_dev,
};

const struct smcd_ops *ism_get_smcd_ops(void)
{
	return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif
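
/*
 * Usage sketch (illustration only, not part of this driver): an SMC-D
 * backend is expected to fetch this ops table and attach it to its
 * smcd_dev, roughly:
 *
 *	const struct smcd_ops *ops = ism_get_smcd_ops();
 *
 * The actual wiring lives in net/smc and may differ in detail.
 */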