1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * IUCV base infrastructure. 4 * 5 * Copyright IBM Corp. 2001, 2009 6 * 7 * Author(s): 8 * Original source: 9 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 10 * Xenia Tkatschow (xenia@us.ibm.com) 11 * 2Gb awareness and general cleanup: 12 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) 13 * Rewritten for af_iucv: 14 * Martin Schwidefsky <schwidefsky@de.ibm.com> 15 * PM functions: 16 * Ursula Braun (ursula.braun@de.ibm.com) 17 * 18 * Documentation used: 19 * The original source 20 * CP Programming Service, IBM document # SC24-5760 21 */ 22 23 #define pr_fmt(fmt) "iucv: " fmt 24 25 #include <linux/kernel_stat.h> 26 #include <linux/export.h> 27 #include <linux/module.h> 28 #include <linux/moduleparam.h> 29 #include <linux/spinlock.h> 30 #include <linux/kernel.h> 31 #include <linux/slab.h> 32 #include <linux/init.h> 33 #include <linux/interrupt.h> 34 #include <linux/list.h> 35 #include <linux/errno.h> 36 #include <linux/err.h> 37 #include <linux/device.h> 38 #include <linux/cpu.h> 39 #include <linux/reboot.h> 40 #include <net/iucv/iucv.h> 41 #include <linux/atomic.h> 42 #include <asm/machine.h> 43 #include <asm/ebcdic.h> 44 #include <asm/io.h> 45 #include <asm/irq.h> 46 #include <asm/smp.h> 47 48 /* 49 * FLAGS: 50 * All flags are defined in the field IPFLAGS1 of each function 51 * and can be found in CP Programming Services. 52 * IPSRCCLS - Indicates you have specified a source class. 53 * IPTRGCLS - Indicates you have specified a target class. 54 * IPFGPID - Indicates you have specified a pathid. 55 * IPFGMID - Indicates you have specified a message ID. 56 * IPNORPY - Indicates a one-way message. No reply expected. 57 * IPALL - Indicates that all paths are affected. 
 */
#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80

/*
 * Bus match callback. Always returns 0, i.e. the driver core never
 * auto-binds IUCV devices; iucv_alloc_device() assigns dev->driver
 * explicitly instead.
 */
static int iucv_bus_match(struct device *dev, const struct device_driver *drv)
{
	return 0;
}

const struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
};
EXPORT_SYMBOL(iucv_bus);

/* Parent device for all devices on the iucv bus. */
static struct device *iucv_root;

/* Release callback for devices created by iucv_alloc_device(). */
static void iucv_release_device(struct device *device)
{
	kfree(device);
}

/**
 * iucv_alloc_device - allocate and initialize a device on the iucv bus
 * @attrs: attribute groups installed on the new device (may be NULL)
 * @driver: driver the device is bound to
 * @priv: driver private data, stored via dev_set_drvdata()
 * @fmt: printf-style format string for the device name
 *
 * The formatted name is truncated to fit a 20-byte buffer (including the
 * terminating NUL). The returned device must be registered by the caller;
 * it is freed through iucv_release_device() when the last reference goes.
 *
 * Returns: the initialized device, or NULL on allocation/naming failure.
 */
struct device *iucv_alloc_device(const struct attribute_group **attrs,
				 struct device_driver *driver,
				 void *priv, const char *fmt, ...)
{
	struct device *dev;
	va_list vargs;
	char buf[20];
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		goto out_error;
	va_start(vargs, fmt);
	/* vscnprintf always NUL-terminates within buf */
	vscnprintf(buf, sizeof(buf), fmt, vargs);
	rc = dev_set_name(dev, "%s", buf);
	va_end(vargs);
	if (rc)
		goto out_error;
	dev->bus = &iucv_bus;
	dev->parent = iucv_root;
	dev->driver = driver;
	dev->groups = attrs;
	dev->release = iucv_release_device;
	dev_set_drvdata(dev, priv);
	return dev;

out_error:
	kfree(dev);	/* kfree(NULL) is a no-op */
	return NULL;
}
EXPORT_SYMBOL(iucv_alloc_device);

/* Non-zero when IUCV can be used (presumably set at init; init code not in this chunk). */
static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u32 res2[9];
};

/* List node carrying one copy of interrupt data for deferred delivery. */
struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

/* Per-cpu external interrupt buffer declared to CP. */
static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
/* CPUs that currently have a declared interrupt buffer. */
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
/* CPUs with IUCV interrupts currently enabled. */
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

/*
 * Queue of interrupt buffers lock for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET_OLD(iucv_tasklet, iucv_tasklet_fn);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);

/*
 * Subfunction codes of the IUCV instruction (b2f0), as defined in
 * CP Programming Services.
 */
enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: array of pointers to iucv_path structures,
 * indexed by pathid; sized from iucv_max_pathid in iucv_enable().
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 * Layout is defined by the CP interface: must stay packed and 8-byte aligned.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u16 ipmsglim;
	u16 res1;
	u8 ipvmid[8];
	u8 ipuser[16];
	u8 iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8 iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	dma32_t ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	dma32_t ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	dma32_t ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8 ipflags1;
	u8 iprcode;
	u32 ipmsgid;
	u8 ipaudit[3];
	u8 res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_allow_cpu and iucv_block_cpu.
 */
struct iucv_cmd_set_mask {
	u8 ipmask;
	u8 res1[2];
	u8 iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 * iucv_param is used from bh-disabled paths, iucv_param_irq from code
 * that runs with interrupts excluded (e.g. iucv_sever_pathid, the
 * allow/block/declare cpu helpers).
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];

/**
 * __iucv_call_b2f0 - Calls CP to execute IUCV commands.
 *
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a struct iucv_parm block
 *
 * The 0xb2f01000 opcode is the IUCV instruction; it takes the subfunction
 * code in general register 0 and the physical address of the parameter
 * block in general register 1.
 *
 * Returns: the result of the CP IUCV call.
 */
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
	unsigned long reg1 = virt_to_phys(parm);
	int cc;

	asm volatile(
		" lgr 0,%[reg0]\n"
		" lgr 1,%[reg1]\n"
		" .long 0xb2f01000\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=&d" (cc), "+m" (*parm)
		: [reg0] "d" ((unsigned long)command),
		  [reg1] "d" (reg1)
		: "cc", "0", "1");
	return cc;
}

/*
 * Condition code 1 indicates a parameter error; in that case the
 * request-specific reason code from iprcode is returned instead of the cc.
 */
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	int ccode;

	ccode = __iucv_call_b2f0(command, parm);
	return ccode == 1 ? parm->ctrl.iprcode : ccode;
}

/*
 * iucv_query_maxconn - Determine the maximum number of connections that
 * may be established.
 *
 * Returns: 0 on success (iucv_max_pathid is updated), -ENOMEM if the
 * parameter block cannot be allocated, or -EPERM if IUCV is not
 * available.
 */
static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
{
	unsigned long reg1 = virt_to_phys(param);
	int cc;

	asm volatile (
		" lghi 0,%[cmd]\n"
		" lgr 1,%[reg1]\n"
		" .long 0xb2f01000\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		" lgr %[reg1],1\n"
		: [cc] "=&d" (cc), [reg1] "+&d" (reg1)
		: [cmd] "K" (IUCV_QUERY)
		: "cc", "0", "1");
	/* IUCV QUERY returns the maximum pathid in general register 1 */
	*max_pathid = reg1;
	return cc;
}

static int iucv_query_maxconn(void)
{
	unsigned long max_pathid;
	void *param;
	int ccode;

	/* GFP_DMA: the parameter block must reside below 2G */
	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA);
	if (!param)
		return -ENOMEM;
	ccode = __iucv_query_maxconn(param, &max_pathid);
	if (ccode == 0)
		iucv_max_pathid = max_pathid;
	kfree(param);
	return ccode ? -EPERM : 0;
}

/**
 * iucv_allow_cpu - Allow iucv interrupts on this cpu.
 *
 * @data: unused
 *
 * Must run on the cpu whose mask is to be changed (called via
 * smp_call_function_single or with preemption excluded).
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 * 0x80 - Flag to allow nonpriority message pending interrupts
	 * 0x40 - Flag to allow priority message pending interrupts
	 * 0x20 - Flag to allow nonpriority message completion interrupts
	 * 0x10 - Flag to allow priority message completion interrupts
	 * 0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 * 0x80 - Flag to allow pending connections interrupts
	 * 0x40 - Flag to allow connection complete interrupts
	 * 0x20 - Flag to allow connection severed interrupts
	 * 0x10 - Flag to allow connection quiesced interrupts
	 * 0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_block_cpu - Block iucv interrupts on this cpu.
 *
 * @data: unused
 *
 * Counterpart of iucv_allow_cpu(); an all-zero mask disables every
 * IUCV interrupt class on the executing cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

/**
 * iucv_declare_cpu - Declare a interrupt buffer on this cpu.
 *
 * @data: unused
 *
 * Declares the per-cpu external interrupt buffer to CP and then either
 * enables or disables IUCV interrupts on this cpu, depending on whether
 * non-smp handlers are registered. No-op if a buffer is already declared.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_dma32(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		/* Translate the CP reason codes into readable text. */
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
			cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

/**
 * iucv_retrieve_cpu - Retrieve interrupt buffer on this cpu.
 *
 * @data: unused
 *
 * Blocks IUCV interrupts first, then returns the interrupt buffer to CP.
 * No-op if this cpu has no declared buffer.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}

/*
 * iucv_setmask_mp - Allow iucv interrupts on all cpus.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	cpus_read_unlock();
}

/*
 * iucv_setmask_up - Allow iucv interrupts on a single cpu.
 */
static void iucv_setmask_up(void)
{
	static cpumask_t cpumask;
	int cpu;

	/* Disable all cpu but the first in cpu_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

/*
 * iucv_enable - Make the iucv ready for use
 *
 * It allocates the pathid table, declares an iucv interrupt buffer and
 * enables the iucv interrupts. Called when the first user has registered
 * an iucv handler.
 *
 * Returns: 0 on success, -ENOMEM if the pathid table allocation failed,
 * -EIO if no cpu could declare an interrupt buffer.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	cpus_read_lock();
	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpumask_empty(&iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	cpus_read_unlock();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	cpus_read_unlock();
	return rc;
}

/*
 * iucv_disable - Shuts down iucv.
 *
 * It disables iucv interrupts, retrieves the iucv interrupt buffer and frees
 * the pathid table. Called after the last user unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	cpus_read_lock();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	cpus_read_unlock();
}

/* CPU hotplug: free the per-cpu buffers of a dead cpu. */
static int iucv_cpu_dead(unsigned int cpu)
{
	kfree(iucv_param_irq[cpu]);
	iucv_param_irq[cpu] = NULL;
	kfree(iucv_param[cpu]);
	iucv_param[cpu] = NULL;
	kfree(iucv_irq_data[cpu]);
	iucv_irq_data[cpu] = NULL;
	return 0;
}

/* CPU hotplug: allocate the per-cpu interrupt and parameter buffers. */
static int iucv_cpu_prepare(unsigned int cpu)
{
	/* Note: GFP_DMA used to get memory below 2G */
	iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
					  GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_irq_data[cpu])
		goto out_free;

	/* Allocate parameter blocks. */
	iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				       GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param[cpu])
		goto out_free;

	iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
					   GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param_irq[cpu])
		goto out_free;

	return 0;

out_free:
	/* Partial allocations are released by the _dead callback. */
	iucv_cpu_dead(cpu);
	return -ENOMEM;
}

/* CPU hotplug: declare an interrupt buffer when a cpu comes online. */
static int iucv_cpu_online(unsigned int cpu)
{
	if (!iucv_path_table)
		return 0;
	iucv_declare_cpu(NULL);
	return 0;
}

/*
 * CPU hotplug: retrieve the cpu's interrupt buffer before it goes offline.
 * Refuses to offline the last cpu with a declared buffer, and migrates
 * interrupt enablement to another buffer-owning cpu if necessary.
 */
static int iucv_cpu_down_prep(unsigned int cpu)
{
	cpumask_var_t cpumask;
	int ret = 0;

	if (!iucv_path_table)
		return 0;

	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpumask, &iucv_buffer_cpumask);
	cpumask_clear_cpu(cpu, cpumask);
	if (cpumask_empty(cpumask)) {
		/* Can't offline last IUCV enabled cpu. */
		ret = -EINVAL;
		goto __free_cpumask;
	}

	iucv_retrieve_cpu(NULL);
	if (!cpumask_empty(&iucv_irq_cpumask))
		goto __free_cpumask;

	/* No cpu has interrupts enabled any more; pick a remaining one. */
	smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
				 iucv_allow_cpu, NULL, 1);

__free_cpumask:
	free_cpumask_var(cpumask);
	return ret;
}

/**
 * iucv_sever_pathid - Sever an iucv path to free up the pathid. Used internally.
 *
 * @pathid: path identification number.
 * @userdata: 16-bytes of user data.
 *
 * Uses the irq-safe parameter block; callers must exclude interrupts
 * (this is called from tasklet/irq-excluded contexts).
 */
static int iucv_sever_pathid(u16 pathid, u8 *userdata)
{
	union iucv_param *parm;

	parm = iucv_param_irq[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = pathid;
	return iucv_call_b2f0(IUCV_SEVER, parm);
}

/**
 * __iucv_cleanup_queue - Nop function called via smp_call_function to force
 * work items from pending external iucv interrupts to the work queue.
 *
 * @dummy: unused dummy argument
 */
static void __iucv_cleanup_queue(void *dummy)
{
}

/**
 * iucv_cleanup_queue - Called after a path has been severed to find all
 * remaining work items for the now stale pathid.
 *
 * The caller needs to hold the iucv_table_lock.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on a iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}

/**
 * iucv_register - Registers a driver with IUCV.
 *
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Returns: 0 on success, -ENOSYS if IUCV is not available, -ENOMEM if the
 * memory allocation for the pathid table failed, or -EIO if
 * IUCV_DECLARE_BUFFER failed on all cpus.
 */
int iucv_register(struct iucv_handler *handler, int smp)
{
	int rc;

	if (!iucv_available)
		return -ENOSYS;
	mutex_lock(&iucv_register_mutex);
	if (!smp)
		iucv_nonsmp_handler++;
	if (list_empty(&iucv_handler_list)) {
		rc = iucv_enable();
		if (rc)
			/*
			 * NOTE(review): iucv_nonsmp_handler is not decremented
			 * on this error path — confirm whether that is intended.
			 */
			goto out_mutex;
	} else if (!smp && iucv_nonsmp_handler == 1)
		/* First non-smp handler: restrict interrupts to one cpu. */
		iucv_setmask_up();
	INIT_LIST_HEAD(&handler->paths);

	spin_lock_bh(&iucv_table_lock);
	list_add_tail(&handler->list, &iucv_handler_list);
	spin_unlock_bh(&iucv_table_lock);
	rc = 0;
out_mutex:
	mutex_unlock(&iucv_register_mutex);
	return rc;
}
EXPORT_SYMBOL(iucv_register);

/**
 * iucv_unregister - Unregister driver from IUCV.
 *
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		/* Last non-smp handler gone: re-enable all cpus. */
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}
EXPORT_SYMBOL(iucv_unregister);

/*
 * Reboot notifier: block interrupts, sever every active path and shut
 * IUCV down so the hypervisor side is clean before the reboot.
 */
static int iucv_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	int i;

	if (cpumask_empty(&iucv_irq_cpumask))
		return NOTIFY_DONE;

	cpus_read_lock();
	on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
	preempt_disable();
	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			iucv_sever_pathid(i, NULL);
	}
	preempt_enable();
	cpus_read_unlock();
	iucv_disable();
	return NOTIFY_DONE;
}

static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};

/**
 * iucv_path_accept - Complete the IUCV communication path
 *
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function is issued after the user received a connection pending
 * external interrupt and now wishes to complete the IUCV communication path.
 *
 * Returns: the result of the CP IUCV call, or -EIO if no cpu has a
 * declared interrupt buffer.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 *userdata, void *private)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		/* CP may have negotiated msglim/flags downwards. */
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_accept);

/**
 * iucv_path_connect - Establish an IUCV path
 *
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns: the result of the CP IUCV call.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 *userid, u8 *system, u8 *userdata,
		      void *private)
{
	union iucv_param *parm;
	int rc;

	spin_lock_bh(&iucv_table_lock);
	/* Flush stale work items before the pathid can be reused. */
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ipmsglim = path->msglim;
	parm->ctrl.ipflags1 = path->flags;
	if (userid) {
		/* CP expects the vm id in upper-case EBCDIC. */
		memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
		ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
		EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
	}
	if (system) {
		memcpy(parm->ctrl.iptarget, system,
		       sizeof(parm->ctrl.iptarget));
		ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
		EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
	}
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));

	rc = iucv_call_b2f0(IUCV_CONNECT, parm);
	if (!rc) {
		if (parm->ctrl.ippathid < iucv_max_pathid) {
			path->pathid = parm->ctrl.ippathid;
			path->msglim = parm->ctrl.ipmsglim;
			path->flags = parm->ctrl.ipflags1;
			path->handler = handler;
			path->private = private;
			list_add_tail(&path->list, &handler->paths);
			iucv_path_table[path->pathid] = path;
		} else {
			/* Pathid exceeds our table; give it back to CP. */
			iucv_sever_pathid(parm->ctrl.ippathid,
					  iucv_error_pathid);
			rc = -EIO;
		}
	}
out:
	spin_unlock_bh(&iucv_table_lock);
	return rc;
}
EXPORT_SYMBOL(iucv_path_connect);

/**
 * iucv_path_quiesce - Temporarily suspend incoming messages
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function temporarily suspends incoming messages on an IUCV path.
 * You can later reactivate the path by invoking the iucv_resume function.
 *
 * Returns: the result from the CP IUCV call.
 */
int iucv_path_quiesce(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_quiesce);

/**
 * iucv_path_resume - Resume incoming messages on a suspended IUCV path
 *
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * This function resumes incoming messages on an IUCV path that has
 * been stopped with iucv_path_quiesce.
 *
 * Returns: the result from the CP IUCV call.
 */
int iucv_path_resume(struct iucv_path *path, u8 *userdata)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = path->pathid;
	rc = iucv_call_b2f0(IUCV_RESUME, parm);
out:
	local_bh_enable();
	return rc;
}

/**
 * iucv_path_sever - Terminates an IUCV path.
 *
 * @path: address of iucv path structure
 * @userdata: 16 bytes of data reflected to the communication partner
 *
 * Returns: the result from the CP IUCV call.
 */
int iucv_path_sever(struct iucv_path *path, u8 *userdata)
{
	int rc;

	preempt_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/*
	 * The tasklet/work handler already holds the table lock when it
	 * severs a path; only take it when called from elsewhere.
	 */
	if (iucv_active_cpu != smp_processor_id())
		spin_lock_bh(&iucv_table_lock);
	rc = iucv_sever_pathid(path->pathid, userdata);
	iucv_path_table[path->pathid] = NULL;
	list_del_init(&path->list);
	if (iucv_active_cpu != smp_processor_id())
		spin_unlock_bh(&iucv_table_lock);
out:
	preempt_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_path_sever);

/**
 * iucv_message_purge - Cancels a message you have sent.
 *
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @srccls: source class of message
 *
 * Returns: the result from the CP IUCV call.
 */
int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
		       u32 srccls)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->purge.ippathid = path->pathid;
	parm->purge.ipmsgid = msg->id;
	parm->purge.ipsrccls = srccls;
	parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
	rc = iucv_call_b2f0(IUCV_PURGE, parm);
	if (!rc) {
		/* ipaudit is a 3-byte field; shift out the unused low byte. */
		msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
		msg->tag = parm->purge.ipmsgtag;
	}
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_purge);

/**
 * iucv_message_receive_iprmdata - Internal function to receive RMDATA
 * stored in &struct iucv_message
 *
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or
address of struct iucv_array 1090 * @size: length of data buffer 1091 * @residual: number of bytes remaining in the data buffer 1092 * 1093 * Internal function used by iucv_message_receive and __iucv_message_receive 1094 * to receive RMDATA data stored in struct iucv_message. 1095 */ 1096 static int iucv_message_receive_iprmdata(struct iucv_path *path, 1097 struct iucv_message *msg, 1098 u8 flags, void *buffer, 1099 size_t size, size_t *residual) 1100 { 1101 struct iucv_array *array; 1102 u8 *rmmsg; 1103 size_t copy; 1104 1105 /* 1106 * Message is 8 bytes long and has been stored to the 1107 * message descriptor itself. 1108 */ 1109 if (residual) 1110 *residual = abs(size - 8); 1111 rmmsg = msg->rmmsg; 1112 if (flags & IUCV_IPBUFLST) { 1113 /* Copy to struct iucv_array. */ 1114 size = (size < 8) ? size : 8; 1115 for (array = buffer; size > 0; array++) { 1116 copy = min_t(size_t, size, array->length); 1117 memcpy(dma32_to_virt(array->address), rmmsg, copy); 1118 rmmsg += copy; 1119 size -= copy; 1120 } 1121 } else { 1122 /* Copy to direct buffer. */ 1123 memcpy(buffer, rmmsg, min_t(size_t, size, 8)); 1124 } 1125 return 0; 1126 } 1127 1128 /** 1129 * __iucv_message_receive - Receives messages on an established path (no locking) 1130 * 1131 * @path: address of iucv path structure 1132 * @msg: address of iucv msg structure 1133 * @flags: flags that affect how the message is received (IUCV_IPBUFLST) 1134 * @buffer: address of data buffer or address of struct iucv_array 1135 * @size: length of data buffer 1136 * @residual: 1137 * 1138 * This function receives messages that are being sent to you over 1139 * established paths. This function will deal with RMDATA messages 1140 * embedded in struct iucv_message as well. 1141 * 1142 * Locking: no locking 1143 * 1144 * Returns: the result from the CP IUCV call. 
 */
int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			   u8 flags, void *buffer, size_t size, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	/* RMDATA messages live in the descriptor itself - no CP call needed. */
	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	if (cpumask_empty(&iucv_buffer_cpumask))
		return -EIO;

	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_dma32(buffer);
	parm->db.ipbfln1f = (u32) size;
	parm->db.ipmsgid = msg->id;
	parm->db.ippathid = path->pathid;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (flags | IUCV_IPFGPID |
			     IUCV_IPFGMID | IUCV_IPTRGCLS);
	rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
	/*
	 * rc 5 is a partial receive (buffer smaller than the message,
	 * per CP Programming Services); the output fields are still valid.
	 */
	if (!rc || rc == 5) {
		msg->flags = parm->db.ipflags1;
		if (residual)
			*residual = parm->db.ipbfln1f;
	}
	return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);

/**
 * iucv_message_receive - Receives messages on an established path, with locking
 *
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
 * @buffer: address of data buffer or address of struct iucv_array
 * @size: length of data buffer
 * @residual: optional, receives the residual buffer count reported by CP
 *
 * This function receives messages that are being sent to you over
 * established paths. This function will deal with RMDATA messages
 * embedded in struct iucv_message as well.
 *
 * Locking: local_bh_enable/local_bh_disable
 *
 * Returns: the result from the CP IUCV call.
 */
int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
			 u8 flags, void *buffer, size_t size, size_t *residual)
{
	int rc;

	/* RMDATA needs no CP call, hence no bottom-half protection. */
	if (msg->flags & IUCV_IPRMDATA)
		return iucv_message_receive_iprmdata(path, msg, flags,
						     buffer, size, residual);
	local_bh_disable();
	rc = __iucv_message_receive(path, msg, flags, buffer, size, residual);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_receive);

/**
 * iucv_message_reject - Refuses a specified message
 *
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 *
 * The reject function refuses a specified message. Between the time you
 * are notified of a message and the time that you complete the message,
 * the message may be rejected.
 *
 * Returns: the result from the CP IUCV call.
 */
int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ippathid = path->pathid;
	parm->db.ipmsgid = msg->id;
	parm->db.iptrgcls = msg->class;
	parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
	rc = iucv_call_b2f0(IUCV_REJECT, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reject);

/**
 * iucv_message_reply - Replies to a specified message
 *
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @reply: address of reply data buffer or address of struct iucv_array
 * @size: length of reply data buffer
 *
 * This function responds to the two-way
 * messages that you receive. You
 * must identify completely the message to which you wish to reply. I.e.,
 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into
 * the parameter list.
 *
 * Returns: the result from the CP IUCV call.
 */
int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
		       u8 flags, void *reply, size_t size)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		/* Reply of up to 8 bytes goes into the parameter list. */
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = flags;
		parm->dpl.ipmsgid = msg->id;
		parm->dpl.iptrgcls = msg->class;
		memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
	} else {
		parm->db.ipbfadr1 = virt_to_dma32(reply);
		parm->db.ipbfln1f = (u32) size;
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = flags;
		parm->db.ipmsgid = msg->id;
		parm->db.iptrgcls = msg->class;
	}
	rc = iucv_call_b2f0(IUCV_REPLY, parm);
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_reply);

/**
 * __iucv_message_send - Transmits a one-way message, no locking
 *
 * @path: address of iucv path structure
 * @msg: address of iucv msg structure
 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
 * @srccls: source class of message
 * @buffer: address of send buffer or address of struct iucv_array
 * @size: length of send buffer
 *
 * This function transmits data to another application. Data to be
 * transmitted is in a buffer and this is a one-way message and the
 * receiver will not reply to the message.
 *
 * Locking: no locking
 *
 * Returns: the result from the CP IUCV call.
1312 */ 1313 int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, 1314 u8 flags, u32 srccls, void *buffer, size_t size) 1315 { 1316 union iucv_param *parm; 1317 int rc; 1318 1319 if (cpumask_empty(&iucv_buffer_cpumask)) { 1320 rc = -EIO; 1321 goto out; 1322 } 1323 parm = iucv_param[smp_processor_id()]; 1324 memset(parm, 0, sizeof(union iucv_param)); 1325 if (flags & IUCV_IPRMDATA) { 1326 /* Message of 8 bytes can be placed into the parameter list. */ 1327 parm->dpl.ippathid = path->pathid; 1328 parm->dpl.ipflags1 = flags | IUCV_IPNORPY; 1329 parm->dpl.iptrgcls = msg->class; 1330 parm->dpl.ipsrccls = srccls; 1331 parm->dpl.ipmsgtag = msg->tag; 1332 memcpy(parm->dpl.iprmmsg, buffer, 8); 1333 } else { 1334 parm->db.ipbfadr1 = virt_to_dma32(buffer); 1335 parm->db.ipbfln1f = (u32) size; 1336 parm->db.ippathid = path->pathid; 1337 parm->db.ipflags1 = flags | IUCV_IPNORPY; 1338 parm->db.iptrgcls = msg->class; 1339 parm->db.ipsrccls = srccls; 1340 parm->db.ipmsgtag = msg->tag; 1341 } 1342 rc = iucv_call_b2f0(IUCV_SEND, parm); 1343 if (!rc) 1344 msg->id = parm->db.ipmsgid; 1345 out: 1346 return rc; 1347 } 1348 EXPORT_SYMBOL(__iucv_message_send); 1349 1350 /** 1351 * iucv_message_send - Transmits a one-way message, with locking 1352 * 1353 * @path: address of iucv path structure 1354 * @msg: address of iucv msg structure 1355 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) 1356 * @srccls: source class of message 1357 * @buffer: address of send buffer or address of struct iucv_array 1358 * @size: length of send buffer 1359 * 1360 * This function transmits data to another application. Data to be 1361 * transmitted is in a buffer and this is a one-way message and the 1362 * receiver will not reply to the message. 1363 * 1364 * Locking: local_bh_enable/local_bh_disable 1365 * 1366 * Returns: the result from the CP IUCV call. 
1367 */ 1368 int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, 1369 u8 flags, u32 srccls, void *buffer, size_t size) 1370 { 1371 int rc; 1372 1373 local_bh_disable(); 1374 rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); 1375 local_bh_enable(); 1376 return rc; 1377 } 1378 EXPORT_SYMBOL(iucv_message_send); 1379 1380 /** 1381 * iucv_message_send2way - Transmits a two-way message 1382 * 1383 * @path: address of iucv path structure 1384 * @msg: address of iucv msg structure 1385 * @flags: how the message is sent and the reply is received 1386 * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST) 1387 * @srccls: source class of message 1388 * @buffer: address of send buffer or address of struct iucv_array 1389 * @size: length of send buffer 1390 * @answer: address of answer buffer or address of struct iucv_array 1391 * @asize: size of reply buffer 1392 * @residual: ignored 1393 * 1394 * This function transmits data to another application. Data to be 1395 * transmitted is in a buffer. The receiver of the send is expected to 1396 * reply to the message and a buffer is provided into which IUCV moves 1397 * the reply to this message. 1398 * 1399 * Returns: the result from the CP IUCV call. 
 */
int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
			  u8 flags, u32 srccls, void *buffer, size_t size,
			  void *answer, size_t asize, size_t *residual)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (flags & IUCV_IPRMDATA) {
		parm->dpl.ippathid = path->pathid;
		parm->dpl.ipflags1 = path->flags;	/* priority message */
		parm->dpl.iptrgcls = msg->class;
		parm->dpl.ipsrccls = srccls;
		parm->dpl.ipmsgtag = msg->tag;
		parm->dpl.ipbfadr2 = virt_to_dma32(answer);
		parm->dpl.ipbfln2f = (u32) asize;
		/*
		 * NOTE(review): copies a fixed 8 bytes; with IUCV_IPRMDATA
		 * callers must provide at least 8 bytes in @buffer
		 * (iucv_message_reply bounds the copy by size instead).
		 */
		memcpy(parm->dpl.iprmmsg, buffer, 8);
	} else {
		parm->db.ippathid = path->pathid;
		parm->db.ipflags1 = path->flags;	/* priority message */
		parm->db.iptrgcls = msg->class;
		parm->db.ipsrccls = srccls;
		parm->db.ipmsgtag = msg->tag;
		parm->db.ipbfadr1 = virt_to_dma32(buffer);
		parm->db.ipbfln1f = (u32) size;
		parm->db.ipbfadr2 = virt_to_dma32(answer);
		parm->db.ipbfln2f = (u32) asize;
	}
	rc = iucv_call_b2f0(IUCV_SEND, parm);
	if (!rc)
		msg->id = parm->db.ipmsgid;
out:
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(iucv_message_send2way);

/* External interrupt buffer layout for a connection pending (0x01) event. */
struct iucv_path_pending {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u16 ipmsglim;
	u16 res1;
	u8 ipvmid[8];
	u8 ipuser[16];
	u32 res3;
	u8 ippollfg;
	u8 res4[3];
} __packed;

/**
 * iucv_path_pending - Process connection pending work item
 *
 * @data: Pointer to external interrupt buffer
 *
 * Context: Called from iucv_work_fn() workqueue context while holding
 *	    iucv_table_lock.
 */
static void iucv_path_pending(struct iucv_irq_data *data)
{
	struct iucv_path_pending *ipp = (void *) data;
	struct iucv_handler *handler;
	struct iucv_path *path;
	char *error;

	BUG_ON(iucv_path_table[ipp->ippathid]);
	/* New pathid, handler found. Create a new path struct. */
	error = iucv_error_no_memory;
	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
	if (!path)
		goto out_sever;
	path->pathid = ipp->ippathid;
	iucv_path_table[path->pathid] = path;
	/* Convert the peer's VM user id from EBCDIC to ASCII in place. */
	EBCASC(ipp->ipvmid, 8);

	/* Call registered handler until one is found that wants the path. */
	list_for_each_entry(handler, &iucv_handler_list, list) {
		if (!handler->path_pending)
			continue;
		/*
		 * Add path to handler to allow a call to iucv_path_sever
		 * inside the path_pending function. If the handler returns
		 * an error remove the path from the handler again.
		 */
		list_add(&path->list, &handler->paths);
		path->handler = handler;
		if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
			return;
		list_del(&path->list);
		path->handler = NULL;
	}
	/* No handler wanted the path. */
	iucv_path_table[path->pathid] = NULL;
	iucv_path_free(path);
	error = iucv_error_no_listener;
out_sever:
	iucv_sever_pathid(ipp->ippathid, error);
}

/* External interrupt buffer layout for a connection complete (0x02) event. */
struct iucv_path_complete {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u16 ipmsglim;
	u16 res1;
	u8 res2[8];
	u8 ipuser[16];
	u32 res3;
	u8 ippollfg;
	u8 res4[3];
} __packed;

/**
 * iucv_path_complete - Process connection complete work item
 *
 * @data: Pointer to external interrupt buffer
 *
 * Context: Called from tasklet while holding iucv_table_lock.
 */
static void iucv_path_complete(struct iucv_irq_data *data)
{
	struct iucv_path_complete *ipc = (void *) data;
	struct iucv_path *path = iucv_path_table[ipc->ippathid];

	/* Latch the negotiated flags before notifying the handler. */
	if (path)
		path->flags = ipc->ipflags1;
	if (path && path->handler && path->handler->path_complete)
		path->handler->path_complete(path, ipc->ipuser);
}

/* External interrupt buffer layout for a connection severed (0x03) event. */
struct iucv_path_severed {
	u16 ippathid;
	u8 res1;
	u8 iptype;
	u32 res2;
	u8 res3[8];
	u8 ipuser[16];
	u32 res4;
	u8 ippollfg;
	u8 res5[3];
} __packed;

/**
 * iucv_path_severed - Process connection severed work item.
 *
 * @data: Pointer to external interrupt buffer
 *
 * Context: Called from tasklet while holding iucv_table_lock.
 */
static void iucv_path_severed(struct iucv_irq_data *data)
{
	struct iucv_path_severed *ips = (void *) data;
	struct iucv_path *path = iucv_path_table[ips->ippathid];

	if (!path || !path->handler)	/* Already severed */
		return;
	if (path->handler->path_severed)
		path->handler->path_severed(path, ips->ipuser);
	else {
		/* Handler has no callback: tear the path down here. */
		iucv_sever_pathid(path->pathid, NULL);
		iucv_path_table[path->pathid] = NULL;
		list_del(&path->list);
		iucv_path_free(path);
	}
}

/* External interrupt buffer layout for a connection quiesced (0x04) event. */
struct iucv_path_quiesced {
	u16 ippathid;
	u8 res1;
	u8 iptype;
	u32 res2;
	u8 res3[8];
	u8 ipuser[16];
	u32 res4;
	u8 ippollfg;
	u8 res5[3];
} __packed;

/**
 * iucv_path_quiesced - Process connection quiesced work item.
 *
 * @data: Pointer to external interrupt buffer
 *
 * Context: Called from tasklet while holding iucv_table_lock.
 */
static void iucv_path_quiesced(struct iucv_irq_data *data)
{
	struct iucv_path_quiesced *ipq = (void *) data;
	struct iucv_path *path = iucv_path_table[ipq->ippathid];

	if (path && path->handler && path->handler->path_quiesced)
		path->handler->path_quiesced(path, ipq->ipuser);
}

/* External interrupt buffer layout for a connection resumed (0x05) event. */
struct iucv_path_resumed {
	u16 ippathid;
	u8 res1;
	u8 iptype;
	u32 res2;
	u8 res3[8];
	u8 ipuser[16];
	u32 res4;
	u8 ippollfg;
	u8 res5[3];
} __packed;

/**
 * iucv_path_resumed - Process connection resumed work item.
 *
 * @data: Pointer to external interrupt buffer
 *
 * Context: Called from tasklet while holding iucv_table_lock.
 */
static void iucv_path_resumed(struct iucv_irq_data *data)
{
	struct iucv_path_resumed *ipr = (void *) data;
	struct iucv_path *path = iucv_path_table[ipr->ippathid];

	if (path && path->handler && path->handler->path_resumed)
		path->handler->path_resumed(path, ipr->ipuser);
}

/* External interrupt buffer layout for message complete (0x06/0x07) events. */
struct iucv_message_complete {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u32 ipmsgid;
	u32 ipaudit;
	u8 iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res;
	u32 ipbfln2f;
	u8 ippollfg;
	u8 res2[3];
} __packed;

/**
 * iucv_message_complete - Process message complete work item.
 *
 * @data: Pointer to external interrupt buffer
 *
 * Context: Called from tasklet while holding iucv_table_lock.
 */
static void iucv_message_complete(struct iucv_irq_data *data)
{
	struct iucv_message_complete *imc = (void *) data;
	struct iucv_path *path = iucv_path_table[imc->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_complete) {
		/* Build an on-stack iucv_message from the interrupt data. */
		msg.flags = imc->ipflags1;
		msg.id = imc->ipmsgid;
		msg.audit = imc->ipaudit;
		memcpy(msg.rmmsg, imc->iprmmsg, 8);
		msg.class = imc->ipsrccls;
		msg.tag = imc->ipmsgtag;
		msg.length = imc->ipbfln2f;
		path->handler->message_complete(path, &msg);
	}
}

/* External interrupt buffer layout for message pending (0x08/0x09) events. */
struct iucv_message_pending {
	u16 ippathid;
	u8 ipflags1;
	u8 iptype;
	u32 ipmsgid;
	u32 iptrgcls;
	struct {
		union {
			u32 iprmmsg1_u32;
			u8 iprmmsg1[4];
		} ln1msg1;
		union {
			u32 ipbfln1f;
			u8 iprmmsg2[4];
		} ln1msg2;
	} rmmsg;
	u32 res1[3];
	u32 ipbfln2f;
	u8 ippollfg;
	u8 res2[3];
} __packed;

/**
 * iucv_message_pending - Process message pending work item.
 *
 * @data: Pointer to external interrupt buffer
 *
 * Context: Called from tasklet while holding iucv_table_lock.
 */
static void iucv_message_pending(struct iucv_irq_data *data)
{
	struct iucv_message_pending *imp = (void *) data;
	struct iucv_path *path = iucv_path_table[imp->ippathid];
	struct iucv_message msg;

	if (path && path->handler && path->handler->message_pending) {
		msg.flags = imp->ipflags1;
		msg.id = imp->ipmsgid;
		msg.class = imp->iptrgcls;
		if (imp->ipflags1 & IUCV_IPRMDATA) {
			/* RMDATA: the 8 message bytes are in the buffer. */
			memcpy(msg.rmmsg, &imp->rmmsg, 8);
			msg.length = 8;
		} else
			msg.length = imp->rmmsg.ln1msg2.ipbfln1f;
		msg.reply_size = imp->ipbfln2f;
		path->handler->message_pending(path, &msg);
	}
}

/*
 * iucv_tasklet_fn - Process the queue of IRQ buffers
 *
 * This tasklet loops over the queue of irq buffers created by
 * iucv_external_interrupt, calls the appropriate action handler
 * and then frees the buffer.
 */
static void iucv_tasklet_fn(unsigned long ignored)
{
	typedef void iucv_irq_fn(struct iucv_irq_data *);
	/* Dispatch table indexed by interrupt type (0x01 goes to the
	 * work queue instead, see iucv_external_interrupt). */
	static iucv_irq_fn *irq_fn[] = {
		[0x02] = iucv_path_complete,
		[0x03] = iucv_path_severed,
		[0x04] = iucv_path_quiesced,
		[0x05] = iucv_path_resumed,
		[0x06] = iucv_message_complete,
		[0x07] = iucv_message_complete,
		[0x08] = iucv_message_pending,
		[0x09] = iucv_message_pending,
	};
	LIST_HEAD(task_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	if (!spin_trylock(&iucv_table_lock)) {
		/* Lock is busy - retry on the next tasklet run. */
		tasklet_schedule(&iucv_tasklet);
		return;
	}
	iucv_active_cpu = smp_processor_id();

	/* Detach the pending buffers under the queue lock, process outside. */
	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_task_queue, &task_queue);
	spin_unlock_irq(&iucv_queue_lock);

	list_for_each_entry_safe(p, n, &task_queue, list) {
		list_del_init(&p->list);
		irq_fn[p->data.iptype](&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock(&iucv_table_lock);
}

/*
 * iucv_work_fn - Process the queue of path pending IRQ blocks
 *
 * This work function loops over the queue of path pending irq blocks
 * created by iucv_external_interrupt, calls the appropriate action
 * handler and then frees the buffer.
 */
static void iucv_work_fn(struct work_struct *work)
{
	LIST_HEAD(work_queue);
	struct iucv_irq_list *p, *n;

	/* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
	spin_lock_bh(&iucv_table_lock);
	iucv_active_cpu = smp_processor_id();

	spin_lock_irq(&iucv_queue_lock);
	list_splice_init(&iucv_work_queue, &work_queue);
	spin_unlock_irq(&iucv_queue_lock);

	iucv_cleanup_queue();
	list_for_each_entry_safe(p, n, &work_queue, list) {
		list_del_init(&p->list);
		iucv_path_pending(&p->data);
		kfree(p);
	}

	iucv_active_cpu = -1;
	spin_unlock_bh(&iucv_table_lock);
}

/*
 * iucv_external_interrupt - Handles external interrupts coming in from CP.
 *
 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
 */
static void iucv_external_interrupt(struct ext_code ext_code,
				    unsigned int param32, unsigned long param64)
{
	struct iucv_irq_data *p;
	struct iucv_irq_list *work;

	inc_irq_stat(IRQEXT_IUC);
	p = iucv_irq_data[smp_processor_id()];
	if (p->ippathid >= iucv_max_pathid) {
		WARN_ON(p->ippathid >= iucv_max_pathid);
		iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
		return;
	}
	BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
	/* Copy the per-cpu interrupt buffer to a queued work item. */
	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
	if (!work) {
		pr_warn("iucv_external_interrupt: out of memory\n");
		return;
	}
	memcpy(&work->data, p, sizeof(work->data));
	spin_lock(&iucv_queue_lock);
	if (p->iptype == 0x01) {
		/* Path pending interrupt. */
		list_add_tail(&work->list, &iucv_work_queue);
		schedule_work(&iucv_work);
	} else {
		/* The other interrupts. */
		list_add_tail(&work->list, &iucv_task_queue);
		tasklet_schedule(&iucv_tasklet);
	}
	spin_unlock(&iucv_queue_lock);
}

/* Dispatch table exported to users of the IUCV base infrastructure. */
struct iucv_interface iucv_if = {
	.message_receive = iucv_message_receive,
	.__message_receive = __iucv_message_receive,
	.message_reply = iucv_message_reply,
	.message_reject = iucv_message_reject,
	.message_send = iucv_message_send,
	.__message_send = __iucv_message_send,
	.message_send2way = iucv_message_send2way,
	.message_purge = iucv_message_purge,
	.path_accept = iucv_path_accept,
	.path_connect = iucv_path_connect,
	.path_quiesce = iucv_path_quiesce,
	.path_resume = iucv_path_resume,
	.path_sever = iucv_path_sever,
	.iucv_register = iucv_register,
	.iucv_unregister = iucv_unregister,
	.bus = NULL,	/* filled in by iucv_init() */
	.root = NULL,	/* filled in by iucv_init() */
};
EXPORT_SYMBOL(iucv_if);

/* Dynamic cpuhp state returned by cpuhp_setup_state(CPUHP_AP_ONLINE_DYN). */
static enum cpuhp_state iucv_online;

/**
 * iucv_init - Allocates and initializes various data structures.
1855 */ 1856 static int __init iucv_init(void) 1857 { 1858 int rc; 1859 1860 if (!machine_is_vm()) { 1861 rc = -EPROTONOSUPPORT; 1862 goto out; 1863 } 1864 system_ctl_set_bit(0, CR0_IUCV_BIT); 1865 rc = iucv_query_maxconn(); 1866 if (rc) 1867 goto out_ctl; 1868 rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); 1869 if (rc) 1870 goto out_ctl; 1871 iucv_root = root_device_register("iucv"); 1872 if (IS_ERR(iucv_root)) { 1873 rc = PTR_ERR(iucv_root); 1874 goto out_int; 1875 } 1876 1877 rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare", 1878 iucv_cpu_prepare, iucv_cpu_dead); 1879 if (rc) 1880 goto out_dev; 1881 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online", 1882 iucv_cpu_online, iucv_cpu_down_prep); 1883 if (rc < 0) 1884 goto out_prep; 1885 iucv_online = rc; 1886 1887 rc = register_reboot_notifier(&iucv_reboot_notifier); 1888 if (rc) 1889 goto out_remove_hp; 1890 ASCEBC(iucv_error_no_listener, 16); 1891 ASCEBC(iucv_error_no_memory, 16); 1892 ASCEBC(iucv_error_pathid, 16); 1893 iucv_available = 1; 1894 rc = bus_register(&iucv_bus); 1895 if (rc) 1896 goto out_reboot; 1897 iucv_if.root = iucv_root; 1898 iucv_if.bus = &iucv_bus; 1899 return 0; 1900 1901 out_reboot: 1902 unregister_reboot_notifier(&iucv_reboot_notifier); 1903 out_remove_hp: 1904 cpuhp_remove_state(iucv_online); 1905 out_prep: 1906 cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE); 1907 out_dev: 1908 root_device_unregister(iucv_root); 1909 out_int: 1910 unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt); 1911 out_ctl: 1912 system_ctl_clear_bit(0, 1); 1913 out: 1914 return rc; 1915 } 1916 1917 /** 1918 * iucv_exit - Frees everything allocated from iucv_init. 
 */
static void __exit iucv_exit(void)
{
	struct iucv_irq_list *p, *n;

	/* Discard any interrupt buffers still queued for delivery. */
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list)
		kfree(p);
	list_for_each_entry_safe(p, n, &iucv_work_queue, list)
		kfree(p);
	spin_unlock_irq(&iucv_queue_lock);
	unregister_reboot_notifier(&iucv_reboot_notifier);

	/* Undo iucv_init() in reverse order. */
	cpuhp_remove_state_nocalls(iucv_online);
	cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
	root_device_unregister(iucv_root);
	bus_unregister(&iucv_bus);
	unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
}

subsys_initcall(iucv_init);
module_exit(iucv_exit);

MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert <felfert@millenux.com>");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");