1 /* 2 * IUCV base infrastructure. 3 * 4 * Copyright IBM Corp. 2001, 2009 5 * 6 * Author(s): 7 * Original source: 8 * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 9 * Xenia Tkatschow (xenia@us.ibm.com) 10 * 2Gb awareness and general cleanup: 11 * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) 12 * Rewritten for af_iucv: 13 * Martin Schwidefsky <schwidefsky@de.ibm.com> 14 * PM functions: 15 * Ursula Braun (ursula.braun@de.ibm.com) 16 * 17 * Documentation used: 18 * The original source 19 * CP Programming Service, IBM document # SC24-5760 20 * 21 * This program is free software; you can redistribute it and/or modify 22 * it under the terms of the GNU General Public License as published by 23 * the Free Software Foundation; either version 2, or (at your option) 24 * any later version. 25 * 26 * This program is distributed in the hope that it will be useful, 27 * but WITHOUT ANY WARRANTY; without even the implied warranty of 28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 29 * GNU General Public License for more details. 30 * 31 * You should have received a copy of the GNU General Public License 32 * along with this program; if not, write to the Free Software 33 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 34 */ 35 36 #define KMSG_COMPONENT "iucv" 37 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 38 39 #include <linux/module.h> 40 #include <linux/moduleparam.h> 41 #include <linux/spinlock.h> 42 #include <linux/kernel.h> 43 #include <linux/slab.h> 44 #include <linux/init.h> 45 #include <linux/interrupt.h> 46 #include <linux/list.h> 47 #include <linux/errno.h> 48 #include <linux/err.h> 49 #include <linux/device.h> 50 #include <linux/cpu.h> 51 #include <linux/reboot.h> 52 #include <net/iucv/iucv.h> 53 #include <asm/atomic.h> 54 #include <asm/ebcdic.h> 55 #include <asm/io.h> 56 #include <asm/s390_ext.h> 57 #include <asm/smp.h> 58 59 /* 60 * FLAGS: 61 * All flags are defined in the field IPFLAGS1 of each function 62 * and can be found in CP Programming Services. 63 * IPSRCCLS - Indicates you have specified a source class. 64 * IPTRGCLS - Indicates you have specified a target class. 65 * IPFGPID - Indicates you have specified a pathid. 66 * IPFGMID - Indicates you have specified a message ID. 67 * IPNORPY - Indicates a one-way message. No reply expected. 68 * IPALL - Indicates that all paths are affected. 69 */ 70 #define IUCV_IPSRCCLS 0x01 71 #define IUCV_IPTRGCLS 0x01 72 #define IUCV_IPFGPID 0x02 73 #define IUCV_IPFGMID 0x04 74 #define IUCV_IPNORPY 0x10 75 #define IUCV_IPALL 0x80 76 77 static int iucv_bus_match(struct device *dev, struct device_driver *drv) 78 { 79 return 0; 80 } 81 82 enum iucv_pm_states { 83 IUCV_PM_INITIAL = 0, 84 IUCV_PM_FREEZING = 1, 85 IUCV_PM_THAWING = 2, 86 IUCV_PM_RESTORING = 3, 87 }; 88 static enum iucv_pm_states iucv_pm_state; 89 90 static int iucv_pm_prepare(struct device *); 91 static void iucv_pm_complete(struct device *); 92 static int iucv_pm_freeze(struct device *); 93 static int iucv_pm_thaw(struct device *); 94 static int iucv_pm_restore(struct device *); 95 96 static const struct dev_pm_ops iucv_pm_ops = { 97 .prepare = iucv_pm_prepare, 98 .complete = iucv_pm_complete, 99 .freeze = iucv_pm_freeze, 100 .thaw = iucv_pm_thaw, 101 .restore = iucv_pm_restore, 102 }; 103 104 struct bus_type iucv_bus = { 105 .name = "iucv", 106 .match = iucv_bus_match, 107 .pm = &iucv_pm_ops, 108 }; 109 EXPORT_SYMBOL(iucv_bus); 110 111 struct device *iucv_root; 112 EXPORT_SYMBOL(iucv_root); 113 114 static int iucv_available; 115 116 /* General IUCV interrupt structure */ 117 struct iucv_irq_data { 118 u16 ippathid; 119 u8 ipflags1; 120 u8 iptype; 121 u32 res2[8]; 122 }; 123 124 struct iucv_irq_list { 125 struct list_head list; 126 struct iucv_irq_data data; 127 }; 128 129 static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; 130 static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE; 131 static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE; 132 133 /* 134 * Queue of interrupt buffers lock for delivery via the tasklet 135 * (fast but can't call smp_call_function). 136 */ 137 static LIST_HEAD(iucv_task_queue); 138 139 /* 140 * The tasklet for fast delivery of iucv interrupts. 141 */ 142 static void iucv_tasklet_fn(unsigned long); 143 static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0); 144 145 /* 146 * Queue of interrupt buffers for delivery via a work queue 147 * (slower but can call smp_call_function). 148 */ 149 static LIST_HEAD(iucv_work_queue); 150 151 /* 152 * The work element to deliver path pending interrupts. 153 */ 154 static void iucv_work_fn(struct work_struct *work); 155 static DECLARE_WORK(iucv_work, iucv_work_fn); 156 157 /* 158 * Spinlock protecting task and work queue. 159 */ 160 static DEFINE_SPINLOCK(iucv_queue_lock); 161 162 enum iucv_command_codes { 163 IUCV_QUERY = 0, 164 IUCV_RETRIEVE_BUFFER = 2, 165 IUCV_SEND = 4, 166 IUCV_RECEIVE = 5, 167 IUCV_REPLY = 6, 168 IUCV_REJECT = 8, 169 IUCV_PURGE = 9, 170 IUCV_ACCEPT = 10, 171 IUCV_CONNECT = 11, 172 IUCV_DECLARE_BUFFER = 12, 173 IUCV_QUIESCE = 13, 174 IUCV_RESUME = 14, 175 IUCV_SEVER = 15, 176 IUCV_SETMASK = 16, 177 IUCV_SETCONTROLMASK = 17, 178 }; 179 180 /* 181 * Error messages that are used with the iucv_sever function. They get 182 * converted to EBCDIC. 183 */ 184 static char iucv_error_no_listener[16] = "NO LISTENER"; 185 static char iucv_error_no_memory[16] = "NO MEMORY"; 186 static char iucv_error_pathid[16] = "INVALID PATHID"; 187 188 /* 189 * iucv_handler_list: List of registered handlers. 190 */ 191 static LIST_HEAD(iucv_handler_list); 192 193 /* 194 * iucv_path_table: an array of iucv_path structures. 195 */ 196 static struct iucv_path **iucv_path_table; 197 static unsigned long iucv_max_pathid; 198 199 /* 200 * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table 201 */ 202 static DEFINE_SPINLOCK(iucv_table_lock); 203 204 /* 205 * iucv_active_cpu: contains the number of the cpu executing the tasklet 206 * or the work handler. Needed for iucv_path_sever called from tasklet. 207 */ 208 static int iucv_active_cpu = -1; 209 210 /* 211 * Mutex and wait queue for iucv_register/iucv_unregister. 212 */ 213 static DEFINE_MUTEX(iucv_register_mutex); 214 215 /* 216 * Counter for number of non-smp capable handlers. 217 */ 218 static int iucv_nonsmp_handler; 219 220 /* 221 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect, 222 * iucv_path_quiesce and iucv_path_sever. 223 */ 224 struct iucv_cmd_control { 225 u16 ippathid; 226 u8 ipflags1; 227 u8 iprcode; 228 u16 ipmsglim; 229 u16 res1; 230 u8 ipvmid[8]; 231 u8 ipuser[16]; 232 u8 iptarget[8]; 233 } __attribute__ ((packed,aligned(8))); 234 235 /* 236 * Data in parameter list iucv structure. Used by iucv_message_send, 237 * iucv_message_send2way and iucv_message_reply. 238 */ 239 struct iucv_cmd_dpl { 240 u16 ippathid; 241 u8 ipflags1; 242 u8 iprcode; 243 u32 ipmsgid; 244 u32 iptrgcls; 245 u8 iprmmsg[8]; 246 u32 ipsrccls; 247 u32 ipmsgtag; 248 u32 ipbfadr2; 249 u32 ipbfln2f; 250 u32 res; 251 } __attribute__ ((packed,aligned(8))); 252 253 /* 254 * Data in buffer iucv structure. Used by iucv_message_receive, 255 * iucv_message_reject, iucv_message_send, iucv_message_send2way 256 * and iucv_declare_cpu. 257 */ 258 struct iucv_cmd_db { 259 u16 ippathid; 260 u8 ipflags1; 261 u8 iprcode; 262 u32 ipmsgid; 263 u32 iptrgcls; 264 u32 ipbfadr1; 265 u32 ipbfln1f; 266 u32 ipsrccls; 267 u32 ipmsgtag; 268 u32 ipbfadr2; 269 u32 ipbfln2f; 270 u32 res; 271 } __attribute__ ((packed,aligned(8))); 272 273 /* 274 * Purge message iucv structure. Used by iucv_message_purge. 275 */ 276 struct iucv_cmd_purge { 277 u16 ippathid; 278 u8 ipflags1; 279 u8 iprcode; 280 u32 ipmsgid; 281 u8 ipaudit[3]; 282 u8 res1[5]; 283 u32 res2; 284 u32 ipsrccls; 285 u32 ipmsgtag; 286 u32 res3[3]; 287 } __attribute__ ((packed,aligned(8))); 288 289 /* 290 * Set mask iucv structure. Used by iucv_enable_cpu. 291 */ 292 struct iucv_cmd_set_mask { 293 u8 ipmask; 294 u8 res1[2]; 295 u8 iprcode; 296 u32 res2[9]; 297 } __attribute__ ((packed,aligned(8))); 298 299 union iucv_param { 300 struct iucv_cmd_control ctrl; 301 struct iucv_cmd_dpl dpl; 302 struct iucv_cmd_db db; 303 struct iucv_cmd_purge purge; 304 struct iucv_cmd_set_mask set_mask; 305 }; 306 307 /* 308 * Anchor for per-cpu IUCV command parameter block. 309 */ 310 static union iucv_param *iucv_param[NR_CPUS]; 311 static union iucv_param *iucv_param_irq[NR_CPUS]; 312 313 /** 314 * iucv_call_b2f0 315 * @code: identifier of IUCV call to CP. 316 * @parm: pointer to a struct iucv_parm block 317 * 318 * Calls CP to execute IUCV commands. 319 * 320 * Returns the result of the CP IUCV call. 321 */ 322 static inline int iucv_call_b2f0(int command, union iucv_param *parm) 323 { 324 register unsigned long reg0 asm ("0"); 325 register unsigned long reg1 asm ("1"); 326 int ccode; 327 328 reg0 = command; 329 reg1 = virt_to_phys(parm); 330 asm volatile( 331 " .long 0xb2f01000\n" 332 " ipm %0\n" 333 " srl %0,28\n" 334 : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1) 335 : "m" (*parm) : "cc"); 336 return (ccode == 1) ? parm->ctrl.iprcode : ccode; 337 } 338 339 /** 340 * iucv_query_maxconn 341 * 342 * Determines the maximum number of connections that may be established. 343 * 344 * Returns the maximum number of connections or -EPERM is IUCV is not 345 * available. 346 */ 347 static int iucv_query_maxconn(void) 348 { 349 register unsigned long reg0 asm ("0"); 350 register unsigned long reg1 asm ("1"); 351 void *param; 352 int ccode; 353 354 param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA); 355 if (!param) 356 return -ENOMEM; 357 reg0 = IUCV_QUERY; 358 reg1 = (unsigned long) param; 359 asm volatile ( 360 " .long 0xb2f01000\n" 361 " ipm %0\n" 362 " srl %0,28\n" 363 : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); 364 if (ccode == 0) 365 iucv_max_pathid = reg1; 366 kfree(param); 367 return ccode ? -EPERM : 0; 368 } 369 370 /** 371 * iucv_allow_cpu 372 * @data: unused 373 * 374 * Allow iucv interrupts on this cpu. 375 */ 376 static void iucv_allow_cpu(void *data) 377 { 378 int cpu = smp_processor_id(); 379 union iucv_param *parm; 380 381 /* 382 * Enable all iucv interrupts. 383 * ipmask contains bits for the different interrupts 384 * 0x80 - Flag to allow nonpriority message pending interrupts 385 * 0x40 - Flag to allow priority message pending interrupts 386 * 0x20 - Flag to allow nonpriority message completion interrupts 387 * 0x10 - Flag to allow priority message completion interrupts 388 * 0x08 - Flag to allow IUCV control interrupts 389 */ 390 parm = iucv_param_irq[cpu]; 391 memset(parm, 0, sizeof(union iucv_param)); 392 parm->set_mask.ipmask = 0xf8; 393 iucv_call_b2f0(IUCV_SETMASK, parm); 394 395 /* 396 * Enable all iucv control interrupts. 397 * ipmask contains bits for the different interrupts 398 * 0x80 - Flag to allow pending connections interrupts 399 * 0x40 - Flag to allow connection complete interrupts 400 * 0x20 - Flag to allow connection severed interrupts 401 * 0x10 - Flag to allow connection quiesced interrupts 402 * 0x08 - Flag to allow connection resumed interrupts 403 */ 404 memset(parm, 0, sizeof(union iucv_param)); 405 parm->set_mask.ipmask = 0xf8; 406 iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); 407 /* Set indication that iucv interrupts are allowed for this cpu. */ 408 cpu_set(cpu, iucv_irq_cpumask); 409 } 410 411 /** 412 * iucv_block_cpu 413 * @data: unused 414 * 415 * Block iucv interrupts on this cpu. 416 */ 417 static void iucv_block_cpu(void *data) 418 { 419 int cpu = smp_processor_id(); 420 union iucv_param *parm; 421 422 /* Disable all iucv interrupts. */ 423 parm = iucv_param_irq[cpu]; 424 memset(parm, 0, sizeof(union iucv_param)); 425 iucv_call_b2f0(IUCV_SETMASK, parm); 426 427 /* Clear indication that iucv interrupts are allowed for this cpu. */ 428 cpu_clear(cpu, iucv_irq_cpumask); 429 } 430 431 /** 432 * iucv_block_cpu_almost 433 * @data: unused 434 * 435 * Allow connection-severed interrupts only on this cpu. 436 */ 437 static void iucv_block_cpu_almost(void *data) 438 { 439 int cpu = smp_processor_id(); 440 union iucv_param *parm; 441 442 /* Allow iucv control interrupts only */ 443 parm = iucv_param_irq[cpu]; 444 memset(parm, 0, sizeof(union iucv_param)); 445 parm->set_mask.ipmask = 0x08; 446 iucv_call_b2f0(IUCV_SETMASK, parm); 447 /* Allow iucv-severed interrupt only */ 448 memset(parm, 0, sizeof(union iucv_param)); 449 parm->set_mask.ipmask = 0x20; 450 iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); 451 452 /* Clear indication that iucv interrupts are allowed for this cpu. */ 453 cpu_clear(cpu, iucv_irq_cpumask); 454 } 455 456 /** 457 * iucv_declare_cpu 458 * @data: unused 459 * 460 * Declare a interrupt buffer on this cpu. 461 */ 462 static void iucv_declare_cpu(void *data) 463 { 464 int cpu = smp_processor_id(); 465 union iucv_param *parm; 466 int rc; 467 468 if (cpu_isset(cpu, iucv_buffer_cpumask)) 469 return; 470 471 /* Declare interrupt buffer. */ 472 parm = iucv_param_irq[cpu]; 473 memset(parm, 0, sizeof(union iucv_param)); 474 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); 475 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 476 if (rc) { 477 char *err = "Unknown"; 478 switch (rc) { 479 case 0x03: 480 err = "Directory error"; 481 break; 482 case 0x0a: 483 err = "Invalid length"; 484 break; 485 case 0x13: 486 err = "Buffer already exists"; 487 break; 488 case 0x3e: 489 err = "Buffer overlap"; 490 break; 491 case 0x5c: 492 err = "Paging or storage error"; 493 break; 494 } 495 pr_warning("Defining an interrupt buffer on CPU %i" 496 " failed with 0x%02x (%s)\n", cpu, rc, err); 497 return; 498 } 499 500 /* Set indication that an iucv buffer exists for this cpu. */ 501 cpu_set(cpu, iucv_buffer_cpumask); 502 503 if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask)) 504 /* Enable iucv interrupts on this cpu. */ 505 iucv_allow_cpu(NULL); 506 else 507 /* Disable iucv interrupts on this cpu. */ 508 iucv_block_cpu(NULL); 509 } 510 511 /** 512 * iucv_retrieve_cpu 513 * @data: unused 514 * 515 * Retrieve interrupt buffer on this cpu. 516 */ 517 static void iucv_retrieve_cpu(void *data) 518 { 519 int cpu = smp_processor_id(); 520 union iucv_param *parm; 521 522 if (!cpu_isset(cpu, iucv_buffer_cpumask)) 523 return; 524 525 /* Block iucv interrupts. */ 526 iucv_block_cpu(NULL); 527 528 /* Retrieve interrupt buffer. */ 529 parm = iucv_param_irq[cpu]; 530 iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); 531 532 /* Clear indication that an iucv buffer exists for this cpu. */ 533 cpu_clear(cpu, iucv_buffer_cpumask); 534 } 535 536 /** 537 * iucv_setmask_smp 538 * 539 * Allow iucv interrupts on all cpus. 540 */ 541 static void iucv_setmask_mp(void) 542 { 543 int cpu; 544 545 get_online_cpus(); 546 for_each_online_cpu(cpu) 547 /* Enable all cpus with a declared buffer. */ 548 if (cpu_isset(cpu, iucv_buffer_cpumask) && 549 !cpu_isset(cpu, iucv_irq_cpumask)) 550 smp_call_function_single(cpu, iucv_allow_cpu, 551 NULL, 1); 552 put_online_cpus(); 553 } 554 555 /** 556 * iucv_setmask_up 557 * 558 * Allow iucv interrupts on a single cpu. 559 */ 560 static void iucv_setmask_up(void) 561 { 562 cpumask_t cpumask; 563 int cpu; 564 565 /* Disable all cpu but the first in cpu_irq_cpumask. */ 566 cpumask = iucv_irq_cpumask; 567 cpu_clear(first_cpu(iucv_irq_cpumask), cpumask); 568 for_each_cpu_mask_nr(cpu, cpumask) 569 smp_call_function_single(cpu, iucv_block_cpu, NULL, 1); 570 } 571 572 /** 573 * iucv_enable 574 * 575 * This function makes iucv ready for use. It allocates the pathid 576 * table, declares an iucv interrupt buffer and enables the iucv 577 * interrupts. Called when the first user has registered an iucv 578 * handler. 579 */ 580 static int iucv_enable(void) 581 { 582 size_t alloc_size; 583 int cpu, rc; 584 585 get_online_cpus(); 586 rc = -ENOMEM; 587 alloc_size = iucv_max_pathid * sizeof(struct iucv_path); 588 iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); 589 if (!iucv_path_table) 590 goto out; 591 /* Declare per cpu buffers. */ 592 rc = -EIO; 593 for_each_online_cpu(cpu) 594 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 595 if (cpus_empty(iucv_buffer_cpumask)) 596 /* No cpu could declare an iucv buffer. */ 597 goto out; 598 put_online_cpus(); 599 return 0; 600 out: 601 kfree(iucv_path_table); 602 iucv_path_table = NULL; 603 put_online_cpus(); 604 return rc; 605 } 606 607 /** 608 * iucv_disable 609 * 610 * This function shuts down iucv. It disables iucv interrupts, retrieves 611 * the iucv interrupt buffer and frees the pathid table. Called after the 612 * last user unregister its iucv handler. 613 */ 614 static void iucv_disable(void) 615 { 616 get_online_cpus(); 617 on_each_cpu(iucv_retrieve_cpu, NULL, 1); 618 kfree(iucv_path_table); 619 iucv_path_table = NULL; 620 put_online_cpus(); 621 } 622 623 static int __cpuinit iucv_cpu_notify(struct notifier_block *self, 624 unsigned long action, void *hcpu) 625 { 626 cpumask_t cpumask; 627 long cpu = (long) hcpu; 628 629 switch (action) { 630 case CPU_UP_PREPARE: 631 case CPU_UP_PREPARE_FROZEN: 632 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), 633 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 634 if (!iucv_irq_data[cpu]) 635 return notifier_from_errno(-ENOMEM); 636 637 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), 638 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 639 if (!iucv_param[cpu]) { 640 kfree(iucv_irq_data[cpu]); 641 iucv_irq_data[cpu] = NULL; 642 return notifier_from_errno(-ENOMEM); 643 } 644 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), 645 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 646 if (!iucv_param_irq[cpu]) { 647 kfree(iucv_param[cpu]); 648 iucv_param[cpu] = NULL; 649 kfree(iucv_irq_data[cpu]); 650 iucv_irq_data[cpu] = NULL; 651 return notifier_from_errno(-ENOMEM); 652 } 653 break; 654 case CPU_UP_CANCELED: 655 case CPU_UP_CANCELED_FROZEN: 656 case CPU_DEAD: 657 case CPU_DEAD_FROZEN: 658 kfree(iucv_param_irq[cpu]); 659 iucv_param_irq[cpu] = NULL; 660 kfree(iucv_param[cpu]); 661 iucv_param[cpu] = NULL; 662 kfree(iucv_irq_data[cpu]); 663 iucv_irq_data[cpu] = NULL; 664 break; 665 case CPU_ONLINE: 666 case CPU_ONLINE_FROZEN: 667 case CPU_DOWN_FAILED: 668 case CPU_DOWN_FAILED_FROZEN: 669 if (!iucv_path_table) 670 break; 671 smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); 672 break; 673 case CPU_DOWN_PREPARE: 674 case CPU_DOWN_PREPARE_FROZEN: 675 if (!iucv_path_table) 676 break; 677 cpumask = iucv_buffer_cpumask; 678 cpu_clear(cpu, cpumask); 679 if (cpus_empty(cpumask)) 680 /* Can't offline last IUCV enabled cpu. */ 681 return notifier_from_errno(-EINVAL); 682 smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); 683 if (cpus_empty(iucv_irq_cpumask)) 684 smp_call_function_single(first_cpu(iucv_buffer_cpumask), 685 iucv_allow_cpu, NULL, 1); 686 break; 687 } 688 return NOTIFY_OK; 689 } 690 691 static struct notifier_block __refdata iucv_cpu_notifier = { 692 .notifier_call = iucv_cpu_notify, 693 }; 694 695 /** 696 * iucv_sever_pathid 697 * @pathid: path identification number. 698 * @userdata: 16-bytes of user data. 699 * 700 * Sever an iucv path to free up the pathid. Used internally. 701 */ 702 static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) 703 { 704 union iucv_param *parm; 705 706 parm = iucv_param_irq[smp_processor_id()]; 707 memset(parm, 0, sizeof(union iucv_param)); 708 if (userdata) 709 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 710 parm->ctrl.ippathid = pathid; 711 return iucv_call_b2f0(IUCV_SEVER, parm); 712 } 713 714 /** 715 * __iucv_cleanup_queue 716 * @dummy: unused dummy argument 717 * 718 * Nop function called via smp_call_function to force work items from 719 * pending external iucv interrupts to the work queue. 720 */ 721 static void __iucv_cleanup_queue(void *dummy) 722 { 723 } 724 725 /** 726 * iucv_cleanup_queue 727 * 728 * Function called after a path has been severed to find all remaining 729 * work items for the now stale pathid. The caller needs to hold the 730 * iucv_table_lock. 731 */ 732 static void iucv_cleanup_queue(void) 733 { 734 struct iucv_irq_list *p, *n; 735 736 /* 737 * When a path is severed, the pathid can be reused immediatly 738 * on a iucv connect or a connection pending interrupt. Remove 739 * all entries from the task queue that refer to a stale pathid 740 * (iucv_path_table[ix] == NULL). Only then do the iucv connect 741 * or deliver the connection pending interrupt. To get all the 742 * pending interrupts force them to the work queue by calling 743 * an empty function on all cpus. 744 */ 745 smp_call_function(__iucv_cleanup_queue, NULL, 1); 746 spin_lock_irq(&iucv_queue_lock); 747 list_for_each_entry_safe(p, n, &iucv_task_queue, list) { 748 /* Remove stale work items from the task queue. */ 749 if (iucv_path_table[p->data.ippathid] == NULL) { 750 list_del(&p->list); 751 kfree(p); 752 } 753 } 754 spin_unlock_irq(&iucv_queue_lock); 755 } 756 757 /** 758 * iucv_register: 759 * @handler: address of iucv handler structure 760 * @smp: != 0 indicates that the handler can deal with out of order messages 761 * 762 * Registers a driver with IUCV. 763 * 764 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid 765 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus. 766 */ 767 int iucv_register(struct iucv_handler *handler, int smp) 768 { 769 int rc; 770 771 if (!iucv_available) 772 return -ENOSYS; 773 mutex_lock(&iucv_register_mutex); 774 if (!smp) 775 iucv_nonsmp_handler++; 776 if (list_empty(&iucv_handler_list)) { 777 rc = iucv_enable(); 778 if (rc) 779 goto out_mutex; 780 } else if (!smp && iucv_nonsmp_handler == 1) 781 iucv_setmask_up(); 782 INIT_LIST_HEAD(&handler->paths); 783 784 spin_lock_bh(&iucv_table_lock); 785 list_add_tail(&handler->list, &iucv_handler_list); 786 spin_unlock_bh(&iucv_table_lock); 787 rc = 0; 788 out_mutex: 789 mutex_unlock(&iucv_register_mutex); 790 return rc; 791 } 792 EXPORT_SYMBOL(iucv_register); 793 794 /** 795 * iucv_unregister 796 * @handler: address of iucv handler structure 797 * @smp: != 0 indicates that the handler can deal with out of order messages 798 * 799 * Unregister driver from IUCV. 800 */ 801 void iucv_unregister(struct iucv_handler *handler, int smp) 802 { 803 struct iucv_path *p, *n; 804 805 mutex_lock(&iucv_register_mutex); 806 spin_lock_bh(&iucv_table_lock); 807 /* Remove handler from the iucv_handler_list. */ 808 list_del_init(&handler->list); 809 /* Sever all pathids still refering to the handler. */ 810 list_for_each_entry_safe(p, n, &handler->paths, list) { 811 iucv_sever_pathid(p->pathid, NULL); 812 iucv_path_table[p->pathid] = NULL; 813 list_del(&p->list); 814 iucv_path_free(p); 815 } 816 spin_unlock_bh(&iucv_table_lock); 817 if (!smp) 818 iucv_nonsmp_handler--; 819 if (list_empty(&iucv_handler_list)) 820 iucv_disable(); 821 else if (!smp && iucv_nonsmp_handler == 0) 822 iucv_setmask_mp(); 823 mutex_unlock(&iucv_register_mutex); 824 } 825 EXPORT_SYMBOL(iucv_unregister); 826 827 static int iucv_reboot_event(struct notifier_block *this, 828 unsigned long event, void *ptr) 829 { 830 int i, rc; 831 832 get_online_cpus(); 833 on_each_cpu(iucv_block_cpu, NULL, 1); 834 preempt_disable(); 835 for (i = 0; i < iucv_max_pathid; i++) { 836 if (iucv_path_table[i]) 837 rc = iucv_sever_pathid(i, NULL); 838 } 839 preempt_enable(); 840 put_online_cpus(); 841 iucv_disable(); 842 return NOTIFY_DONE; 843 } 844 845 static struct notifier_block iucv_reboot_notifier = { 846 .notifier_call = iucv_reboot_event, 847 }; 848 849 /** 850 * iucv_path_accept 851 * @path: address of iucv path structure 852 * @handler: address of iucv handler structure 853 * @userdata: 16 bytes of data reflected to the communication partner 854 * @private: private data passed to interrupt handlers for this path 855 * 856 * This function is issued after the user received a connection pending 857 * external interrupt and now wishes to complete the IUCV communication path. 858 * 859 * Returns the result of the CP IUCV call. 860 */ 861 int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, 862 u8 userdata[16], void *private) 863 { 864 union iucv_param *parm; 865 int rc; 866 867 local_bh_disable(); 868 if (cpus_empty(iucv_buffer_cpumask)) { 869 rc = -EIO; 870 goto out; 871 } 872 /* Prepare parameter block. */ 873 parm = iucv_param[smp_processor_id()]; 874 memset(parm, 0, sizeof(union iucv_param)); 875 parm->ctrl.ippathid = path->pathid; 876 parm->ctrl.ipmsglim = path->msglim; 877 if (userdata) 878 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 879 parm->ctrl.ipflags1 = path->flags; 880 881 rc = iucv_call_b2f0(IUCV_ACCEPT, parm); 882 if (!rc) { 883 path->private = private; 884 path->msglim = parm->ctrl.ipmsglim; 885 path->flags = parm->ctrl.ipflags1; 886 } 887 out: 888 local_bh_enable(); 889 return rc; 890 } 891 EXPORT_SYMBOL(iucv_path_accept); 892 893 /** 894 * iucv_path_connect 895 * @path: address of iucv path structure 896 * @handler: address of iucv handler structure 897 * @userid: 8-byte user identification 898 * @system: 8-byte target system identification 899 * @userdata: 16 bytes of data reflected to the communication partner 900 * @private: private data passed to interrupt handlers for this path 901 * 902 * This function establishes an IUCV path. Although the connect may complete 903 * successfully, you are not able to use the path until you receive an IUCV 904 * Connection Complete external interrupt. 905 * 906 * Returns the result of the CP IUCV call. 907 */ 908 int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, 909 u8 userid[8], u8 system[8], u8 userdata[16], 910 void *private) 911 { 912 union iucv_param *parm; 913 int rc; 914 915 spin_lock_bh(&iucv_table_lock); 916 iucv_cleanup_queue(); 917 if (cpus_empty(iucv_buffer_cpumask)) { 918 rc = -EIO; 919 goto out; 920 } 921 parm = iucv_param[smp_processor_id()]; 922 memset(parm, 0, sizeof(union iucv_param)); 923 parm->ctrl.ipmsglim = path->msglim; 924 parm->ctrl.ipflags1 = path->flags; 925 if (userid) { 926 memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid)); 927 ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); 928 EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid)); 929 } 930 if (system) { 931 memcpy(parm->ctrl.iptarget, system, 932 sizeof(parm->ctrl.iptarget)); 933 ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); 934 EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget)); 935 } 936 if (userdata) 937 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 938 939 rc = iucv_call_b2f0(IUCV_CONNECT, parm); 940 if (!rc) { 941 if (parm->ctrl.ippathid < iucv_max_pathid) { 942 path->pathid = parm->ctrl.ippathid; 943 path->msglim = parm->ctrl.ipmsglim; 944 path->flags = parm->ctrl.ipflags1; 945 path->handler = handler; 946 path->private = private; 947 list_add_tail(&path->list, &handler->paths); 948 iucv_path_table[path->pathid] = path; 949 } else { 950 iucv_sever_pathid(parm->ctrl.ippathid, 951 iucv_error_pathid); 952 rc = -EIO; 953 } 954 } 955 out: 956 spin_unlock_bh(&iucv_table_lock); 957 return rc; 958 } 959 EXPORT_SYMBOL(iucv_path_connect); 960 961 /** 962 * iucv_path_quiesce: 963 * @path: address of iucv path structure 964 * @userdata: 16 bytes of data reflected to the communication partner 965 * 966 * This function temporarily suspends incoming messages on an IUCV path. 967 * You can later reactivate the path by invoking the iucv_resume function. 968 * 969 * Returns the result from the CP IUCV call. 970 */ 971 int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) 972 { 973 union iucv_param *parm; 974 int rc; 975 976 local_bh_disable(); 977 if (cpus_empty(iucv_buffer_cpumask)) { 978 rc = -EIO; 979 goto out; 980 } 981 parm = iucv_param[smp_processor_id()]; 982 memset(parm, 0, sizeof(union iucv_param)); 983 if (userdata) 984 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 985 parm->ctrl.ippathid = path->pathid; 986 rc = iucv_call_b2f0(IUCV_QUIESCE, parm); 987 out: 988 local_bh_enable(); 989 return rc; 990 } 991 EXPORT_SYMBOL(iucv_path_quiesce); 992 993 /** 994 * iucv_path_resume: 995 * @path: address of iucv path structure 996 * @userdata: 16 bytes of data reflected to the communication partner 997 * 998 * This function resumes incoming messages on an IUCV path that has 999 * been stopped with iucv_path_quiesce. 1000 * 1001 * Returns the result from the CP IUCV call. 1002 */ 1003 int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) 1004 { 1005 union iucv_param *parm; 1006 int rc; 1007 1008 local_bh_disable(); 1009 if (cpus_empty(iucv_buffer_cpumask)) { 1010 rc = -EIO; 1011 goto out; 1012 } 1013 parm = iucv_param[smp_processor_id()]; 1014 memset(parm, 0, sizeof(union iucv_param)); 1015 if (userdata) 1016 memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); 1017 parm->ctrl.ippathid = path->pathid; 1018 rc = iucv_call_b2f0(IUCV_RESUME, parm); 1019 out: 1020 local_bh_enable(); 1021 return rc; 1022 } 1023 1024 /** 1025 * iucv_path_sever 1026 * @path: address of iucv path structure 1027 * @userdata: 16 bytes of data reflected to the communication partner 1028 * 1029 * This function terminates an IUCV path. 1030 * 1031 * Returns the result from the CP IUCV call. 1032 */ 1033 int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) 1034 { 1035 int rc; 1036 1037 preempt_disable(); 1038 if (cpus_empty(iucv_buffer_cpumask)) { 1039 rc = -EIO; 1040 goto out; 1041 } 1042 if (iucv_active_cpu != smp_processor_id()) 1043 spin_lock_bh(&iucv_table_lock); 1044 rc = iucv_sever_pathid(path->pathid, userdata); 1045 iucv_path_table[path->pathid] = NULL; 1046 list_del_init(&path->list); 1047 if (iucv_active_cpu != smp_processor_id()) 1048 spin_unlock_bh(&iucv_table_lock); 1049 out: 1050 preempt_enable(); 1051 return rc; 1052 } 1053 EXPORT_SYMBOL(iucv_path_sever); 1054 1055 /** 1056 * iucv_message_purge 1057 * @path: address of iucv path structure 1058 * @msg: address of iucv msg structure 1059 * @srccls: source class of message 1060 * 1061 * Cancels a message you have sent. 1062 * 1063 * Returns the result from the CP IUCV call. 1064 */ 1065 int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, 1066 u32 srccls) 1067 { 1068 union iucv_param *parm; 1069 int rc; 1070 1071 local_bh_disable(); 1072 if (cpus_empty(iucv_buffer_cpumask)) { 1073 rc = -EIO; 1074 goto out; 1075 } 1076 parm = iucv_param[smp_processor_id()]; 1077 memset(parm, 0, sizeof(union iucv_param)); 1078 parm->purge.ippathid = path->pathid; 1079 parm->purge.ipmsgid = msg->id; 1080 parm->purge.ipsrccls = srccls; 1081 parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID; 1082 rc = iucv_call_b2f0(IUCV_PURGE, parm); 1083 if (!rc) { 1084 msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; 1085 msg->tag = parm->purge.ipmsgtag; 1086 } 1087 out: 1088 local_bh_enable(); 1089 return rc; 1090 } 1091 EXPORT_SYMBOL(iucv_message_purge); 1092 1093 /** 1094 * iucv_message_receive_iprmdata 1095 * @path: address of iucv path structure 1096 * @msg: address of iucv msg structure 1097 * @flags: how the message is received (IUCV_IPBUFLST) 1098 * @buffer: address of data buffer or address of struct iucv_array 1099 * @size: length of data buffer 1100 * @residual: 1101 * 1102 * Internal function used by iucv_message_receive and __iucv_message_receive 1103 * to receive RMDATA data stored in struct iucv_message. 1104 */ 1105 static int iucv_message_receive_iprmdata(struct iucv_path *path, 1106 struct iucv_message *msg, 1107 u8 flags, void *buffer, 1108 size_t size, size_t *residual) 1109 { 1110 struct iucv_array *array; 1111 u8 *rmmsg; 1112 size_t copy; 1113 1114 /* 1115 * Message is 8 bytes long and has been stored to the 1116 * message descriptor itself. 1117 */ 1118 if (residual) 1119 *residual = abs(size - 8); 1120 rmmsg = msg->rmmsg; 1121 if (flags & IUCV_IPBUFLST) { 1122 /* Copy to struct iucv_array. */ 1123 size = (size < 8) ? size : 8; 1124 for (array = buffer; size > 0; array++) { 1125 copy = min_t(size_t, size, array->length); 1126 memcpy((u8 *)(addr_t) array->address, 1127 rmmsg, copy); 1128 rmmsg += copy; 1129 size -= copy; 1130 } 1131 } else { 1132 /* Copy to direct buffer. */ 1133 memcpy(buffer, rmmsg, min_t(size_t, size, 8)); 1134 } 1135 return 0; 1136 } 1137 1138 /** 1139 * __iucv_message_receive 1140 * @path: address of iucv path structure 1141 * @msg: address of iucv msg structure 1142 * @flags: how the message is received (IUCV_IPBUFLST) 1143 * @buffer: address of data buffer or address of struct iucv_array 1144 * @size: length of data buffer 1145 * @residual: 1146 * 1147 * This function receives messages that are being sent to you over 1148 * established paths. This function will deal with RMDATA messages 1149 * embedded in struct iucv_message as well. 1150 * 1151 * Locking: no locking 1152 * 1153 * Returns the result from the CP IUCV call. 1154 */ 1155 int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, 1156 u8 flags, void *buffer, size_t size, size_t *residual) 1157 { 1158 union iucv_param *parm; 1159 int rc; 1160 1161 if (msg->flags & IUCV_IPRMDATA) 1162 return iucv_message_receive_iprmdata(path, msg, flags, 1163 buffer, size, residual); 1164 if (cpus_empty(iucv_buffer_cpumask)) { 1165 rc = -EIO; 1166 goto out; 1167 } 1168 parm = iucv_param[smp_processor_id()]; 1169 memset(parm, 0, sizeof(union iucv_param)); 1170 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1171 parm->db.ipbfln1f = (u32) size; 1172 parm->db.ipmsgid = msg->id; 1173 parm->db.ippathid = path->pathid; 1174 parm->db.iptrgcls = msg->class; 1175 parm->db.ipflags1 = (flags | IUCV_IPFGPID | 1176 IUCV_IPFGMID | IUCV_IPTRGCLS); 1177 rc = iucv_call_b2f0(IUCV_RECEIVE, parm); 1178 if (!rc || rc == 5) { 1179 msg->flags = parm->db.ipflags1; 1180 if (residual) 1181 *residual = parm->db.ipbfln1f; 1182 } 1183 out: 1184 return rc; 1185 } 1186 EXPORT_SYMBOL(__iucv_message_receive); 1187 1188 /** 1189 * iucv_message_receive 1190 * @path: address of iucv path structure 1191 * @msg: address of iucv msg structure 1192 * @flags: how the message is received (IUCV_IPBUFLST) 1193 * @buffer: address of data buffer or address of struct iucv_array 1194 * @size: length of data buffer 1195 * @residual: 1196 * 1197 * This function receives messages that are being sent to you over 1198 * established paths. This function will deal with RMDATA messages 1199 * embedded in struct iucv_message as well. 1200 * 1201 * Locking: local_bh_enable/local_bh_disable 1202 * 1203 * Returns the result from the CP IUCV call. 1204 */ 1205 int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, 1206 u8 flags, void *buffer, size_t size, size_t *residual) 1207 { 1208 int rc; 1209 1210 if (msg->flags & IUCV_IPRMDATA) 1211 return iucv_message_receive_iprmdata(path, msg, flags, 1212 buffer, size, residual); 1213 local_bh_disable(); 1214 rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); 1215 local_bh_enable(); 1216 return rc; 1217 } 1218 EXPORT_SYMBOL(iucv_message_receive); 1219 1220 /** 1221 * iucv_message_reject 1222 * @path: address of iucv path structure 1223 * @msg: address of iucv msg structure 1224 * 1225 * The reject function refuses a specified message. Between the time you 1226 * are notified of a message and the time that you complete the message, 1227 * the message may be rejected. 1228 * 1229 * Returns the result from the CP IUCV call. 1230 */ 1231 int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) 1232 { 1233 union iucv_param *parm; 1234 int rc; 1235 1236 local_bh_disable(); 1237 if (cpus_empty(iucv_buffer_cpumask)) { 1238 rc = -EIO; 1239 goto out; 1240 } 1241 parm = iucv_param[smp_processor_id()]; 1242 memset(parm, 0, sizeof(union iucv_param)); 1243 parm->db.ippathid = path->pathid; 1244 parm->db.ipmsgid = msg->id; 1245 parm->db.iptrgcls = msg->class; 1246 parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); 1247 rc = iucv_call_b2f0(IUCV_REJECT, parm); 1248 out: 1249 local_bh_enable(); 1250 return rc; 1251 } 1252 EXPORT_SYMBOL(iucv_message_reject); 1253 1254 /** 1255 * iucv_message_reply 1256 * @path: address of iucv path structure 1257 * @msg: address of iucv msg structure 1258 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) 1259 * @reply: address of reply data buffer or address of struct iucv_array 1260 * @size: length of reply data buffer 1261 * 1262 * This function responds to the two-way messages that you receive. You 1263 * must identify completely the message to which you wish to reply. ie, 1264 * pathid, msgid, and trgcls. Prmmsg signifies the data is moved into 1265 * the parameter list. 1266 * 1267 * Returns the result from the CP IUCV call. 1268 */ 1269 int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, 1270 u8 flags, void *reply, size_t size) 1271 { 1272 union iucv_param *parm; 1273 int rc; 1274 1275 local_bh_disable(); 1276 if (cpus_empty(iucv_buffer_cpumask)) { 1277 rc = -EIO; 1278 goto out; 1279 } 1280 parm = iucv_param[smp_processor_id()]; 1281 memset(parm, 0, sizeof(union iucv_param)); 1282 if (flags & IUCV_IPRMDATA) { 1283 parm->dpl.ippathid = path->pathid; 1284 parm->dpl.ipflags1 = flags; 1285 parm->dpl.ipmsgid = msg->id; 1286 parm->dpl.iptrgcls = msg->class; 1287 memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); 1288 } else { 1289 parm->db.ipbfadr1 = (u32)(addr_t) reply; 1290 parm->db.ipbfln1f = (u32) size; 1291 parm->db.ippathid = path->pathid; 1292 parm->db.ipflags1 = flags; 1293 parm->db.ipmsgid = msg->id; 1294 parm->db.iptrgcls = msg->class; 1295 } 1296 rc = iucv_call_b2f0(IUCV_REPLY, parm); 1297 out: 1298 local_bh_enable(); 1299 return rc; 1300 } 1301 EXPORT_SYMBOL(iucv_message_reply); 1302 1303 /** 1304 * __iucv_message_send 1305 * @path: address of iucv path structure 1306 * @msg: address of iucv msg structure 1307 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) 1308 * @srccls: source class of message 1309 * @buffer: address of send buffer or address of struct iucv_array 1310 * @size: length of send buffer 1311 * 1312 * This function transmits data to another application. Data to be 1313 * transmitted is in a buffer and this is a one-way message and the 1314 * receiver will not reply to the message. 1315 * 1316 * Locking: no locking 1317 * 1318 * Returns the result from the CP IUCV call. 1319 */ 1320 int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, 1321 u8 flags, u32 srccls, void *buffer, size_t size) 1322 { 1323 union iucv_param *parm; 1324 int rc; 1325 1326 if (cpus_empty(iucv_buffer_cpumask)) { 1327 rc = -EIO; 1328 goto out; 1329 } 1330 parm = iucv_param[smp_processor_id()]; 1331 memset(parm, 0, sizeof(union iucv_param)); 1332 if (flags & IUCV_IPRMDATA) { 1333 /* Message of 8 bytes can be placed into the parameter list. */ 1334 parm->dpl.ippathid = path->pathid; 1335 parm->dpl.ipflags1 = flags | IUCV_IPNORPY; 1336 parm->dpl.iptrgcls = msg->class; 1337 parm->dpl.ipsrccls = srccls; 1338 parm->dpl.ipmsgtag = msg->tag; 1339 memcpy(parm->dpl.iprmmsg, buffer, 8); 1340 } else { 1341 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1342 parm->db.ipbfln1f = (u32) size; 1343 parm->db.ippathid = path->pathid; 1344 parm->db.ipflags1 = flags | IUCV_IPNORPY; 1345 parm->db.iptrgcls = msg->class; 1346 parm->db.ipsrccls = srccls; 1347 parm->db.ipmsgtag = msg->tag; 1348 } 1349 rc = iucv_call_b2f0(IUCV_SEND, parm); 1350 if (!rc) 1351 msg->id = parm->db.ipmsgid; 1352 out: 1353 return rc; 1354 } 1355 EXPORT_SYMBOL(__iucv_message_send); 1356 1357 /** 1358 * iucv_message_send 1359 * @path: address of iucv path structure 1360 * @msg: address of iucv msg structure 1361 * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) 1362 * @srccls: source class of message 1363 * @buffer: address of send buffer or address of struct iucv_array 1364 * @size: length of send buffer 1365 * 1366 * This function transmits data to another application. Data to be 1367 * transmitted is in a buffer and this is a one-way message and the 1368 * receiver will not reply to the message. 1369 * 1370 * Locking: local_bh_enable/local_bh_disable 1371 * 1372 * Returns the result from the CP IUCV call. 1373 */ 1374 int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, 1375 u8 flags, u32 srccls, void *buffer, size_t size) 1376 { 1377 int rc; 1378 1379 local_bh_disable(); 1380 rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); 1381 local_bh_enable(); 1382 return rc; 1383 } 1384 EXPORT_SYMBOL(iucv_message_send); 1385 1386 /** 1387 * iucv_message_send2way 1388 * @path: address of iucv path structure 1389 * @msg: address of iucv msg structure 1390 * @flags: how the message is sent and the reply is received 1391 * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST) 1392 * @srccls: source class of message 1393 * @buffer: address of send buffer or address of struct iucv_array 1394 * @size: length of send buffer 1395 * @ansbuf: address of answer buffer or address of struct iucv_array 1396 * @asize: size of reply buffer 1397 * 1398 * This function transmits data to another application. Data to be 1399 * transmitted is in a buffer. The receiver of the send is expected to 1400 * reply to the message and a buffer is provided into which IUCV moves 1401 * the reply to this message. 1402 * 1403 * Returns the result from the CP IUCV call. 1404 */ 1405 int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, 1406 u8 flags, u32 srccls, void *buffer, size_t size, 1407 void *answer, size_t asize, size_t *residual) 1408 { 1409 union iucv_param *parm; 1410 int rc; 1411 1412 local_bh_disable(); 1413 if (cpus_empty(iucv_buffer_cpumask)) { 1414 rc = -EIO; 1415 goto out; 1416 } 1417 parm = iucv_param[smp_processor_id()]; 1418 memset(parm, 0, sizeof(union iucv_param)); 1419 if (flags & IUCV_IPRMDATA) { 1420 parm->dpl.ippathid = path->pathid; 1421 parm->dpl.ipflags1 = path->flags; /* priority message */ 1422 parm->dpl.iptrgcls = msg->class; 1423 parm->dpl.ipsrccls = srccls; 1424 parm->dpl.ipmsgtag = msg->tag; 1425 parm->dpl.ipbfadr2 = (u32)(addr_t) answer; 1426 parm->dpl.ipbfln2f = (u32) asize; 1427 memcpy(parm->dpl.iprmmsg, buffer, 8); 1428 } else { 1429 parm->db.ippathid = path->pathid; 1430 parm->db.ipflags1 = path->flags; /* priority message */ 1431 parm->db.iptrgcls = msg->class; 1432 parm->db.ipsrccls = srccls; 1433 parm->db.ipmsgtag = msg->tag; 1434 parm->db.ipbfadr1 = (u32)(addr_t) buffer; 1435 parm->db.ipbfln1f = (u32) size; 1436 parm->db.ipbfadr2 = (u32)(addr_t) answer; 1437 parm->db.ipbfln2f = (u32) asize; 1438 } 1439 rc = iucv_call_b2f0(IUCV_SEND, parm); 1440 if (!rc) 1441 msg->id = parm->db.ipmsgid; 1442 out: 1443 local_bh_enable(); 1444 return rc; 1445 } 1446 EXPORT_SYMBOL(iucv_message_send2way); 1447 1448 /** 1449 * iucv_path_pending 1450 * @data: Pointer to external interrupt buffer 1451 * 1452 * Process connection pending work item. Called from tasklet while holding 1453 * iucv_table_lock. 1454 */ 1455 struct iucv_path_pending { 1456 u16 ippathid; 1457 u8 ipflags1; 1458 u8 iptype; 1459 u16 ipmsglim; 1460 u16 res1; 1461 u8 ipvmid[8]; 1462 u8 ipuser[16]; 1463 u32 res3; 1464 u8 ippollfg; 1465 u8 res4[3]; 1466 } __packed; 1467 1468 static void iucv_path_pending(struct iucv_irq_data *data) 1469 { 1470 struct iucv_path_pending *ipp = (void *) data; 1471 struct iucv_handler *handler; 1472 struct iucv_path *path; 1473 char *error; 1474 1475 BUG_ON(iucv_path_table[ipp->ippathid]); 1476 /* New pathid, handler found. Create a new path struct. */ 1477 error = iucv_error_no_memory; 1478 path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC); 1479 if (!path) 1480 goto out_sever; 1481 path->pathid = ipp->ippathid; 1482 iucv_path_table[path->pathid] = path; 1483 EBCASC(ipp->ipvmid, 8); 1484 1485 /* Call registered handler until one is found that wants the path. */ 1486 list_for_each_entry(handler, &iucv_handler_list, list) { 1487 if (!handler->path_pending) 1488 continue; 1489 /* 1490 * Add path to handler to allow a call to iucv_path_sever 1491 * inside the path_pending function. If the handler returns 1492 * an error remove the path from the handler again. 1493 */ 1494 list_add(&path->list, &handler->paths); 1495 path->handler = handler; 1496 if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser)) 1497 return; 1498 list_del(&path->list); 1499 path->handler = NULL; 1500 } 1501 /* No handler wanted the path. */ 1502 iucv_path_table[path->pathid] = NULL; 1503 iucv_path_free(path); 1504 error = iucv_error_no_listener; 1505 out_sever: 1506 iucv_sever_pathid(ipp->ippathid, error); 1507 } 1508 1509 /** 1510 * iucv_path_complete 1511 * @data: Pointer to external interrupt buffer 1512 * 1513 * Process connection complete work item. Called from tasklet while holding 1514 * iucv_table_lock. 1515 */ 1516 struct iucv_path_complete { 1517 u16 ippathid; 1518 u8 ipflags1; 1519 u8 iptype; 1520 u16 ipmsglim; 1521 u16 res1; 1522 u8 res2[8]; 1523 u8 ipuser[16]; 1524 u32 res3; 1525 u8 ippollfg; 1526 u8 res4[3]; 1527 } __packed; 1528 1529 static void iucv_path_complete(struct iucv_irq_data *data) 1530 { 1531 struct iucv_path_complete *ipc = (void *) data; 1532 struct iucv_path *path = iucv_path_table[ipc->ippathid]; 1533 1534 if (path) 1535 path->flags = ipc->ipflags1; 1536 if (path && path->handler && path->handler->path_complete) 1537 path->handler->path_complete(path, ipc->ipuser); 1538 } 1539 1540 /** 1541 * iucv_path_severed 1542 * @data: Pointer to external interrupt buffer 1543 * 1544 * Process connection severed work item. Called from tasklet while holding 1545 * iucv_table_lock. 1546 */ 1547 struct iucv_path_severed { 1548 u16 ippathid; 1549 u8 res1; 1550 u8 iptype; 1551 u32 res2; 1552 u8 res3[8]; 1553 u8 ipuser[16]; 1554 u32 res4; 1555 u8 ippollfg; 1556 u8 res5[3]; 1557 } __packed; 1558 1559 static void iucv_path_severed(struct iucv_irq_data *data) 1560 { 1561 struct iucv_path_severed *ips = (void *) data; 1562 struct iucv_path *path = iucv_path_table[ips->ippathid]; 1563 1564 if (!path || !path->handler) /* Already severed */ 1565 return; 1566 if (path->handler->path_severed) 1567 path->handler->path_severed(path, ips->ipuser); 1568 else { 1569 iucv_sever_pathid(path->pathid, NULL); 1570 iucv_path_table[path->pathid] = NULL; 1571 list_del(&path->list); 1572 iucv_path_free(path); 1573 } 1574 } 1575 1576 /** 1577 * iucv_path_quiesced 1578 * @data: Pointer to external interrupt buffer 1579 * 1580 * Process connection quiesced work item. Called from tasklet while holding 1581 * iucv_table_lock. 1582 */ 1583 struct iucv_path_quiesced { 1584 u16 ippathid; 1585 u8 res1; 1586 u8 iptype; 1587 u32 res2; 1588 u8 res3[8]; 1589 u8 ipuser[16]; 1590 u32 res4; 1591 u8 ippollfg; 1592 u8 res5[3]; 1593 } __packed; 1594 1595 static void iucv_path_quiesced(struct iucv_irq_data *data) 1596 { 1597 struct iucv_path_quiesced *ipq = (void *) data; 1598 struct iucv_path *path = iucv_path_table[ipq->ippathid]; 1599 1600 if (path && path->handler && path->handler->path_quiesced) 1601 path->handler->path_quiesced(path, ipq->ipuser); 1602 } 1603 1604 /** 1605 * iucv_path_resumed 1606 * @data: Pointer to external interrupt buffer 1607 * 1608 * Process connection resumed work item. Called from tasklet while holding 1609 * iucv_table_lock. 1610 */ 1611 struct iucv_path_resumed { 1612 u16 ippathid; 1613 u8 res1; 1614 u8 iptype; 1615 u32 res2; 1616 u8 res3[8]; 1617 u8 ipuser[16]; 1618 u32 res4; 1619 u8 ippollfg; 1620 u8 res5[3]; 1621 } __packed; 1622 1623 static void iucv_path_resumed(struct iucv_irq_data *data) 1624 { 1625 struct iucv_path_resumed *ipr = (void *) data; 1626 struct iucv_path *path = iucv_path_table[ipr->ippathid]; 1627 1628 if (path && path->handler && path->handler->path_resumed) 1629 path->handler->path_resumed(path, ipr->ipuser); 1630 } 1631 1632 /** 1633 * iucv_message_complete 1634 * @data: Pointer to external interrupt buffer 1635 * 1636 * Process message complete work item. Called from tasklet while holding 1637 * iucv_table_lock. 1638 */ 1639 struct iucv_message_complete { 1640 u16 ippathid; 1641 u8 ipflags1; 1642 u8 iptype; 1643 u32 ipmsgid; 1644 u32 ipaudit; 1645 u8 iprmmsg[8]; 1646 u32 ipsrccls; 1647 u32 ipmsgtag; 1648 u32 res; 1649 u32 ipbfln2f; 1650 u8 ippollfg; 1651 u8 res2[3]; 1652 } __packed; 1653 1654 static void iucv_message_complete(struct iucv_irq_data *data) 1655 { 1656 struct iucv_message_complete *imc = (void *) data; 1657 struct iucv_path *path = iucv_path_table[imc->ippathid]; 1658 struct iucv_message msg; 1659 1660 if (path && path->handler && path->handler->message_complete) { 1661 msg.flags = imc->ipflags1; 1662 msg.id = imc->ipmsgid; 1663 msg.audit = imc->ipaudit; 1664 memcpy(msg.rmmsg, imc->iprmmsg, 8); 1665 msg.class = imc->ipsrccls; 1666 msg.tag = imc->ipmsgtag; 1667 msg.length = imc->ipbfln2f; 1668 path->handler->message_complete(path, &msg); 1669 } 1670 } 1671 1672 /** 1673 * iucv_message_pending 1674 * @data: Pointer to external interrupt buffer 1675 * 1676 * Process message pending work item. Called from tasklet while holding 1677 * iucv_table_lock. 1678 */ 1679 struct iucv_message_pending { 1680 u16 ippathid; 1681 u8 ipflags1; 1682 u8 iptype; 1683 u32 ipmsgid; 1684 u32 iptrgcls; 1685 union { 1686 u32 iprmmsg1_u32; 1687 u8 iprmmsg1[4]; 1688 } ln1msg1; 1689 union { 1690 u32 ipbfln1f; 1691 u8 iprmmsg2[4]; 1692 } ln1msg2; 1693 u32 res1[3]; 1694 u32 ipbfln2f; 1695 u8 ippollfg; 1696 u8 res2[3]; 1697 } __packed; 1698 1699 static void iucv_message_pending(struct iucv_irq_data *data) 1700 { 1701 struct iucv_message_pending *imp = (void *) data; 1702 struct iucv_path *path = iucv_path_table[imp->ippathid]; 1703 struct iucv_message msg; 1704 1705 if (path && path->handler && path->handler->message_pending) { 1706 msg.flags = imp->ipflags1; 1707 msg.id = imp->ipmsgid; 1708 msg.class = imp->iptrgcls; 1709 if (imp->ipflags1 & IUCV_IPRMDATA) { 1710 memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8); 1711 msg.length = 8; 1712 } else 1713 msg.length = imp->ln1msg2.ipbfln1f; 1714 msg.reply_size = imp->ipbfln2f; 1715 path->handler->message_pending(path, &msg); 1716 } 1717 } 1718 1719 /** 1720 * iucv_tasklet_fn: 1721 * 1722 * This tasklet loops over the queue of irq buffers created by 1723 * iucv_external_interrupt, calls the appropriate action handler 1724 * and then frees the buffer. 1725 */ 1726 static void iucv_tasklet_fn(unsigned long ignored) 1727 { 1728 typedef void iucv_irq_fn(struct iucv_irq_data *); 1729 static iucv_irq_fn *irq_fn[] = { 1730 [0x02] = iucv_path_complete, 1731 [0x03] = iucv_path_severed, 1732 [0x04] = iucv_path_quiesced, 1733 [0x05] = iucv_path_resumed, 1734 [0x06] = iucv_message_complete, 1735 [0x07] = iucv_message_complete, 1736 [0x08] = iucv_message_pending, 1737 [0x09] = iucv_message_pending, 1738 }; 1739 LIST_HEAD(task_queue); 1740 struct iucv_irq_list *p, *n; 1741 1742 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ 1743 if (!spin_trylock(&iucv_table_lock)) { 1744 tasklet_schedule(&iucv_tasklet); 1745 return; 1746 } 1747 iucv_active_cpu = smp_processor_id(); 1748 1749 spin_lock_irq(&iucv_queue_lock); 1750 list_splice_init(&iucv_task_queue, &task_queue); 1751 spin_unlock_irq(&iucv_queue_lock); 1752 1753 list_for_each_entry_safe(p, n, &task_queue, list) { 1754 list_del_init(&p->list); 1755 irq_fn[p->data.iptype](&p->data); 1756 kfree(p); 1757 } 1758 1759 iucv_active_cpu = -1; 1760 spin_unlock(&iucv_table_lock); 1761 } 1762 1763 /** 1764 * iucv_work_fn: 1765 * 1766 * This work function loops over the queue of path pending irq blocks 1767 * created by iucv_external_interrupt, calls the appropriate action 1768 * handler and then frees the buffer. 1769 */ 1770 static void iucv_work_fn(struct work_struct *work) 1771 { 1772 LIST_HEAD(work_queue); 1773 struct iucv_irq_list *p, *n; 1774 1775 /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */ 1776 spin_lock_bh(&iucv_table_lock); 1777 iucv_active_cpu = smp_processor_id(); 1778 1779 spin_lock_irq(&iucv_queue_lock); 1780 list_splice_init(&iucv_work_queue, &work_queue); 1781 spin_unlock_irq(&iucv_queue_lock); 1782 1783 iucv_cleanup_queue(); 1784 list_for_each_entry_safe(p, n, &work_queue, list) { 1785 list_del_init(&p->list); 1786 iucv_path_pending(&p->data); 1787 kfree(p); 1788 } 1789 1790 iucv_active_cpu = -1; 1791 spin_unlock_bh(&iucv_table_lock); 1792 } 1793 1794 /** 1795 * iucv_external_interrupt 1796 * @code: irq code 1797 * 1798 * Handles external interrupts coming in from CP. 1799 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). 1800 */ 1801 static void iucv_external_interrupt(u16 code) 1802 { 1803 struct iucv_irq_data *p; 1804 struct iucv_irq_list *work; 1805 1806 p = iucv_irq_data[smp_processor_id()]; 1807 if (p->ippathid >= iucv_max_pathid) { 1808 WARN_ON(p->ippathid >= iucv_max_pathid); 1809 iucv_sever_pathid(p->ippathid, iucv_error_no_listener); 1810 return; 1811 } 1812 BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); 1813 work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); 1814 if (!work) { 1815 pr_warning("iucv_external_interrupt: out of memory\n"); 1816 return; 1817 } 1818 memcpy(&work->data, p, sizeof(work->data)); 1819 spin_lock(&iucv_queue_lock); 1820 if (p->iptype == 0x01) { 1821 /* Path pending interrupt. */ 1822 list_add_tail(&work->list, &iucv_work_queue); 1823 schedule_work(&iucv_work); 1824 } else { 1825 /* The other interrupts. */ 1826 list_add_tail(&work->list, &iucv_task_queue); 1827 tasklet_schedule(&iucv_tasklet); 1828 } 1829 spin_unlock(&iucv_queue_lock); 1830 } 1831 1832 static int iucv_pm_prepare(struct device *dev) 1833 { 1834 int rc = 0; 1835 1836 #ifdef CONFIG_PM_DEBUG 1837 printk(KERN_INFO "iucv_pm_prepare\n"); 1838 #endif 1839 if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) 1840 rc = dev->driver->pm->prepare(dev); 1841 return rc; 1842 } 1843 1844 static void iucv_pm_complete(struct device *dev) 1845 { 1846 #ifdef CONFIG_PM_DEBUG 1847 printk(KERN_INFO "iucv_pm_complete\n"); 1848 #endif 1849 if (dev->driver && dev->driver->pm && dev->driver->pm->complete) 1850 dev->driver->pm->complete(dev); 1851 } 1852 1853 /** 1854 * iucv_path_table_empty() - determine if iucv path table is empty 1855 * 1856 * Returns 0 if there are still iucv pathes defined 1857 * 1 if there are no iucv pathes defined 1858 */ 1859 int iucv_path_table_empty(void) 1860 { 1861 int i; 1862 1863 for (i = 0; i < iucv_max_pathid; i++) { 1864 if (iucv_path_table[i]) 1865 return 0; 1866 } 1867 return 1; 1868 } 1869 1870 /** 1871 * iucv_pm_freeze() - Freeze PM callback 1872 * @dev: iucv-based device 1873 * 1874 * disable iucv interrupts 1875 * invoke callback function of the iucv-based driver 1876 * shut down iucv, if no iucv-pathes are established anymore 1877 */ 1878 static int iucv_pm_freeze(struct device *dev) 1879 { 1880 int cpu; 1881 struct iucv_irq_list *p, *n; 1882 int rc = 0; 1883 1884 #ifdef CONFIG_PM_DEBUG 1885 printk(KERN_WARNING "iucv_pm_freeze\n"); 1886 #endif 1887 if (iucv_pm_state != IUCV_PM_FREEZING) { 1888 for_each_cpu_mask_nr(cpu, iucv_irq_cpumask) 1889 smp_call_function_single(cpu, iucv_block_cpu_almost, 1890 NULL, 1); 1891 cancel_work_sync(&iucv_work); 1892 list_for_each_entry_safe(p, n, &iucv_work_queue, list) { 1893 list_del_init(&p->list); 1894 iucv_sever_pathid(p->data.ippathid, 1895 iucv_error_no_listener); 1896 kfree(p); 1897 } 1898 } 1899 iucv_pm_state = IUCV_PM_FREEZING; 1900 if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) 1901 rc = dev->driver->pm->freeze(dev); 1902 if (iucv_path_table_empty()) 1903 iucv_disable(); 1904 return rc; 1905 } 1906 1907 /** 1908 * iucv_pm_thaw() - Thaw PM callback 1909 * @dev: iucv-based device 1910 * 1911 * make iucv ready for use again: allocate path table, declare interrupt buffers 1912 * and enable iucv interrupts 1913 * invoke callback function of the iucv-based driver 1914 */ 1915 static int iucv_pm_thaw(struct device *dev) 1916 { 1917 int rc = 0; 1918 1919 #ifdef CONFIG_PM_DEBUG 1920 printk(KERN_WARNING "iucv_pm_thaw\n"); 1921 #endif 1922 iucv_pm_state = IUCV_PM_THAWING; 1923 if (!iucv_path_table) { 1924 rc = iucv_enable(); 1925 if (rc) 1926 goto out; 1927 } 1928 if (cpus_empty(iucv_irq_cpumask)) { 1929 if (iucv_nonsmp_handler) 1930 /* enable interrupts on one cpu */ 1931 iucv_allow_cpu(NULL); 1932 else 1933 /* enable interrupts on all cpus */ 1934 iucv_setmask_mp(); 1935 } 1936 if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) 1937 rc = dev->driver->pm->thaw(dev); 1938 out: 1939 return rc; 1940 } 1941 1942 /** 1943 * iucv_pm_restore() - Restore PM callback 1944 * @dev: iucv-based device 1945 * 1946 * make iucv ready for use again: allocate path table, declare interrupt buffers 1947 * and enable iucv interrupts 1948 * invoke callback function of the iucv-based driver 1949 */ 1950 static int iucv_pm_restore(struct device *dev) 1951 { 1952 int rc = 0; 1953 1954 #ifdef CONFIG_PM_DEBUG 1955 printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); 1956 #endif 1957 if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) 1958 pr_warning("Suspending Linux did not completely close all IUCV " 1959 "connections\n"); 1960 iucv_pm_state = IUCV_PM_RESTORING; 1961 if (cpus_empty(iucv_irq_cpumask)) { 1962 rc = iucv_query_maxconn(); 1963 rc = iucv_enable(); 1964 if (rc) 1965 goto out; 1966 } 1967 if (dev->driver && dev->driver->pm && dev->driver->pm->restore) 1968 rc = dev->driver->pm->restore(dev); 1969 out: 1970 return rc; 1971 } 1972 1973 /** 1974 * iucv_init 1975 * 1976 * Allocates and initializes various data structures. 1977 */ 1978 static int __init iucv_init(void) 1979 { 1980 int rc; 1981 int cpu; 1982 1983 if (!MACHINE_IS_VM) { 1984 rc = -EPROTONOSUPPORT; 1985 goto out; 1986 } 1987 rc = iucv_query_maxconn(); 1988 if (rc) 1989 goto out; 1990 rc = register_external_interrupt(0x4000, iucv_external_interrupt); 1991 if (rc) 1992 goto out; 1993 iucv_root = root_device_register("iucv"); 1994 if (IS_ERR(iucv_root)) { 1995 rc = PTR_ERR(iucv_root); 1996 goto out_int; 1997 } 1998 1999 for_each_online_cpu(cpu) { 2000 /* Note: GFP_DMA used to get memory below 2G */ 2001 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), 2002 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 2003 if (!iucv_irq_data[cpu]) { 2004 rc = -ENOMEM; 2005 goto out_free; 2006 } 2007 2008 /* Allocate parameter blocks. */ 2009 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), 2010 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 2011 if (!iucv_param[cpu]) { 2012 rc = -ENOMEM; 2013 goto out_free; 2014 } 2015 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), 2016 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 2017 if (!iucv_param_irq[cpu]) { 2018 rc = -ENOMEM; 2019 goto out_free; 2020 } 2021 2022 } 2023 rc = register_hotcpu_notifier(&iucv_cpu_notifier); 2024 if (rc) 2025 goto out_free; 2026 rc = register_reboot_notifier(&iucv_reboot_notifier); 2027 if (rc) 2028 goto out_cpu; 2029 ASCEBC(iucv_error_no_listener, 16); 2030 ASCEBC(iucv_error_no_memory, 16); 2031 ASCEBC(iucv_error_pathid, 16); 2032 iucv_available = 1; 2033 rc = bus_register(&iucv_bus); 2034 if (rc) 2035 goto out_reboot; 2036 return 0; 2037 2038 out_reboot: 2039 unregister_reboot_notifier(&iucv_reboot_notifier); 2040 out_cpu: 2041 unregister_hotcpu_notifier(&iucv_cpu_notifier); 2042 out_free: 2043 for_each_possible_cpu(cpu) { 2044 kfree(iucv_param_irq[cpu]); 2045 iucv_param_irq[cpu] = NULL; 2046 kfree(iucv_param[cpu]); 2047 iucv_param[cpu] = NULL; 2048 kfree(iucv_irq_data[cpu]); 2049 iucv_irq_data[cpu] = NULL; 2050 } 2051 root_device_unregister(iucv_root); 2052 out_int: 2053 unregister_external_interrupt(0x4000, iucv_external_interrupt); 2054 out: 2055 return rc; 2056 } 2057 2058 /** 2059 * iucv_exit 2060 * 2061 * Frees everything allocated from iucv_init. 2062 */ 2063 static void __exit iucv_exit(void) 2064 { 2065 struct iucv_irq_list *p, *n; 2066 int cpu; 2067 2068 spin_lock_irq(&iucv_queue_lock); 2069 list_for_each_entry_safe(p, n, &iucv_task_queue, list) 2070 kfree(p); 2071 list_for_each_entry_safe(p, n, &iucv_work_queue, list) 2072 kfree(p); 2073 spin_unlock_irq(&iucv_queue_lock); 2074 unregister_reboot_notifier(&iucv_reboot_notifier); 2075 unregister_hotcpu_notifier(&iucv_cpu_notifier); 2076 for_each_possible_cpu(cpu) { 2077 kfree(iucv_param_irq[cpu]); 2078 iucv_param_irq[cpu] = NULL; 2079 kfree(iucv_param[cpu]); 2080 iucv_param[cpu] = NULL; 2081 kfree(iucv_irq_data[cpu]); 2082 iucv_irq_data[cpu] = NULL; 2083 } 2084 root_device_unregister(iucv_root); 2085 bus_unregister(&iucv_bus); 2086 unregister_external_interrupt(0x4000, iucv_external_interrupt); 2087 } 2088 2089 subsys_initcall(iucv_init); 2090 module_exit(iucv_exit); 2091 2092 MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); 2093 MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); 2094 MODULE_LICENSE("GPL"); 2095