1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Device driver for the PMU in Apple PowerBooks and PowerMacs. 4 * 5 * The VIA (versatile interface adapter) interfaces to the PMU, 6 * a 6805 microprocessor core whose primary function is to control 7 * battery charging and system power on the PowerBook 3400 and 2400. 8 * The PMU also controls the ADB (Apple Desktop Bus) which connects 9 * to the keyboard and mouse, as well as the non-volatile RAM 10 * and the RTC (real time clock) chip. 11 * 12 * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi. 13 * Copyright (C) 2001-2002 Benjamin Herrenschmidt 14 * Copyright (C) 2006-2007 Johannes Berg 15 * 16 * THIS DRIVER IS BECOMING A TOTAL MESS ! 17 * - Cleanup atomically disabling reply to PMU events after 18 * a sleep or a freq. switch 19 * 20 */ 21 #include <linux/stdarg.h> 22 #include <linux/mutex.h> 23 #include <linux/types.h> 24 #include <linux/errno.h> 25 #include <linux/kernel.h> 26 #include <linux/delay.h> 27 #include <linux/sched/signal.h> 28 #include <linux/miscdevice.h> 29 #include <linux/blkdev.h> 30 #include <linux/pci.h> 31 #include <linux/slab.h> 32 #include <linux/poll.h> 33 #include <linux/adb.h> 34 #include <linux/pmu.h> 35 #include <linux/cuda.h> 36 #include <linux/module.h> 37 #include <linux/spinlock.h> 38 #include <linux/pm.h> 39 #include <linux/proc_fs.h> 40 #include <linux/seq_file.h> 41 #include <linux/init.h> 42 #include <linux/interrupt.h> 43 #include <linux/device.h> 44 #include <linux/syscore_ops.h> 45 #include <linux/freezer.h> 46 #include <linux/syscalls.h> 47 #include <linux/suspend.h> 48 #include <linux/cpu.h> 49 #include <linux/compat.h> 50 #include <linux/of_address.h> 51 #include <linux/of_irq.h> 52 #include <linux/uaccess.h> 53 #include <linux/pgtable.h> 54 #include <asm/machdep.h> 55 #include <asm/io.h> 56 #include <asm/sections.h> 57 #include <asm/irq.h> 58 #ifdef CONFIG_PPC_PMAC 59 #include <asm/pmac_feature.h> 60 #include <asm/pmac_pfunc.h> 61 #include <asm/pmac_low_i2c.h> 62 #include <asm/mmu_context.h> 63 #include <asm/cputable.h> 64 #include <asm/time.h> 65 #include <asm/backlight.h> 66 #else 67 #include <asm/macintosh.h> 68 #include <asm/macints.h> 69 #include <asm/mac_via.h> 70 #endif 71 72 #include "via-pmu-event.h" 73 74 /* Some compile options */ 75 #undef DEBUG_SLEEP 76 77 /* How many iterations between battery polls */ 78 #define BATTERY_POLLING_COUNT 2 79 80 static DEFINE_MUTEX(pmu_info_proc_mutex); 81 82 /* VIA registers - spaced 0x200 bytes apart */ 83 #define RS 0x200 /* skip between registers */ 84 #define B 0 /* B-side data */ 85 #define A RS /* A-side data */ 86 #define DIRB (2*RS) /* B-side direction (1=output) */ 87 #define DIRA (3*RS) /* A-side direction (1=output) */ 88 #define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */ 89 #define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */ 90 #define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */ 91 #define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */ 92 #define T2CL (8*RS) /* Timer 2 ctr/latch (low 8 bits) */ 93 #define T2CH (9*RS) /* Timer 2 counter (high 8 bits) */ 94 #define SR (10*RS) /* Shift register */ 95 #define ACR (11*RS) /* Auxiliary control register */ 96 #define PCR (12*RS) /* Peripheral control register */ 97 #define IFR (13*RS) /* Interrupt flag register */ 98 #define IER (14*RS) /* Interrupt enable register */ 99 #define ANH (15*RS) /* A-side data, no handshake */ 100 101 /* Bits in B data register: both active low */ 102 #ifdef CONFIG_PPC_PMAC 103 #define TACK 0x08 /* Transfer acknowledge (input) */ 104 #define TREQ 0x10 /* 
Transfer request (output) */ 105 #else 106 #define TACK 0x02 107 #define TREQ 0x04 108 #endif 109 110 /* Bits in ACR */ 111 #define SR_CTRL 0x1c /* Shift register control bits */ 112 #define SR_EXT 0x0c /* Shift on external clock */ 113 #define SR_OUT 0x10 /* Shift out if 1 */ 114 115 /* Bits in IFR and IER */ 116 #define IER_SET 0x80 /* set bits in IER */ 117 #define IER_CLR 0 /* clear bits in IER */ 118 #define SR_INT 0x04 /* Shift register full/empty */ 119 #define CB2_INT 0x08 120 #define CB1_INT 0x10 /* transition on CB1 input */ 121 122 static volatile enum pmu_state { 123 uninitialized = 0, 124 idle, 125 sending, 126 intack, 127 reading, 128 reading_intr, 129 locked, 130 } pmu_state; 131 132 static volatile enum int_data_state { 133 int_data_empty, 134 int_data_fill, 135 int_data_ready, 136 int_data_flush 137 } int_data_state[2] = { int_data_empty, int_data_empty }; 138 139 static struct adb_request *current_req; 140 static struct adb_request *last_req; 141 static struct adb_request *req_awaiting_reply; 142 static unsigned char interrupt_data[2][32]; 143 static int interrupt_data_len[2]; 144 static int int_data_last; 145 static unsigned char *reply_ptr; 146 static int data_index; 147 static int data_len; 148 static volatile int adb_int_pending; 149 static volatile int disable_poll; 150 static int pmu_kind = PMU_UNKNOWN; 151 static int pmu_fully_inited; 152 static int pmu_has_adb; 153 #ifdef CONFIG_PPC_PMAC 154 static volatile unsigned char __iomem *via1; 155 static volatile unsigned char __iomem *via2; 156 static struct device_node *vias; 157 static struct device_node *gpio_node; 158 #endif 159 static unsigned char __iomem *gpio_reg; 160 static int gpio_irq = 0; 161 static int gpio_irq_enabled = -1; 162 static volatile int pmu_suspended; 163 static DEFINE_SPINLOCK(pmu_lock); 164 static u8 pmu_intr_mask; 165 static int pmu_version; 166 static int drop_interrupts; 167 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 168 static int option_lid_wakeup = 1; 169 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ 170 static unsigned long async_req_locks; 171 172 #define NUM_IRQ_STATS 13 173 static unsigned int pmu_irq_stats[NUM_IRQ_STATS]; 174 175 static struct proc_dir_entry *proc_pmu_root; 176 static struct proc_dir_entry *proc_pmu_info; 177 static struct proc_dir_entry *proc_pmu_irqstats; 178 static struct proc_dir_entry *proc_pmu_options; 179 static int option_server_mode; 180 181 int pmu_battery_count; 182 static int pmu_cur_battery; 183 unsigned int pmu_power_flags = PMU_PWR_AC_PRESENT; 184 struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES]; 185 static int query_batt_timer = BATTERY_POLLING_COUNT; 186 static struct adb_request batt_req; 187 static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES]; 188 189 int asleep; 190 191 #ifdef CONFIG_ADB 192 static int adb_dev_map; 193 static int pmu_adb_flags; 194 195 static int pmu_probe(void); 196 static int pmu_init(void); 197 static int pmu_send_request(struct adb_request *req, int sync); 198 static int pmu_adb_autopoll(int devs); 199 static int pmu_adb_reset_bus(void); 200 #endif /* CONFIG_ADB */ 201 202 static int init_pmu(void); 203 static void pmu_start(void); 204 static irqreturn_t via_pmu_interrupt(int irq, void *arg); 205 static irqreturn_t gpio1_interrupt(int irq, void *arg); 206 #ifdef CONFIG_PROC_FS 207 static int pmu_info_proc_show(struct seq_file *m, void *v); 208 static int pmu_irqstats_proc_show(struct seq_file *m, void *v); 209 static int pmu_battery_proc_show(struct seq_file *m, void *v); 210 #endif 211 static void 
pmu_pass_intr(unsigned char *data, int len); 212 static const struct proc_ops pmu_options_proc_ops; 213 214 #ifdef CONFIG_ADB 215 const struct adb_driver via_pmu_driver = { 216 .name = "PMU", 217 .probe = pmu_probe, 218 .init = pmu_init, 219 .send_request = pmu_send_request, 220 .autopoll = pmu_adb_autopoll, 221 .poll = pmu_poll_adb, 222 .reset_bus = pmu_adb_reset_bus, 223 }; 224 #endif /* CONFIG_ADB */ 225 226 extern void low_sleep_handler(void); 227 extern void enable_kernel_altivec(void); 228 extern void enable_kernel_fp(void); 229 230 #ifdef DEBUG_SLEEP 231 int pmu_polled_request(struct adb_request *req); 232 void pmu_blink(int n); 233 #endif 234 235 /* 236 * This table indicates for each PMU opcode: 237 * - the number of data bytes to be sent with the command, or -1 238 * if a length byte should be sent, 239 * - the number of response bytes which the PMU will return, or 240 * -1 if it will send a length byte. 241 */ 242 static const s8 pmu_data_len[256][2] = { 243 /* 0 1 2 3 4 5 6 7 */ 244 /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 245 /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 246 /*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 247 /*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0}, 248 /*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0}, 249 /*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1}, 250 /*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 251 /*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0}, 252 /*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 253 /*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1}, 254 /*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0}, 255 /*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1}, 256 /*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 257 /*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1}, 258 /*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 259 /*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1}, 260 /*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 261 /*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 262 /*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 263 /*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 264 /*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0}, 265 /*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 266 /*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 267 /*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 268 /*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 269 /*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 270 /*d0*/ { 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 271 /*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1}, 272 /*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0}, 273 /*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0}, 274 /*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, 275 /*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, 276 }; 277 278 static char *pbook_type[] = { 279 "Unknown PowerBook", 280 "PowerBook 2400/3400/3500(G3)", 281 
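/* Reading the pmu_data_len[] table above, for example: entry 0x30
 * (PMU_SET_RTC) is { 4, 0 }, i.e. the command carries exactly 4 data bytes
 * and gets no reply bytes, while entry 0x38 (PMU_READ_RTC) is { 0, 4 }, i.e.
 * no data bytes out and a 4-byte reply back; a -1 in either column means a
 * length byte is transferred instead (see pmu_sr_intr() below).
 */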
"PowerBook G3 Series", 282 "1999 PowerBook G3", 283 "Core99" 284 }; 285 286 int __init find_via_pmu(void) 287 { 288 #ifdef CONFIG_PPC_PMAC 289 u64 taddr; 290 const u32 *reg; 291 292 if (pmu_state != uninitialized) 293 return 1; 294 vias = of_find_node_by_name(NULL, "via-pmu"); 295 if (vias == NULL) 296 return 0; 297 298 reg = of_get_property(vias, "reg", NULL); 299 if (reg == NULL) { 300 printk(KERN_ERR "via-pmu: No \"reg\" property !\n"); 301 goto fail; 302 } 303 taddr = of_translate_address(vias, reg); 304 if (taddr == OF_BAD_ADDR) { 305 printk(KERN_ERR "via-pmu: Can't translate address !\n"); 306 goto fail; 307 } 308 309 pmu_has_adb = 1; 310 311 pmu_intr_mask = PMU_INT_PCEJECT | 312 PMU_INT_SNDBRT | 313 PMU_INT_ADB | 314 PMU_INT_TICK; 315 316 if (of_node_name_eq(vias->parent, "ohare") || 317 of_device_is_compatible(vias->parent, "ohare")) 318 pmu_kind = PMU_OHARE_BASED; 319 else if (of_device_is_compatible(vias->parent, "paddington")) 320 pmu_kind = PMU_PADDINGTON_BASED; 321 else if (of_device_is_compatible(vias->parent, "heathrow")) 322 pmu_kind = PMU_HEATHROW_BASED; 323 else if (of_device_is_compatible(vias->parent, "Keylargo") 324 || of_device_is_compatible(vias->parent, "K2-Keylargo")) { 325 struct device_node *gpiop; 326 struct device_node *adbp; 327 u64 gaddr = OF_BAD_ADDR; 328 329 pmu_kind = PMU_KEYLARGO_BASED; 330 adbp = of_find_node_by_type(NULL, "adb"); 331 pmu_has_adb = (adbp != NULL); 332 of_node_put(adbp); 333 pmu_intr_mask = PMU_INT_PCEJECT | 334 PMU_INT_SNDBRT | 335 PMU_INT_ADB | 336 PMU_INT_TICK | 337 PMU_INT_ENVIRONMENT; 338 339 gpiop = of_find_node_by_name(NULL, "gpio"); 340 if (gpiop) { 341 reg = of_get_property(gpiop, "reg", NULL); 342 if (reg) 343 gaddr = of_translate_address(gpiop, reg); 344 if (gaddr != OF_BAD_ADDR) 345 gpio_reg = ioremap(gaddr, 0x10); 346 of_node_put(gpiop); 347 } 348 if (gpio_reg == NULL) { 349 printk(KERN_ERR "via-pmu: Can't find GPIO reg !\n"); 350 goto fail; 351 } 352 } else 353 pmu_kind = PMU_UNKNOWN; 354 355 via1 = via2 = ioremap(taddr, 0x2000); 356 if (via1 == NULL) { 357 printk(KERN_ERR "via-pmu: Can't map address !\n"); 358 goto fail_via_remap; 359 } 360 361 out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */ 362 out_8(&via1[IFR], 0x7f); /* clear IFR */ 363 364 pmu_state = idle; 365 366 if (!init_pmu()) 367 goto fail_init; 368 369 sys_ctrler = SYS_CTRLER_PMU; 370 371 return 1; 372 373 fail_init: 374 iounmap(via1); 375 via1 = via2 = NULL; 376 fail_via_remap: 377 iounmap(gpio_reg); 378 gpio_reg = NULL; 379 fail: 380 of_node_put(vias); 381 vias = NULL; 382 pmu_state = uninitialized; 383 return 0; 384 #else 385 if (macintosh_config->adb_type != MAC_ADB_PB2) 386 return 0; 387 388 pmu_kind = PMU_UNKNOWN; 389 390 pmu_has_adb = 1; 391 392 pmu_intr_mask = PMU_INT_PCEJECT | 393 PMU_INT_SNDBRT | 394 PMU_INT_ADB | 395 PMU_INT_TICK; 396 397 pmu_state = idle; 398 399 if (!init_pmu()) { 400 pmu_state = uninitialized; 401 return 0; 402 } 403 404 return 1; 405 #endif /* !CONFIG_PPC_PMAC */ 406 } 407 408 #ifdef CONFIG_ADB 409 static int pmu_probe(void) 410 { 411 return pmu_state == uninitialized ? -ENODEV : 0; 412 } 413 414 static int pmu_init(void) 415 { 416 return pmu_state == uninitialized ? -ENODEV : 0; 417 } 418 #endif /* CONFIG_ADB */ 419 420 /* 421 * We can't wait until pmu_init gets called, that happens too late. 422 * It happens after IDE and SCSI initialization, which can take a few 423 * seconds, and by that time the PMU could have given up on us and 424 * turned us off. 
 * Thus this is called with arch_initcall rather than device_initcall.
 */
static int __init via_pmu_start(void)
{
        unsigned int __maybe_unused irq;

        if (pmu_state == uninitialized)
                return -ENODEV;

        batt_req.complete = 1;

#ifdef CONFIG_PPC_PMAC
        irq = irq_of_parse_and_map(vias, 0);
        if (!irq) {
                printk(KERN_ERR "via-pmu: can't map interrupt\n");
                return -ENODEV;
        }
        /* We set IRQF_NO_SUSPEND because we don't want the interrupt
         * to be disabled between the two passes of driver suspend; we
         * control our own disabling for that one.
         */
        if (request_irq(irq, via_pmu_interrupt, IRQF_NO_SUSPEND,
                        "VIA-PMU", (void *)0)) {
                printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
                return -ENODEV;
        }

        if (pmu_kind == PMU_KEYLARGO_BASED) {
                gpio_node = of_find_node_by_name(NULL, "extint-gpio1");
                if (gpio_node == NULL)
                        gpio_node = of_find_node_by_name(NULL, "pmu-interrupt");
                if (gpio_node)
                        gpio_irq = irq_of_parse_and_map(gpio_node, 0);

                if (gpio_irq) {
                        if (request_irq(gpio_irq, gpio1_interrupt,
                                        IRQF_NO_SUSPEND, "GPIO1 ADB",
                                        (void *)0))
                                printk(KERN_ERR "pmu: can't get irq %d (GPIO1)\n",
                                       gpio_irq);
                        else
                                gpio_irq_enabled = 1;
                }
        }

        /* Enable interrupts */
        out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
#else
        if (request_irq(IRQ_MAC_ADB_SR, via_pmu_interrupt, IRQF_NO_SUSPEND,
                        "VIA-PMU-SR", NULL)) {
                pr_err("%s: couldn't get SR irq\n", __func__);
                return -ENODEV;
        }
        if (request_irq(IRQ_MAC_ADB_CL, via_pmu_interrupt, IRQF_NO_SUSPEND,
                        "VIA-PMU-CL", NULL)) {
                pr_err("%s: couldn't get CL irq\n", __func__);
                free_irq(IRQ_MAC_ADB_SR, NULL);
                return -ENODEV;
        }
#endif /* !CONFIG_PPC_PMAC */

        pmu_fully_inited = 1;

        /* Make sure the PMU settles down before continuing. This is _very_
         * important since the IDE probe may shut interrupts down for quite
         * a bit of time. If a PMU communication is pending while this
         * happens, the PMU may time out. Note that on Core99 machines, the
         * PMU keeps sending us environment messages; we should find a way
         * to either fix IDE or make it call pmu_suspend() before masking
         * interrupts. This can also happen while scrolling with some fbdevs.
         */
        do {
                pmu_poll();
        } while (pmu_state != idle);

        return 0;
}

arch_initcall(via_pmu_start);

/*
 * This has to be done after pci_init, which is a subsys_initcall.
508 */ 509 static int __init via_pmu_dev_init(void) 510 { 511 if (pmu_state == uninitialized) 512 return -ENODEV; 513 514 #ifdef CONFIG_PMAC_BACKLIGHT 515 /* Initialize backlight */ 516 pmu_backlight_init(); 517 #endif 518 519 #ifdef CONFIG_PPC32 520 if (of_machine_is_compatible("AAPL,3400/2400") || 521 of_machine_is_compatible("AAPL,3500")) { 522 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, 523 NULL, PMAC_MB_INFO_MODEL, 0); 524 pmu_battery_count = 1; 525 if (mb == PMAC_TYPE_COMET) 526 pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET; 527 else 528 pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER; 529 } else if (of_machine_is_compatible("AAPL,PowerBook1998") || 530 of_machine_is_compatible("PowerBook1,1")) { 531 pmu_battery_count = 2; 532 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; 533 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; 534 } else { 535 struct device_node* prim = 536 of_find_node_by_name(NULL, "power-mgt"); 537 const u32 *prim_info = NULL; 538 if (prim) 539 prim_info = of_get_property(prim, "prim-info", NULL); 540 if (prim_info) { 541 /* Other stuffs here yet unknown */ 542 pmu_battery_count = (prim_info[6] >> 16) & 0xff; 543 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART; 544 if (pmu_battery_count > 1) 545 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; 546 } 547 of_node_put(prim); 548 } 549 #endif /* CONFIG_PPC32 */ 550 551 /* Create /proc/pmu */ 552 proc_pmu_root = proc_mkdir("pmu", NULL); 553 if (proc_pmu_root) { 554 long i; 555 556 for (i=0; i<pmu_battery_count; i++) { 557 char title[16]; 558 sprintf(title, "battery_%ld", i); 559 proc_pmu_batt[i] = proc_create_single_data(title, 0, 560 proc_pmu_root, pmu_battery_proc_show, 561 (void *)i); 562 } 563 564 proc_pmu_info = proc_create_single("info", 0, proc_pmu_root, 565 pmu_info_proc_show); 566 proc_pmu_irqstats = proc_create_single("interrupts", 0, 567 proc_pmu_root, pmu_irqstats_proc_show); 568 proc_pmu_options = proc_create("options", 0600, proc_pmu_root, 569 &pmu_options_proc_ops); 570 } 571 return 0; 572 } 573 574 device_initcall(via_pmu_dev_init); 575 576 static int 577 init_pmu(void) 578 { 579 int timeout; 580 struct adb_request req; 581 582 /* Negate TREQ. Set TACK to input and TREQ to output. */ 583 out_8(&via2[B], in_8(&via2[B]) | TREQ); 584 out_8(&via2[DIRB], (in_8(&via2[DIRB]) | TREQ) & ~TACK); 585 586 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 587 timeout = 100000; 588 while (!req.complete) { 589 if (--timeout < 0) { 590 printk(KERN_ERR "init_pmu: no response from PMU\n"); 591 return 0; 592 } 593 udelay(10); 594 pmu_poll(); 595 } 596 597 /* ack all pending interrupts */ 598 timeout = 100000; 599 interrupt_data[0][0] = 1; 600 while (interrupt_data[0][0] || pmu_state != idle) { 601 if (--timeout < 0) { 602 printk(KERN_ERR "init_pmu: timed out acking intrs\n"); 603 return 0; 604 } 605 if (pmu_state == idle) 606 adb_int_pending = 1; 607 via_pmu_interrupt(0, NULL); 608 udelay(10); 609 } 610 611 /* Tell PMU we are ready. 
*/ 612 if (pmu_kind == PMU_KEYLARGO_BASED) { 613 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); 614 while (!req.complete) 615 pmu_poll(); 616 } 617 618 /* Read PMU version */ 619 pmu_request(&req, NULL, 1, PMU_GET_VERSION); 620 pmu_wait_complete(&req); 621 if (req.reply_len > 0) 622 pmu_version = req.reply[0]; 623 624 /* Read server mode setting */ 625 if (pmu_kind == PMU_KEYLARGO_BASED) { 626 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, 627 PMU_PWR_GET_POWERUP_EVENTS); 628 pmu_wait_complete(&req); 629 if (req.reply_len == 2) { 630 if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT) 631 option_server_mode = 1; 632 printk(KERN_INFO "via-pmu: Server Mode is %s\n", 633 option_server_mode ? "enabled" : "disabled"); 634 } 635 } 636 637 printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n", 638 PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version); 639 640 return 1; 641 } 642 643 int 644 pmu_get_model(void) 645 { 646 return pmu_kind; 647 } 648 649 static void pmu_set_server_mode(int server_mode) 650 { 651 struct adb_request req; 652 653 if (pmu_kind != PMU_KEYLARGO_BASED) 654 return; 655 656 option_server_mode = server_mode; 657 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, PMU_PWR_GET_POWERUP_EVENTS); 658 pmu_wait_complete(&req); 659 if (req.reply_len < 2) 660 return; 661 if (server_mode) 662 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, 663 PMU_PWR_SET_POWERUP_EVENTS, 664 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 665 else 666 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, 667 PMU_PWR_CLR_POWERUP_EVENTS, 668 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); 669 pmu_wait_complete(&req); 670 } 671 672 /* This new version of the code for 2400/3400/3500 powerbooks 673 * is inspired from the implementation in gkrellm-pmu 674 */ 675 static void 676 done_battery_state_ohare(struct adb_request* req) 677 { 678 #ifdef CONFIG_PPC_PMAC 679 /* format: 680 * [0] : flags 681 * 0x01 : AC indicator 682 * 0x02 : charging 683 * 0x04 : battery exist 684 * 0x08 : 685 * 0x10 : 686 * 0x20 : full charged 687 * 0x40 : pcharge reset 688 * 0x80 : battery exist 689 * 690 * [1][2] : battery voltage 691 * [3] : CPU temperature 692 * [4] : battery temperature 693 * [5] : current 694 * [6][7] : pcharge 695 * --tkoba 696 */ 697 unsigned int bat_flags = PMU_BATT_TYPE_HOOPER; 698 long pcharge, charge, vb, vmax, lmax; 699 long vmax_charging, vmax_charged; 700 long amperage, voltage, time, max; 701 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO, 702 NULL, PMAC_MB_INFO_MODEL, 0); 703 704 if (req->reply[0] & 0x01) 705 pmu_power_flags |= PMU_PWR_AC_PRESENT; 706 else 707 pmu_power_flags &= ~PMU_PWR_AC_PRESENT; 708 709 if (mb == PMAC_TYPE_COMET) { 710 vmax_charged = 189; 711 vmax_charging = 213; 712 lmax = 6500; 713 } else { 714 vmax_charged = 330; 715 vmax_charging = 330; 716 lmax = 6500; 717 } 718 vmax = vmax_charged; 719 720 /* If battery installed */ 721 if (req->reply[0] & 0x04) { 722 bat_flags |= PMU_BATT_PRESENT; 723 if (req->reply[0] & 0x02) 724 bat_flags |= PMU_BATT_CHARGING; 725 vb = (req->reply[1] << 8) | req->reply[2]; 726 voltage = (vb * 265 + 72665) / 10; 727 amperage = req->reply[5]; 728 if ((req->reply[0] & 0x01) == 0) { 729 if (amperage > 200) 730 vb += ((amperage - 200) * 15)/100; 731 } else if (req->reply[0] & 0x02) { 732 vb = (vb * 97) / 100; 733 vmax = vmax_charging; 734 } 735 charge = (100 * vb) / vmax; 736 if (req->reply[0] & 0x40) { 737 pcharge = (req->reply[6] << 8) + req->reply[7]; 738 if (pcharge > lmax) 739 pcharge = lmax; 740 pcharge *= 100; 741 pcharge = 100 - pcharge / lmax; 742 if (pcharge < charge) 743 
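/* Worked example with made-up numbers: reply[6..7] = 0x0c 0xb5 gives
 * pcharge = 3253, then 3253 * 100 / 6500 = 50, so pcharge ends up as
 * 100 - 50 = 50%; it only replaces the voltage-derived estimate when
 * it is the smaller of the two.
 */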
charge = pcharge; 744 } 745 if (amperage > 0) 746 time = (charge * 16440) / amperage; 747 else 748 time = 0; 749 max = 100; 750 amperage = -amperage; 751 } else 752 charge = max = amperage = voltage = time = 0; 753 754 pmu_batteries[pmu_cur_battery].flags = bat_flags; 755 pmu_batteries[pmu_cur_battery].charge = charge; 756 pmu_batteries[pmu_cur_battery].max_charge = max; 757 pmu_batteries[pmu_cur_battery].amperage = amperage; 758 pmu_batteries[pmu_cur_battery].voltage = voltage; 759 pmu_batteries[pmu_cur_battery].time_remaining = time; 760 #endif /* CONFIG_PPC_PMAC */ 761 762 clear_bit(0, &async_req_locks); 763 } 764 765 static void 766 done_battery_state_smart(struct adb_request* req) 767 { 768 /* format: 769 * [0] : format of this structure (known: 3,4,5) 770 * [1] : flags 771 * 772 * format 3 & 4: 773 * 774 * [2] : charge 775 * [3] : max charge 776 * [4] : current 777 * [5] : voltage 778 * 779 * format 5: 780 * 781 * [2][3] : charge 782 * [4][5] : max charge 783 * [6][7] : current 784 * [8][9] : voltage 785 */ 786 787 unsigned int bat_flags = PMU_BATT_TYPE_SMART; 788 int amperage; 789 unsigned int capa, max, voltage; 790 791 if (req->reply[1] & 0x01) 792 pmu_power_flags |= PMU_PWR_AC_PRESENT; 793 else 794 pmu_power_flags &= ~PMU_PWR_AC_PRESENT; 795 796 797 capa = max = amperage = voltage = 0; 798 799 if (req->reply[1] & 0x04) { 800 bat_flags |= PMU_BATT_PRESENT; 801 switch(req->reply[0]) { 802 case 3: 803 case 4: capa = req->reply[2]; 804 max = req->reply[3]; 805 amperage = *((signed char *)&req->reply[4]); 806 voltage = req->reply[5]; 807 break; 808 case 5: capa = (req->reply[2] << 8) | req->reply[3]; 809 max = (req->reply[4] << 8) | req->reply[5]; 810 amperage = *((signed short *)&req->reply[6]); 811 voltage = (req->reply[8] << 8) | req->reply[9]; 812 break; 813 default: 814 pr_warn("pmu.c: unrecognized battery info, " 815 "len: %d, %4ph\n", req->reply_len, 816 req->reply); 817 break; 818 } 819 } 820 821 if ((req->reply[1] & 0x01) && (amperage > 0)) 822 bat_flags |= PMU_BATT_CHARGING; 823 824 pmu_batteries[pmu_cur_battery].flags = bat_flags; 825 pmu_batteries[pmu_cur_battery].charge = capa; 826 pmu_batteries[pmu_cur_battery].max_charge = max; 827 pmu_batteries[pmu_cur_battery].amperage = amperage; 828 pmu_batteries[pmu_cur_battery].voltage = voltage; 829 if (amperage) { 830 if ((req->reply[1] & 0x01) && (amperage > 0)) 831 pmu_batteries[pmu_cur_battery].time_remaining 832 = ((max-capa) * 3600) / amperage; 833 else 834 pmu_batteries[pmu_cur_battery].time_remaining 835 = (capa * 3600) / (-amperage); 836 } else 837 pmu_batteries[pmu_cur_battery].time_remaining = 0; 838 839 pmu_cur_battery = (pmu_cur_battery + 1) % pmu_battery_count; 840 841 clear_bit(0, &async_req_locks); 842 } 843 844 static void 845 query_battery_state(void) 846 { 847 if (test_and_set_bit(0, &async_req_locks)) 848 return; 849 if (pmu_kind == PMU_OHARE_BASED) 850 pmu_request(&batt_req, done_battery_state_ohare, 851 1, PMU_BATTERY_STATE); 852 else 853 pmu_request(&batt_req, done_battery_state_smart, 854 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1); 855 } 856 857 #ifdef CONFIG_PROC_FS 858 static int pmu_info_proc_show(struct seq_file *m, void *v) 859 { 860 seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION); 861 seq_printf(m, "PMU firmware version : %02x\n", pmu_version); 862 seq_printf(m, "AC Power : %d\n", 863 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0); 864 seq_printf(m, "Battery count : %d\n", pmu_battery_count); 865 866 return 0; 867 } 868 869 static int 
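/*
 * For reference, the "info" file rendered by pmu_info_proc_show() above
 * comes out roughly as follows (values illustrative, not from real hardware):
 *
 *      PMU driver version : 2
 *      PMU firmware version : 0c
 *      AC Power : 1
 *      Battery count : 1
 */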
pmu_irqstats_proc_show(struct seq_file *m, void *v) 870 { 871 int i; 872 static const char *irq_names[NUM_IRQ_STATS] = { 873 "Unknown interrupt (type 0)", 874 "Unknown interrupt (type 1)", 875 "PC-Card eject button", 876 "Sound/Brightness button", 877 "ADB message", 878 "Battery state change", 879 "Environment interrupt", 880 "Tick timer", 881 "Ghost interrupt (zero len)", 882 "Empty interrupt (empty mask)", 883 "Max irqs in a row", 884 "Total CB1 triggered events", 885 "Total GPIO1 triggered events", 886 }; 887 888 for (i = 0; i < NUM_IRQ_STATS; i++) { 889 seq_printf(m, " %2u: %10u (%s)\n", 890 i, pmu_irq_stats[i], irq_names[i]); 891 } 892 return 0; 893 } 894 895 static int pmu_battery_proc_show(struct seq_file *m, void *v) 896 { 897 long batnum = (long)m->private; 898 899 seq_putc(m, '\n'); 900 seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags); 901 seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge); 902 seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge); 903 seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage); 904 seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage); 905 seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining); 906 return 0; 907 } 908 909 static int pmu_options_proc_show(struct seq_file *m, void *v) 910 { 911 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 912 if (pmu_kind == PMU_KEYLARGO_BASED && 913 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 914 seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup); 915 #endif 916 if (pmu_kind == PMU_KEYLARGO_BASED) 917 seq_printf(m, "server_mode=%d\n", option_server_mode); 918 919 return 0; 920 } 921 922 static int pmu_options_proc_open(struct inode *inode, struct file *file) 923 { 924 return single_open(file, pmu_options_proc_show, NULL); 925 } 926 927 static ssize_t pmu_options_proc_write(struct file *file, 928 const char __user *buffer, size_t count, loff_t *pos) 929 { 930 char tmp[33]; 931 char *label, *val; 932 size_t fcount = count; 933 934 if (!count) 935 return -EINVAL; 936 if (count > 32) 937 count = 32; 938 if (copy_from_user(tmp, buffer, count)) 939 return -EFAULT; 940 tmp[count] = 0; 941 942 label = tmp; 943 while(*label == ' ') 944 label++; 945 val = label; 946 while(*val && (*val != '=')) { 947 if (*val == ' ') 948 *val = 0; 949 val++; 950 } 951 if ((*val) == 0) 952 return -EINVAL; 953 *(val++) = 0; 954 while(*val == ' ') 955 val++; 956 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 957 if (pmu_kind == PMU_KEYLARGO_BASED && 958 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0) 959 if (!strcmp(label, "lid_wakeup")) 960 option_lid_wakeup = ((*val) == '1'); 961 #endif 962 if (pmu_kind == PMU_KEYLARGO_BASED && !strcmp(label, "server_mode")) { 963 int new_value; 964 new_value = ((*val) == '1'); 965 if (new_value != option_server_mode) 966 pmu_set_server_mode(new_value); 967 } 968 return fcount; 969 } 970 971 static const struct proc_ops pmu_options_proc_ops = { 972 .proc_open = pmu_options_proc_open, 973 .proc_read = seq_read, 974 .proc_lseek = seq_lseek, 975 .proc_release = single_release, 976 .proc_write = pmu_options_proc_write, 977 }; 978 #endif 979 980 #ifdef CONFIG_ADB 981 /* Send an ADB command */ 982 static int pmu_send_request(struct adb_request *req, int sync) 983 { 984 int i, ret; 985 986 if (pmu_state == uninitialized || !pmu_fully_inited) { 987 req->complete = 1; 988 return -ENXIO; 989 } 990 991 ret = -EINVAL; 992 993 switch (req->data[0]) { 994 case PMU_PACKET: 995 for (i = 0; i < req->nbytes - 1; ++i) 996 
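/* e.g. a raw { PMU_PACKET, PMU_SET_INTR_MASK, mask } request coming in
 * from the adb layer is shifted down to { PMU_SET_INTR_MASK, mask } here:
 * PMU_PACKET is only adb-level framing, not a PMU opcode.
 */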
req->data[i] = req->data[i+1]; 997 --req->nbytes; 998 if (pmu_data_len[req->data[0]][1] != 0) { 999 req->reply[0] = ADB_RET_OK; 1000 req->reply_len = 1; 1001 } else 1002 req->reply_len = 0; 1003 ret = pmu_queue_request(req); 1004 break; 1005 case CUDA_PACKET: 1006 switch (req->data[1]) { 1007 case CUDA_GET_TIME: 1008 if (req->nbytes != 2) 1009 break; 1010 req->data[0] = PMU_READ_RTC; 1011 req->nbytes = 1; 1012 req->reply_len = 3; 1013 req->reply[0] = CUDA_PACKET; 1014 req->reply[1] = 0; 1015 req->reply[2] = CUDA_GET_TIME; 1016 ret = pmu_queue_request(req); 1017 break; 1018 case CUDA_SET_TIME: 1019 if (req->nbytes != 6) 1020 break; 1021 req->data[0] = PMU_SET_RTC; 1022 req->nbytes = 5; 1023 for (i = 1; i <= 4; ++i) 1024 req->data[i] = req->data[i+1]; 1025 req->reply_len = 3; 1026 req->reply[0] = CUDA_PACKET; 1027 req->reply[1] = 0; 1028 req->reply[2] = CUDA_SET_TIME; 1029 ret = pmu_queue_request(req); 1030 break; 1031 } 1032 break; 1033 case ADB_PACKET: 1034 if (!pmu_has_adb) 1035 return -ENXIO; 1036 for (i = req->nbytes - 1; i > 1; --i) 1037 req->data[i+2] = req->data[i]; 1038 req->data[3] = req->nbytes - 2; 1039 req->data[2] = pmu_adb_flags; 1040 /*req->data[1] = req->data[1];*/ 1041 req->data[0] = PMU_ADB_CMD; 1042 req->nbytes += 2; 1043 req->reply_expected = 1; 1044 req->reply_len = 0; 1045 ret = pmu_queue_request(req); 1046 break; 1047 } 1048 if (ret) { 1049 req->complete = 1; 1050 return ret; 1051 } 1052 1053 if (sync) 1054 while (!req->complete) 1055 pmu_poll(); 1056 1057 return 0; 1058 } 1059 1060 /* Enable/disable autopolling */ 1061 static int __pmu_adb_autopoll(int devs) 1062 { 1063 struct adb_request req; 1064 1065 if (devs) { 1066 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, 1067 adb_dev_map >> 8, adb_dev_map); 1068 pmu_adb_flags = 2; 1069 } else { 1070 pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF); 1071 pmu_adb_flags = 0; 1072 } 1073 while (!req.complete) 1074 pmu_poll(); 1075 return 0; 1076 } 1077 1078 static int pmu_adb_autopoll(int devs) 1079 { 1080 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb) 1081 return -ENXIO; 1082 1083 adb_dev_map = devs; 1084 return __pmu_adb_autopoll(devs); 1085 } 1086 1087 /* Reset the ADB bus */ 1088 static int pmu_adb_reset_bus(void) 1089 { 1090 struct adb_request req; 1091 int save_autopoll = adb_dev_map; 1092 1093 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb) 1094 return -ENXIO; 1095 1096 /* anyone got a better idea?? */ 1097 __pmu_adb_autopoll(0); 1098 1099 req.nbytes = 4; 1100 req.done = NULL; 1101 req.data[0] = PMU_ADB_CMD; 1102 req.data[1] = ADB_BUSRESET; 1103 req.data[2] = 0; 1104 req.data[3] = 0; 1105 req.data[4] = 0; 1106 req.reply_len = 0; 1107 req.reply_expected = 1; 1108 if (pmu_queue_request(&req) != 0) { 1109 printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n"); 1110 return -EIO; 1111 } 1112 pmu_wait_complete(&req); 1113 1114 if (save_autopoll != 0) 1115 __pmu_adb_autopoll(save_autopoll); 1116 1117 return 0; 1118 } 1119 #endif /* CONFIG_ADB */ 1120 1121 /* Construct and send a pmu request */ 1122 int 1123 pmu_request(struct adb_request *req, void (*done)(struct adb_request *), 1124 int nbytes, ...) 
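/*
 * Typical usage, as an illustrative sketch (my_done is a made-up name):
 *
 *      static void my_done(struct adb_request *req)
 *      {
 *              // req->reply[0..req->reply_len - 1] holds the PMU's answer
 *      }
 *
 *      struct adb_request req;
 *      pmu_request(&req, my_done, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 *
 * Each vararg becomes one byte of req->data[]; data[0] is the PMU opcode
 * and the length must agree with pmu_data_len[], otherwise
 * pmu_queue_request() fails with -EINVAL.  Passing done == NULL and
 * spinning on req.complete with pmu_poll() is the synchronous pattern
 * used throughout this file.
 */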
1125 { 1126 va_list list; 1127 int i; 1128 1129 if (pmu_state == uninitialized) 1130 return -ENXIO; 1131 1132 if (nbytes < 0 || nbytes > 32) { 1133 printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes); 1134 req->complete = 1; 1135 return -EINVAL; 1136 } 1137 req->nbytes = nbytes; 1138 req->done = done; 1139 va_start(list, nbytes); 1140 for (i = 0; i < nbytes; ++i) 1141 req->data[i] = va_arg(list, int); 1142 va_end(list); 1143 req->reply_len = 0; 1144 req->reply_expected = 0; 1145 return pmu_queue_request(req); 1146 } 1147 1148 int 1149 pmu_queue_request(struct adb_request *req) 1150 { 1151 unsigned long flags; 1152 int nsend; 1153 1154 if (pmu_state == uninitialized) { 1155 req->complete = 1; 1156 return -ENXIO; 1157 } 1158 if (req->nbytes <= 0) { 1159 req->complete = 1; 1160 return 0; 1161 } 1162 nsend = pmu_data_len[req->data[0]][0]; 1163 if (nsend >= 0 && req->nbytes != nsend + 1) { 1164 req->complete = 1; 1165 return -EINVAL; 1166 } 1167 1168 req->next = NULL; 1169 req->sent = 0; 1170 req->complete = 0; 1171 1172 spin_lock_irqsave(&pmu_lock, flags); 1173 if (current_req) { 1174 last_req->next = req; 1175 last_req = req; 1176 } else { 1177 current_req = req; 1178 last_req = req; 1179 if (pmu_state == idle) 1180 pmu_start(); 1181 } 1182 spin_unlock_irqrestore(&pmu_lock, flags); 1183 1184 return 0; 1185 } 1186 1187 static inline void 1188 wait_for_ack(void) 1189 { 1190 /* Sightly increased the delay, I had one occurrence of the message 1191 * reported 1192 */ 1193 int timeout = 4000; 1194 while ((in_8(&via2[B]) & TACK) == 0) { 1195 if (--timeout < 0) { 1196 printk(KERN_ERR "PMU not responding (!ack)\n"); 1197 return; 1198 } 1199 udelay(10); 1200 } 1201 } 1202 1203 /* New PMU seems to be very sensitive to those timings, so we make sure 1204 * PCI is flushed immediately */ 1205 static inline void 1206 send_byte(int x) 1207 { 1208 out_8(&via1[ACR], in_8(&via1[ACR]) | SR_OUT | SR_EXT); 1209 out_8(&via1[SR], x); 1210 out_8(&via2[B], in_8(&via2[B]) & ~TREQ); /* assert TREQ */ 1211 (void)in_8(&via2[B]); 1212 } 1213 1214 static inline void 1215 recv_byte(void) 1216 { 1217 out_8(&via1[ACR], (in_8(&via1[ACR]) & ~SR_OUT) | SR_EXT); 1218 in_8(&via1[SR]); /* resets SR */ 1219 out_8(&via2[B], in_8(&via2[B]) & ~TREQ); 1220 (void)in_8(&via2[B]); 1221 } 1222 1223 static inline void 1224 pmu_done(struct adb_request *req) 1225 { 1226 void (*done)(struct adb_request *) = req->done; 1227 mb(); 1228 req->complete = 1; 1229 /* Here, we assume that if the request has a done member, the 1230 * struct request will survive to setting req->complete to 1 1231 */ 1232 if (done) 1233 (*done)(req); 1234 } 1235 1236 static void 1237 pmu_start(void) 1238 { 1239 struct adb_request *req; 1240 1241 /* assert pmu_state == idle */ 1242 /* get the packet to send */ 1243 req = current_req; 1244 if (!req || pmu_state != idle 1245 || (/*req->reply_expected && */req_awaiting_reply)) 1246 return; 1247 1248 pmu_state = sending; 1249 data_index = 1; 1250 data_len = pmu_data_len[req->data[0]][0]; 1251 1252 /* Sounds safer to make sure ACK is high before writing. 
This helped 1253 * kill a problem with ADB and some iBooks 1254 */ 1255 wait_for_ack(); 1256 /* set the shift register to shift out and send a byte */ 1257 send_byte(req->data[0]); 1258 } 1259 1260 void 1261 pmu_poll(void) 1262 { 1263 if (pmu_state == uninitialized) 1264 return; 1265 if (disable_poll) 1266 return; 1267 via_pmu_interrupt(0, NULL); 1268 } 1269 1270 void 1271 pmu_poll_adb(void) 1272 { 1273 if (pmu_state == uninitialized) 1274 return; 1275 if (disable_poll) 1276 return; 1277 /* Kicks ADB read when PMU is suspended */ 1278 adb_int_pending = 1; 1279 do { 1280 via_pmu_interrupt(0, NULL); 1281 } while (pmu_suspended && (adb_int_pending || pmu_state != idle 1282 || req_awaiting_reply)); 1283 } 1284 1285 void 1286 pmu_wait_complete(struct adb_request *req) 1287 { 1288 if (pmu_state == uninitialized) 1289 return; 1290 while((pmu_state != idle && pmu_state != locked) || !req->complete) 1291 via_pmu_interrupt(0, NULL); 1292 } 1293 1294 /* This function loops until the PMU is idle and prevents it from 1295 * anwsering to ADB interrupts. pmu_request can still be called. 1296 * This is done to avoid spurrious shutdowns when we know we'll have 1297 * interrupts switched off for a long time 1298 */ 1299 void 1300 pmu_suspend(void) 1301 { 1302 unsigned long flags; 1303 1304 if (pmu_state == uninitialized) 1305 return; 1306 1307 spin_lock_irqsave(&pmu_lock, flags); 1308 pmu_suspended++; 1309 if (pmu_suspended > 1) { 1310 spin_unlock_irqrestore(&pmu_lock, flags); 1311 return; 1312 } 1313 1314 do { 1315 spin_unlock_irqrestore(&pmu_lock, flags); 1316 if (req_awaiting_reply) 1317 adb_int_pending = 1; 1318 via_pmu_interrupt(0, NULL); 1319 spin_lock_irqsave(&pmu_lock, flags); 1320 if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) { 1321 if (gpio_irq >= 0) 1322 disable_irq_nosync(gpio_irq); 1323 out_8(&via1[IER], CB1_INT | IER_CLR); 1324 spin_unlock_irqrestore(&pmu_lock, flags); 1325 break; 1326 } 1327 } while (1); 1328 } 1329 1330 void 1331 pmu_resume(void) 1332 { 1333 unsigned long flags; 1334 1335 if (pmu_state == uninitialized || pmu_suspended < 1) 1336 return; 1337 1338 spin_lock_irqsave(&pmu_lock, flags); 1339 pmu_suspended--; 1340 if (pmu_suspended > 0) { 1341 spin_unlock_irqrestore(&pmu_lock, flags); 1342 return; 1343 } 1344 adb_int_pending = 1; 1345 if (gpio_irq >= 0) 1346 enable_irq(gpio_irq); 1347 out_8(&via1[IER], CB1_INT | IER_SET); 1348 spin_unlock_irqrestore(&pmu_lock, flags); 1349 pmu_poll(); 1350 } 1351 1352 /* Interrupt data could be the result data from an ADB cmd */ 1353 static void 1354 pmu_handle_data(unsigned char *data, int len) 1355 { 1356 unsigned char ints; 1357 int idx; 1358 int i = 0; 1359 1360 asleep = 0; 1361 if (drop_interrupts || len < 1) { 1362 adb_int_pending = 0; 1363 pmu_irq_stats[8]++; 1364 return; 1365 } 1366 1367 /* Get PMU interrupt mask */ 1368 ints = data[0]; 1369 1370 /* Record zero interrupts for stats */ 1371 if (ints == 0) 1372 pmu_irq_stats[9]++; 1373 1374 /* Hack to deal with ADB autopoll flag */ 1375 if (ints & PMU_INT_ADB) 1376 ints &= ~(PMU_INT_ADB_AUTO | PMU_INT_AUTO_SRQ_POLL); 1377 1378 next: 1379 if (ints == 0) { 1380 if (i > pmu_irq_stats[10]) 1381 pmu_irq_stats[10] = i; 1382 return; 1383 } 1384 i++; 1385 1386 idx = ffs(ints) - 1; 1387 ints &= ~BIT(idx); 1388 1389 pmu_irq_stats[idx]++; 1390 1391 /* Note: for some reason, we get an interrupt with len=1, 1392 * data[0]==0 after each normal ADB interrupt, at least 1393 * on the Pismo. Still investigating... 
--BenH 1394 */ 1395 switch (BIT(idx)) { 1396 case PMU_INT_ADB: 1397 if ((data[0] & PMU_INT_ADB_AUTO) == 0) { 1398 struct adb_request *req = req_awaiting_reply; 1399 if (!req) { 1400 printk(KERN_ERR "PMU: extra ADB reply\n"); 1401 return; 1402 } 1403 req_awaiting_reply = NULL; 1404 if (len <= 2) 1405 req->reply_len = 0; 1406 else { 1407 memcpy(req->reply, data + 1, len - 1); 1408 req->reply_len = len - 1; 1409 } 1410 pmu_done(req); 1411 } else { 1412 #ifdef CONFIG_XMON 1413 if (len == 4 && data[1] == 0x2c) { 1414 extern int xmon_wants_key, xmon_adb_keycode; 1415 if (xmon_wants_key) { 1416 xmon_adb_keycode = data[2]; 1417 return; 1418 } 1419 } 1420 #endif /* CONFIG_XMON */ 1421 #ifdef CONFIG_ADB 1422 /* 1423 * XXX On the [23]400 the PMU gives us an up 1424 * event for keycodes 0x74 or 0x75 when the PC 1425 * card eject buttons are released, so we 1426 * ignore those events. 1427 */ 1428 if (!(pmu_kind == PMU_OHARE_BASED && len == 4 1429 && data[1] == 0x2c && data[3] == 0xff 1430 && (data[2] & ~1) == 0xf4)) 1431 adb_input(data+1, len-1, 1); 1432 #endif /* CONFIG_ADB */ 1433 } 1434 break; 1435 1436 /* Sound/brightness button pressed */ 1437 case PMU_INT_SNDBRT: 1438 #ifdef CONFIG_PMAC_BACKLIGHT 1439 if (len == 3) 1440 pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4); 1441 #endif 1442 break; 1443 1444 /* Tick interrupt */ 1445 case PMU_INT_TICK: 1446 /* Environment or tick interrupt, query batteries */ 1447 if (pmu_battery_count) { 1448 if ((--query_batt_timer) == 0) { 1449 query_battery_state(); 1450 query_batt_timer = BATTERY_POLLING_COUNT; 1451 } 1452 } 1453 break; 1454 1455 case PMU_INT_ENVIRONMENT: 1456 if (pmu_battery_count) 1457 query_battery_state(); 1458 pmu_pass_intr(data, len); 1459 /* len == 6 is probably a bad check. But how do I 1460 * know what PMU versions send what events here? 
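 * What the decoding below does rely on is that, in the 6-byte form,
 * bit 3 of data[1] tracks the power button and bit 0 the lid switch.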
*/ 1461 if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) { 1462 via_pmu_event(PMU_EVT_POWER, !!(data[1]&8)); 1463 via_pmu_event(PMU_EVT_LID, data[1]&1); 1464 } 1465 break; 1466 1467 default: 1468 pmu_pass_intr(data, len); 1469 } 1470 goto next; 1471 } 1472 1473 static struct adb_request* 1474 pmu_sr_intr(void) 1475 { 1476 struct adb_request *req; 1477 int bite = 0; 1478 1479 if (in_8(&via2[B]) & TREQ) { 1480 printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via2[B])); 1481 return NULL; 1482 } 1483 /* The ack may not yet be low when we get the interrupt */ 1484 while ((in_8(&via2[B]) & TACK) != 0) 1485 ; 1486 1487 /* if reading grab the byte, and reset the interrupt */ 1488 if (pmu_state == reading || pmu_state == reading_intr) 1489 bite = in_8(&via1[SR]); 1490 1491 /* reset TREQ and wait for TACK to go high */ 1492 out_8(&via2[B], in_8(&via2[B]) | TREQ); 1493 wait_for_ack(); 1494 1495 switch (pmu_state) { 1496 case sending: 1497 req = current_req; 1498 if (data_len < 0) { 1499 data_len = req->nbytes - 1; 1500 send_byte(data_len); 1501 break; 1502 } 1503 if (data_index <= data_len) { 1504 send_byte(req->data[data_index++]); 1505 break; 1506 } 1507 req->sent = 1; 1508 data_len = pmu_data_len[req->data[0]][1]; 1509 if (data_len == 0) { 1510 pmu_state = idle; 1511 current_req = req->next; 1512 if (req->reply_expected) 1513 req_awaiting_reply = req; 1514 else 1515 return req; 1516 } else { 1517 pmu_state = reading; 1518 data_index = 0; 1519 reply_ptr = req->reply + req->reply_len; 1520 recv_byte(); 1521 } 1522 break; 1523 1524 case intack: 1525 data_index = 0; 1526 data_len = -1; 1527 pmu_state = reading_intr; 1528 reply_ptr = interrupt_data[int_data_last]; 1529 recv_byte(); 1530 if (gpio_irq >= 0 && !gpio_irq_enabled) { 1531 enable_irq(gpio_irq); 1532 gpio_irq_enabled = 1; 1533 } 1534 break; 1535 1536 case reading: 1537 case reading_intr: 1538 if (data_len == -1) { 1539 data_len = bite; 1540 if (bite > 32) 1541 printk(KERN_ERR "PMU: bad reply len %d\n", bite); 1542 } else if (data_index < 32) { 1543 reply_ptr[data_index++] = bite; 1544 } 1545 if (data_index < data_len) { 1546 recv_byte(); 1547 break; 1548 } 1549 1550 if (pmu_state == reading_intr) { 1551 pmu_state = idle; 1552 int_data_state[int_data_last] = int_data_ready; 1553 interrupt_data_len[int_data_last] = data_len; 1554 } else { 1555 req = current_req; 1556 /* 1557 * For PMU sleep and freq change requests, we lock the 1558 * PMU until it's explicitly unlocked. This avoids any 1559 * spurrious event polling getting in 1560 */ 1561 current_req = req->next; 1562 req->reply_len += data_index; 1563 if (req->data[0] == PMU_SLEEP || req->data[0] == PMU_CPU_SPEED) 1564 pmu_state = locked; 1565 else 1566 pmu_state = idle; 1567 return req; 1568 } 1569 break; 1570 1571 default: 1572 printk(KERN_ERR "via_pmu_interrupt: unknown state %d?\n", 1573 pmu_state); 1574 } 1575 return NULL; 1576 } 1577 1578 static irqreturn_t 1579 via_pmu_interrupt(int irq, void *arg) 1580 { 1581 unsigned long flags; 1582 int intr; 1583 int nloop = 0; 1584 int int_data = -1; 1585 struct adb_request *req = NULL; 1586 int handled = 0; 1587 1588 /* This is a bit brutal, we can probably do better */ 1589 spin_lock_irqsave(&pmu_lock, flags); 1590 ++disable_poll; 1591 1592 for (;;) { 1593 /* On 68k Macs, VIA interrupts are dispatched individually. 1594 * Unless we are polling, the relevant IRQ flag has already 1595 * been cleared. 
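 * Either way, CB1 means the PMU wants attention (we latch
 * adb_int_pending and acknowledge later with PMU_INT_ACK), while SR
 * means the shift register has finished transferring one byte and
 * pmu_sr_intr() must advance the command/reply state machine.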
1596 */ 1597 intr = 0; 1598 if (IS_ENABLED(CONFIG_PPC_PMAC) || !irq) { 1599 intr = in_8(&via1[IFR]) & (SR_INT | CB1_INT); 1600 out_8(&via1[IFR], intr); 1601 } 1602 #ifndef CONFIG_PPC_PMAC 1603 switch (irq) { 1604 case IRQ_MAC_ADB_CL: 1605 intr = CB1_INT; 1606 break; 1607 case IRQ_MAC_ADB_SR: 1608 intr = SR_INT; 1609 break; 1610 } 1611 #endif 1612 if (intr == 0) 1613 break; 1614 handled = 1; 1615 if (++nloop > 1000) { 1616 printk(KERN_DEBUG "PMU: stuck in intr loop, " 1617 "intr=%x, ier=%x pmu_state=%d\n", 1618 intr, in_8(&via1[IER]), pmu_state); 1619 break; 1620 } 1621 if (intr & CB1_INT) { 1622 adb_int_pending = 1; 1623 pmu_irq_stats[11]++; 1624 } 1625 if (intr & SR_INT) { 1626 req = pmu_sr_intr(); 1627 if (req) 1628 break; 1629 } 1630 #ifndef CONFIG_PPC_PMAC 1631 break; 1632 #endif 1633 } 1634 1635 recheck: 1636 if (pmu_state == idle) { 1637 if (adb_int_pending) { 1638 if (int_data_state[0] == int_data_empty) 1639 int_data_last = 0; 1640 else if (int_data_state[1] == int_data_empty) 1641 int_data_last = 1; 1642 else 1643 goto no_free_slot; 1644 pmu_state = intack; 1645 int_data_state[int_data_last] = int_data_fill; 1646 /* Sounds safer to make sure ACK is high before writing. 1647 * This helped kill a problem with ADB and some iBooks 1648 */ 1649 wait_for_ack(); 1650 send_byte(PMU_INT_ACK); 1651 adb_int_pending = 0; 1652 } else if (current_req) 1653 pmu_start(); 1654 } 1655 no_free_slot: 1656 /* Mark the oldest buffer for flushing */ 1657 if (int_data_state[!int_data_last] == int_data_ready) { 1658 int_data_state[!int_data_last] = int_data_flush; 1659 int_data = !int_data_last; 1660 } else if (int_data_state[int_data_last] == int_data_ready) { 1661 int_data_state[int_data_last] = int_data_flush; 1662 int_data = int_data_last; 1663 } 1664 --disable_poll; 1665 spin_unlock_irqrestore(&pmu_lock, flags); 1666 1667 /* Deal with completed PMU requests outside of the lock */ 1668 if (req) { 1669 pmu_done(req); 1670 req = NULL; 1671 } 1672 1673 /* Deal with interrupt datas outside of the lock */ 1674 if (int_data >= 0) { 1675 pmu_handle_data(interrupt_data[int_data], interrupt_data_len[int_data]); 1676 spin_lock_irqsave(&pmu_lock, flags); 1677 ++disable_poll; 1678 int_data_state[int_data] = int_data_empty; 1679 int_data = -1; 1680 goto recheck; 1681 } 1682 1683 return IRQ_RETVAL(handled); 1684 } 1685 1686 void 1687 pmu_unlock(void) 1688 { 1689 unsigned long flags; 1690 1691 spin_lock_irqsave(&pmu_lock, flags); 1692 if (pmu_state == locked) 1693 pmu_state = idle; 1694 adb_int_pending = 1; 1695 spin_unlock_irqrestore(&pmu_lock, flags); 1696 } 1697 1698 1699 static __maybe_unused irqreturn_t 1700 gpio1_interrupt(int irq, void *arg) 1701 { 1702 unsigned long flags; 1703 1704 if ((in_8(gpio_reg + 0x9) & 0x02) == 0) { 1705 spin_lock_irqsave(&pmu_lock, flags); 1706 if (gpio_irq_enabled > 0) { 1707 disable_irq_nosync(gpio_irq); 1708 gpio_irq_enabled = 0; 1709 } 1710 pmu_irq_stats[12]++; 1711 adb_int_pending = 1; 1712 spin_unlock_irqrestore(&pmu_lock, flags); 1713 via_pmu_interrupt(0, NULL); 1714 return IRQ_HANDLED; 1715 } 1716 return IRQ_NONE; 1717 } 1718 1719 void 1720 pmu_enable_irled(int on) 1721 { 1722 struct adb_request req; 1723 1724 if (pmu_state == uninitialized) 1725 return ; 1726 if (pmu_kind == PMU_KEYLARGO_BASED) 1727 return ; 1728 1729 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED | 1730 (on ? 
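/* The RTC_OFFSET defined below converts between the PMU's 1904-based Mac
 * epoch and the Unix 1970 epoch: 66 years of 365 days plus 17 leap days
 * is 24107 days, i.e. 24107 * 86400 = 2082844800 seconds, the constant
 * subtracted in pmu_get_time() and added back in pmu_set_rtc_time().
 */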
PMU_POW_ON : PMU_POW_OFF)); 1731 pmu_wait_complete(&req); 1732 } 1733 1734 /* Offset between Unix time (1970-based) and Mac time (1904-based) */ 1735 #define RTC_OFFSET 2082844800 1736 1737 time64_t pmu_get_time(void) 1738 { 1739 struct adb_request req; 1740 u32 now; 1741 1742 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) 1743 return 0; 1744 pmu_wait_complete(&req); 1745 if (req.reply_len != 4) 1746 pr_err("%s: got %d byte reply\n", __func__, req.reply_len); 1747 now = (req.reply[0] << 24) + (req.reply[1] << 16) + 1748 (req.reply[2] << 8) + req.reply[3]; 1749 return (time64_t)now - RTC_OFFSET; 1750 } 1751 1752 int pmu_set_rtc_time(struct rtc_time *tm) 1753 { 1754 u32 now; 1755 struct adb_request req; 1756 1757 now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET); 1758 if (pmu_request(&req, NULL, 5, PMU_SET_RTC, 1759 now >> 24, now >> 16, now >> 8, now) < 0) 1760 return -ENXIO; 1761 pmu_wait_complete(&req); 1762 if (req.reply_len != 0) 1763 pr_err("%s: got %d byte reply\n", __func__, req.reply_len); 1764 return 0; 1765 } 1766 1767 void 1768 pmu_restart(void) 1769 { 1770 struct adb_request req; 1771 1772 if (pmu_state == uninitialized) 1773 return; 1774 1775 local_irq_disable(); 1776 1777 drop_interrupts = 1; 1778 1779 if (pmu_kind != PMU_KEYLARGO_BASED) { 1780 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | 1781 PMU_INT_TICK ); 1782 while(!req.complete) 1783 pmu_poll(); 1784 } 1785 1786 pmu_request(&req, NULL, 1, PMU_RESET); 1787 pmu_wait_complete(&req); 1788 for (;;) 1789 ; 1790 } 1791 1792 void 1793 pmu_shutdown(void) 1794 { 1795 struct adb_request req; 1796 1797 if (pmu_state == uninitialized) 1798 return; 1799 1800 local_irq_disable(); 1801 1802 drop_interrupts = 1; 1803 1804 if (pmu_kind != PMU_KEYLARGO_BASED) { 1805 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | 1806 PMU_INT_TICK ); 1807 pmu_wait_complete(&req); 1808 } else { 1809 /* Disable server mode on shutdown or we'll just 1810 * wake up again 1811 */ 1812 pmu_set_server_mode(0); 1813 } 1814 1815 pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 1816 'M', 'A', 'T', 'T'); 1817 pmu_wait_complete(&req); 1818 for (;;) 1819 ; 1820 } 1821 1822 int 1823 pmu_present(void) 1824 { 1825 return pmu_state != uninitialized; 1826 } 1827 1828 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 1829 /* 1830 * Put the powerbook to sleep. 
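 *
 * Broadly, the Grackle and Core99 routines below share one shape: quiesce
 * PMU traffic and program the wakeup conditions, save the VIA and L2 (and,
 * on Core99, L3) cache state, send PMU_SLEEP with the 'M','A','T','T'
 * signature, hand over to low_sleep_handler() (or just mdelay() when
 * __fake_sleep is set), then on wakeup restore the ASICs, VIA and caches
 * and re-arm the PMU interrupt mask.  The 3400/3500 path instead parks the
 * memory controller so DRAM stays refreshed and puts the CPU to sleep via
 * HID0/MSR[POW].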
1831 */ 1832 1833 static u32 save_via[8]; 1834 static int __fake_sleep; 1835 1836 static void 1837 save_via_state(void) 1838 { 1839 save_via[0] = in_8(&via1[ANH]); 1840 save_via[1] = in_8(&via1[DIRA]); 1841 save_via[2] = in_8(&via1[B]); 1842 save_via[3] = in_8(&via1[DIRB]); 1843 save_via[4] = in_8(&via1[PCR]); 1844 save_via[5] = in_8(&via1[ACR]); 1845 save_via[6] = in_8(&via1[T1CL]); 1846 save_via[7] = in_8(&via1[T1CH]); 1847 } 1848 static void 1849 restore_via_state(void) 1850 { 1851 out_8(&via1[ANH], save_via[0]); 1852 out_8(&via1[DIRA], save_via[1]); 1853 out_8(&via1[B], save_via[2]); 1854 out_8(&via1[DIRB], save_via[3]); 1855 out_8(&via1[PCR], save_via[4]); 1856 out_8(&via1[ACR], save_via[5]); 1857 out_8(&via1[T1CL], save_via[6]); 1858 out_8(&via1[T1CH], save_via[7]); 1859 out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */ 1860 out_8(&via1[IFR], 0x7f); /* clear IFR */ 1861 out_8(&via1[IER], IER_SET | SR_INT | CB1_INT); 1862 } 1863 1864 #define GRACKLE_PM (1<<7) 1865 #define GRACKLE_DOZE (1<<5) 1866 #define GRACKLE_NAP (1<<4) 1867 #define GRACKLE_SLEEP (1<<3) 1868 1869 static int powerbook_sleep_grackle(void) 1870 { 1871 unsigned long save_l2cr; 1872 unsigned short pmcr1; 1873 struct adb_request req; 1874 struct pci_dev *grackle; 1875 1876 grackle = pci_get_domain_bus_and_slot(0, 0, 0); 1877 if (!grackle) 1878 return -ENODEV; 1879 1880 /* Turn off various things. Darwin does some retry tests here... */ 1881 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE); 1882 pmu_wait_complete(&req); 1883 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, 1884 PMU_POW_OFF|PMU_POW_BACKLIGHT|PMU_POW_IRLED|PMU_POW_MEDIABAY); 1885 pmu_wait_complete(&req); 1886 1887 /* For 750, save backside cache setting and disable it */ 1888 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */ 1889 1890 if (!__fake_sleep) { 1891 /* Ask the PMU to put us to sleep */ 1892 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); 1893 pmu_wait_complete(&req); 1894 } 1895 1896 /* The VIA is supposed not to be restored correctly*/ 1897 save_via_state(); 1898 /* We shut down some HW */ 1899 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,1); 1900 1901 pci_read_config_word(grackle, 0x70, &pmcr1); 1902 /* Apparently, MacOS uses NAP mode for Grackle ??? 
*/ 1903 pmcr1 &= ~(GRACKLE_DOZE|GRACKLE_SLEEP); 1904 pmcr1 |= GRACKLE_PM|GRACKLE_NAP; 1905 pci_write_config_word(grackle, 0x70, pmcr1); 1906 1907 /* Call low-level ASM sleep handler */ 1908 if (__fake_sleep) 1909 mdelay(5000); 1910 else 1911 low_sleep_handler(); 1912 1913 /* We're awake again, stop grackle PM */ 1914 pci_read_config_word(grackle, 0x70, &pmcr1); 1915 pmcr1 &= ~(GRACKLE_PM|GRACKLE_DOZE|GRACKLE_SLEEP|GRACKLE_NAP); 1916 pci_write_config_word(grackle, 0x70, pmcr1); 1917 1918 pci_dev_put(grackle); 1919 1920 /* Make sure the PMU is idle */ 1921 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,0); 1922 restore_via_state(); 1923 1924 /* Restore L2 cache */ 1925 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) 1926 _set_L2CR(save_l2cr); 1927 1928 /* Restore userland MMU context */ 1929 switch_mmu_context(NULL, current->active_mm, NULL); 1930 1931 /* Power things up */ 1932 pmu_unlock(); 1933 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 1934 pmu_wait_complete(&req); 1935 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, 1936 PMU_POW0_ON|PMU_POW0_HARD_DRIVE); 1937 pmu_wait_complete(&req); 1938 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, 1939 PMU_POW_ON|PMU_POW_BACKLIGHT|PMU_POW_CHARGER|PMU_POW_IRLED|PMU_POW_MEDIABAY); 1940 pmu_wait_complete(&req); 1941 1942 return 0; 1943 } 1944 1945 static int 1946 powerbook_sleep_Core99(void) 1947 { 1948 unsigned long save_l2cr; 1949 unsigned long save_l3cr; 1950 struct adb_request req; 1951 1952 if (pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) < 0) { 1953 printk(KERN_ERR "Sleep mode not supported on this machine\n"); 1954 return -ENOSYS; 1955 } 1956 1957 if (num_online_cpus() > 1 || cpu_is_offline(0)) 1958 return -EAGAIN; 1959 1960 /* Stop environment and ADB interrupts */ 1961 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0); 1962 pmu_wait_complete(&req); 1963 1964 /* Tell PMU what events will wake us up */ 1965 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS, 1966 0xff, 0xff); 1967 pmu_wait_complete(&req); 1968 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_SET_WAKEUP_EVENTS, 1969 0, PMU_PWR_WAKEUP_KEY | 1970 (option_lid_wakeup ? PMU_PWR_WAKEUP_LID_OPEN : 0)); 1971 pmu_wait_complete(&req); 1972 1973 /* Save the state of the L2 and L3 caches */ 1974 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */ 1975 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */ 1976 1977 if (!__fake_sleep) { 1978 /* Ask the PMU to put us to sleep */ 1979 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); 1980 pmu_wait_complete(&req); 1981 } 1982 1983 /* The VIA is supposed not to be restored correctly*/ 1984 save_via_state(); 1985 1986 /* Shut down various ASICs. There's a chance that we can no longer 1987 * talk to the PMU after this, so I moved it to _after_ sending the 1988 * sleep command to it. Still need to be checked. 
1989 */ 1990 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1); 1991 1992 /* Call low-level ASM sleep handler */ 1993 if (__fake_sleep) 1994 mdelay(5000); 1995 else 1996 low_sleep_handler(); 1997 1998 /* Restore Apple core ASICs state */ 1999 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0); 2000 2001 /* Restore VIA */ 2002 restore_via_state(); 2003 2004 /* tweak LPJ before cpufreq is there */ 2005 loops_per_jiffy *= 2; 2006 2007 /* Restore video */ 2008 pmac_call_early_video_resume(); 2009 2010 /* Restore L2 cache */ 2011 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0) 2012 _set_L2CR(save_l2cr); 2013 /* Restore L3 cache */ 2014 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0) 2015 _set_L3CR(save_l3cr); 2016 2017 /* Restore userland MMU context */ 2018 switch_mmu_context(NULL, current->active_mm, NULL); 2019 2020 /* Tell PMU we are ready */ 2021 pmu_unlock(); 2022 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); 2023 pmu_wait_complete(&req); 2024 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 2025 pmu_wait_complete(&req); 2026 2027 /* Restore LPJ, cpufreq will adjust the cpu frequency */ 2028 loops_per_jiffy /= 2; 2029 2030 return 0; 2031 } 2032 2033 #define PB3400_MEM_CTRL 0xf8000000 2034 #define PB3400_MEM_CTRL_SLEEP 0x70 2035 2036 static void __iomem *pb3400_mem_ctrl; 2037 2038 static void powerbook_sleep_init_3400(void) 2039 { 2040 /* map in the memory controller registers */ 2041 pb3400_mem_ctrl = ioremap(PB3400_MEM_CTRL, 0x100); 2042 if (pb3400_mem_ctrl == NULL) 2043 printk(KERN_WARNING "ioremap failed: sleep won't be possible"); 2044 } 2045 2046 static int powerbook_sleep_3400(void) 2047 { 2048 int i, x; 2049 unsigned int hid0; 2050 unsigned long msr; 2051 struct adb_request sleep_req; 2052 unsigned int __iomem *mem_ctrl_sleep; 2053 2054 if (pb3400_mem_ctrl == NULL) 2055 return -ENOMEM; 2056 mem_ctrl_sleep = pb3400_mem_ctrl + PB3400_MEM_CTRL_SLEEP; 2057 2058 /* Set the memory controller to keep the memory refreshed 2059 while we're asleep */ 2060 for (i = 0x403f; i >= 0x4000; --i) { 2061 out_be32(mem_ctrl_sleep, i); 2062 do { 2063 x = (in_be32(mem_ctrl_sleep) >> 16) & 0x3ff; 2064 } while (x == 0); 2065 if (x >= 0x100) 2066 break; 2067 } 2068 2069 /* Ask the PMU to put us to sleep */ 2070 pmu_request(&sleep_req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); 2071 pmu_wait_complete(&sleep_req); 2072 pmu_unlock(); 2073 2074 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1); 2075 2076 asleep = 1; 2077 2078 /* Put the CPU into sleep mode */ 2079 hid0 = mfspr(SPRN_HID0); 2080 hid0 = (hid0 & ~(HID0_NAP | HID0_DOZE)) | HID0_SLEEP; 2081 mtspr(SPRN_HID0, hid0); 2082 local_irq_enable(); 2083 msr = mfmsr() | MSR_POW; 2084 while (asleep) { 2085 mb(); 2086 mtmsr(msr); 2087 isync(); 2088 } 2089 local_irq_disable(); 2090 2091 /* OK, we're awake again, start restoring things */ 2092 out_be32(mem_ctrl_sleep, 0x3f); 2093 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0); 2094 2095 return 0; 2096 } 2097 2098 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */ 2099 2100 /* 2101 * Support for /dev/pmu device 2102 */ 2103 #define RB_SIZE 0x10 2104 struct pmu_private { 2105 struct list_head list; 2106 int rb_get; 2107 int rb_put; 2108 struct rb_entry { 2109 unsigned short len; 2110 unsigned char data[16]; 2111 } rb_buf[RB_SIZE]; 2112 wait_queue_head_t wait; 2113 spinlock_t lock; 2114 #if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) 2115 int backlight_locker; 2116 #endif 2117 }; 2118 2119 static LIST_HEAD(all_pmu_pvt); 2120 static 
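/*
 * Each /dev/pmu reader owns a pmu_private with a small ring of RB_SIZE
 * events: pmu_pass_intr() below stores at rb_put and advances it modulo
 * RB_SIZE, pmu_read() consumes from rb_get, and one slot is always left
 * unused so that rb_put == rb_get unambiguously means "empty"; when the
 * ring is full the event is dropped for that reader rather than
 * overwriting older data.
 */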

/*
 * Support for /dev/pmu device
 */
#define RB_SIZE		0x10
struct pmu_private {
	struct list_head list;
	int	rb_get;
	int	rb_put;
	struct rb_entry {
		unsigned short len;
		unsigned char data[16];
	} rb_buf[RB_SIZE];
	wait_queue_head_t wait;
	spinlock_t lock;
#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
	int	backlight_locker;
#endif
};

static LIST_HEAD(all_pmu_pvt);
static DEFINE_SPINLOCK(all_pvt_lock);

/* Deliver a PMU event packet to every open /dev/pmu client's ring buffer */
static void
pmu_pass_intr(unsigned char *data, int len)
{
	struct pmu_private *pp;
	struct list_head *list;
	int i;
	unsigned long flags;

	if (len > sizeof(pp->rb_buf[0].data))
		len = sizeof(pp->rb_buf[0].data);
	spin_lock_irqsave(&all_pvt_lock, flags);
	for (list = &all_pmu_pvt; (list = list->next) != &all_pmu_pvt; ) {
		pp = list_entry(list, struct pmu_private, list);
		spin_lock(&pp->lock);
		i = pp->rb_put + 1;
		if (i >= RB_SIZE)
			i = 0;
		if (i != pp->rb_get) {
			struct rb_entry *rp = &pp->rb_buf[pp->rb_put];

			rp->len = len;
			memcpy(rp->data, data, len);
			pp->rb_put = i;
			wake_up_interruptible(&pp->wait);
		}
		spin_unlock(&pp->lock);
	}
	spin_unlock_irqrestore(&all_pvt_lock, flags);
}

static int
pmu_open(struct inode *inode, struct file *file)
{
	struct pmu_private *pp;
	unsigned long flags;

	pp = kmalloc(sizeof(struct pmu_private), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->rb_get = pp->rb_put = 0;
	spin_lock_init(&pp->lock);
	init_waitqueue_head(&pp->wait);
	mutex_lock(&pmu_info_proc_mutex);
	spin_lock_irqsave(&all_pvt_lock, flags);
#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
	pp->backlight_locker = 0;
#endif
	list_add(&pp->list, &all_pmu_pvt);
	spin_unlock_irqrestore(&all_pvt_lock, flags);
	file->private_data = pp;
	mutex_unlock(&pmu_info_proc_mutex);
	return 0;
}

static ssize_t
pmu_read(struct file *file, char __user *buf,
	 size_t count, loff_t *ppos)
{
	struct pmu_private *pp = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int ret = 0;

	if (count < 1 || !pp)
		return -EINVAL;

	spin_lock_irqsave(&pp->lock, flags);
	add_wait_queue(&pp->wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	for (;;) {
		ret = -EAGAIN;
		if (pp->rb_get != pp->rb_put) {
			int i = pp->rb_get;
			struct rb_entry *rp = &pp->rb_buf[i];

			ret = rp->len;
			spin_unlock_irqrestore(&pp->lock, flags);
			if (ret > count)
				ret = count;
			if (ret > 0 && copy_to_user(buf, rp->data, ret))
				ret = -EFAULT;
			if (++i >= RB_SIZE)
				i = 0;
			spin_lock_irqsave(&pp->lock, flags);
			pp->rb_get = i;
		}
		if (ret >= 0)
			break;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		spin_unlock_irqrestore(&pp->lock, flags);
		schedule();
		spin_lock_irqsave(&pp->lock, flags);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&pp->wait, &wait);
	spin_unlock_irqrestore(&pp->lock, flags);

	return ret;
}

/* Writes to /dev/pmu are accepted but ignored */
static ssize_t
pmu_write(struct file *file, const char __user *buf,
	  size_t count, loff_t *ppos)
{
	return 0;
}

static __poll_t
pmu_fpoll(struct file *filp, poll_table *wait)
{
	struct pmu_private *pp = filp->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	if (!pp)
		return 0;
	poll_wait(filp, &pp->wait, wait);
	spin_lock_irqsave(&pp->lock, flags);
	if (pp->rb_get != pp->rb_put)
		mask |= EPOLLIN;
	spin_unlock_irqrestore(&pp->lock, flags);
	return mask;
}
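
/*
 * Illustrative sketch (not part of the driver): a minimal user-space reader
 * of /dev/pmu.  Each read() returns one queued event packet (at most 16
 * bytes), and poll() reports EPOLLIN whenever at least one packet is queued.
 * Error handling is omitted and handle_pmu_event() is a hypothetical
 * consumer; the device node name is the one registered by pmu_device_init()
 * further down.
 *
 *	int fd = open("/dev/pmu", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	unsigned char pkt[16];
 *	ssize_t n;
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		n = read(fd, pkt, sizeof(pkt));
 *		if (n > 0)
 *			handle_pmu_event(pkt, n);
 *	}
 */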

static int
pmu_release(struct inode *inode, struct file *file)
{
	struct pmu_private *pp = file->private_data;
	unsigned long flags;

	if (pp) {
		file->private_data = NULL;
		spin_lock_irqsave(&all_pvt_lock, flags);
		list_del(&pp->list);
		spin_unlock_irqrestore(&all_pvt_lock, flags);

#if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
		if (pp->backlight_locker)
			pmac_backlight_enable();
#endif

		kfree(pp);
	}
	return 0;
}

#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
static void pmac_suspend_disable_irqs(void)
{
	/* Call platform functions marked "on sleep" */
	pmac_pfunc_i2c_suspend();
	pmac_pfunc_base_suspend();
}

static int powerbook_sleep(suspend_state_t state)
{
	int error = 0;

	/* Wait for completion of async requests */
	while (!batt_req.complete)
		pmu_poll();

	/* Give up the lazy FPU & vec so we don't have to back them
	 * up from the low-level code
	 */
	enable_kernel_fp();

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
#endif /* CONFIG_ALTIVEC */

	switch (pmu_kind) {
	case PMU_OHARE_BASED:
		error = powerbook_sleep_3400();
		break;
	case PMU_HEATHROW_BASED:
	case PMU_PADDINGTON_BASED:
		error = powerbook_sleep_grackle();
		break;
	case PMU_KEYLARGO_BASED:
		error = powerbook_sleep_Core99();
		break;
	default:
		return -ENOSYS;
	}

	if (error)
		return error;

	mdelay(100);

	return 0;
}

static void pmac_suspend_enable_irqs(void)
{
	/* Force a poll of ADB interrupts */
	adb_int_pending = 1;
	via_pmu_interrupt(0, NULL);

	mdelay(10);

	/* Call platform functions marked "on wake" */
	pmac_pfunc_base_resume();
	pmac_pfunc_i2c_resume();
}

static int pmu_sleep_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM
		&& (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
}

static const struct platform_suspend_ops pmu_pm_ops = {
	.enter = powerbook_sleep,
	.valid = pmu_sleep_valid,
};

static int register_pmu_pm_ops(void)
{
	if (pmu_kind == PMU_OHARE_BASED)
		powerbook_sleep_init_3400();
	ppc_md.suspend_disable_irqs = pmac_suspend_disable_irqs;
	ppc_md.suspend_enable_irqs = pmac_suspend_enable_irqs;
	suspend_set_ops(&pmu_pm_ops);

	return 0;
}

device_initcall(register_pmu_pm_ops);
#endif
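
/*
 * Illustrative note (not part of the driver): once pmu_pm_ops is registered,
 * suspend-to-RAM can be requested either from user space ("echo mem >
 * /sys/power/state") or through the PMU_IOC_SLEEP ioctl handled below; both
 * end up in pm_suspend(PM_SUSPEND_MEM), which checks pmu_sleep_valid() and
 * then calls powerbook_sleep() as the .enter hook, e.g.:
 *
 *	if (pm_suspend(PM_SUSPEND_MEM) == 0)
 *		pr_info("via-pmu: woke up from suspend\n");
 */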

static int pmu_ioctl(struct file *filp,
		     u_int cmd, u_long arg)
{
	__u32 __user *argp = (__u32 __user *)arg;
	int error = -EINVAL;

	switch (cmd) {
#ifdef CONFIG_PPC_PMAC
	case PMU_IOC_SLEEP:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		return pm_suspend(PM_SUSPEND_MEM);
	case PMU_IOC_CAN_SLEEP:
		if (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) < 0)
			return put_user(0, argp);
		else
			return put_user(1, argp);
#endif

#ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
	/* Compatibility ioctls for backlight */
	case PMU_IOC_GET_BACKLIGHT:
	{
		int brightness;

		brightness = pmac_backlight_get_legacy_brightness();
		if (brightness < 0)
			return brightness;
		else
			return put_user(brightness, argp);
	}
	case PMU_IOC_SET_BACKLIGHT:
	{
		int brightness;

		error = get_user(brightness, argp);
		if (error)
			return error;

		return pmac_backlight_set_legacy_brightness(brightness);
	}
#ifdef CONFIG_INPUT_ADBHID
	case PMU_IOC_GRAB_BACKLIGHT: {
		struct pmu_private *pp = filp->private_data;

		if (pp->backlight_locker)
			return 0;

		pp->backlight_locker = 1;
		pmac_backlight_disable();

		return 0;
	}
#endif /* CONFIG_INPUT_ADBHID */
#endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */

	case PMU_IOC_GET_MODEL:
		return put_user(pmu_kind, argp);
	case PMU_IOC_HAS_ADB:
		return put_user(pmu_has_adb, argp);
	}
	return error;
}

static long pmu_unlocked_ioctl(struct file *filp,
			       u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&pmu_info_proc_mutex);
	ret = pmu_ioctl(filp, cmd, arg);
	mutex_unlock(&pmu_info_proc_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
#define PMU_IOC_GET_BACKLIGHT32	_IOR('B', 1, compat_size_t)
#define PMU_IOC_SET_BACKLIGHT32	_IOW('B', 2, compat_size_t)
#define PMU_IOC_GET_MODEL32	_IOR('B', 3, compat_size_t)
#define PMU_IOC_HAS_ADB32	_IOR('B', 4, compat_size_t)
#define PMU_IOC_CAN_SLEEP32	_IOR('B', 5, compat_size_t)
#define PMU_IOC_GRAB_BACKLIGHT32	_IOR('B', 6, compat_size_t)

static long compat_pmu_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	switch (cmd) {
	case PMU_IOC_SLEEP:
		break;
	case PMU_IOC_GET_BACKLIGHT32:
		cmd = PMU_IOC_GET_BACKLIGHT;
		break;
	case PMU_IOC_SET_BACKLIGHT32:
		cmd = PMU_IOC_SET_BACKLIGHT;
		break;
	case PMU_IOC_GET_MODEL32:
		cmd = PMU_IOC_GET_MODEL;
		break;
	case PMU_IOC_HAS_ADB32:
		cmd = PMU_IOC_HAS_ADB;
		break;
	case PMU_IOC_CAN_SLEEP32:
		cmd = PMU_IOC_CAN_SLEEP;
		break;
	case PMU_IOC_GRAB_BACKLIGHT32:
		cmd = PMU_IOC_GRAB_BACKLIGHT;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pmu_device_fops = {
	.read		= pmu_read,
	.write		= pmu_write,
	.poll		= pmu_fpoll,
	.unlocked_ioctl	= pmu_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_pmu_ioctl,
#endif
	.open		= pmu_open,
	.release	= pmu_release,
	.llseek		= noop_llseek,
};

static struct miscdevice pmu_device = {
	PMU_MINOR, "pmu", &pmu_device_fops
};

static int pmu_device_init(void)
{
	if (pmu_state == uninitialized)
		return 0;
	if (misc_register(&pmu_device) < 0)
		printk(KERN_ERR "via-pmu: cannot register misc device.\n");
	return 0;
}
device_initcall(pmu_device_init);
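
/*
 * Illustrative sketch (not part of the driver): exercising the ioctls above
 * from user space.  Variable names are hypothetical; the PMU_IOC_* numbers
 * come from <linux/pmu.h>, and PMU_IOC_SLEEP requires CAP_SYS_ADMIN.
 *
 *	int fd = open("/dev/pmu", O_RDWR);
 *	__u32 model, can_sleep;
 *
 *	ioctl(fd, PMU_IOC_GET_MODEL, &model);
 *	ioctl(fd, PMU_IOC_CAN_SLEEP, &can_sleep);
 *	if (can_sleep)
 *		ioctl(fd, PMU_IOC_SLEEP, 0);
 */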


#ifdef DEBUG_SLEEP
static inline void
polled_handshake(void)
{
	via2[B] &= ~TREQ; eieio();
	while ((via2[B] & TACK) != 0)
		;
	via2[B] |= TREQ; eieio();
	while ((via2[B] & TACK) == 0)
		;
}

static inline void
polled_send_byte(int x)
{
	via1[ACR] |= SR_OUT | SR_EXT; eieio();
	via1[SR] = x; eieio();
	polled_handshake();
}

static inline int
polled_recv_byte(void)
{
	int x;

	via1[ACR] = (via1[ACR] & ~SR_OUT) | SR_EXT; eieio();
	x = via1[SR]; eieio();
	polled_handshake();
	x = via1[SR]; eieio();
	return x;
}

int
pmu_polled_request(struct adb_request *req)
{
	unsigned long flags;
	int i, l, c;

	req->complete = 1;
	c = req->data[0];
	l = pmu_data_len[c][0];
	if (l >= 0 && req->nbytes != l + 1)
		return -EINVAL;

	local_irq_save(flags);
	while (pmu_state != idle)
		pmu_poll();

	while ((via2[B] & TACK) == 0)
		;
	polled_send_byte(c);
	if (l < 0) {
		l = req->nbytes - 1;
		polled_send_byte(l);
	}
	for (i = 1; i <= l; ++i)
		polled_send_byte(req->data[i]);

	l = pmu_data_len[c][1];
	if (l < 0)
		l = polled_recv_byte();
	for (i = 0; i < l; ++i)
		req->reply[i + req->reply_len] = polled_recv_byte();

	if (req->done)
		(*req->done)(req);

	local_irq_restore(flags);
	return 0;
}

/* N.B. This doesn't work on the 3400 */
void pmu_blink(int n)
{
	struct adb_request req;

	memset(&req, 0, sizeof(req));

	for (; n > 0; --n) {
		req.nbytes = 4;
		req.done = NULL;
		req.data[0] = 0xee;
		req.data[1] = 4;
		req.data[2] = 0;
		req.data[3] = 1;
		req.reply[0] = ADB_RET_OK;
		req.reply_len = 1;
		req.reply_expected = 0;
		pmu_polled_request(&req);
		mdelay(50);
		req.nbytes = 4;
		req.done = NULL;
		req.data[0] = 0xee;
		req.data[1] = 4;
		req.data[2] = 0;
		req.data[3] = 0;
		req.reply[0] = ADB_RET_OK;
		req.reply_len = 1;
		req.reply_expected = 0;
		pmu_polled_request(&req);
		mdelay(50);
	}
	mdelay(50);
}
#endif /* DEBUG_SLEEP */

#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
int pmu_sys_suspended;

static int pmu_syscore_suspend(void)
{
	/* Suspend PMU event interrupts */
	pmu_suspend();
	pmu_sys_suspended = 1;

#ifdef CONFIG_PMAC_BACKLIGHT
	/* Tell backlight code not to muck around with the chip anymore */
	pmu_backlight_set_sleep(1);
#endif

	return 0;
}

static void pmu_syscore_resume(void)
{
	struct adb_request req;

	if (!pmu_sys_suspended)
		return;

	/* Tell PMU we are ready */
	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
	pmu_wait_complete(&req);

#ifdef CONFIG_PMAC_BACKLIGHT
	/* Tell backlight code it can use the chip again */
	pmu_backlight_set_sleep(0);
#endif
	/* Resume PMU event interrupts */
	pmu_resume();
	pmu_sys_suspended = 0;
}

static struct syscore_ops pmu_syscore_ops = {
	.suspend = pmu_syscore_suspend,
	.resume = pmu_syscore_resume,
};

static int pmu_syscore_register(void)
{
	register_syscore_ops(&pmu_syscore_ops);

	return 0;
}
subsys_initcall(pmu_syscore_register);
#endif /* CONFIG_SUSPEND && CONFIG_PPC32 */

EXPORT_SYMBOL(pmu_request);
EXPORT_SYMBOL(pmu_queue_request);
EXPORT_SYMBOL(pmu_poll);
EXPORT_SYMBOL(pmu_poll_adb);
EXPORT_SYMBOL(pmu_wait_complete);
EXPORT_SYMBOL(pmu_suspend);
EXPORT_SYMBOL(pmu_resume);
EXPORT_SYMBOL(pmu_unlock);
#if defined(CONFIG_PPC32)
EXPORT_SYMBOL(pmu_enable_irled);
EXPORT_SYMBOL(pmu_battery_count);
EXPORT_SYMBOL(pmu_batteries);
EXPORT_SYMBOL(pmu_power_flags);
#endif /* CONFIG_PPC32 */
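
/*
 * Illustrative sketch (not part of the driver): a separate kernel module
 * could use the symbols exported above to talk to the PMU.  The callback
 * name is hypothetical; PMU_GET_VERSION comes from <linux/pmu.h>.
 *
 *	static void my_done(struct adb_request *req)
 *	{
 *		pr_info("PMU reply: %d bytes\n", req->reply_len);
 *	}
 *
 *	static struct adb_request my_req;
 *
 *	pmu_request(&my_req, my_done, 1, PMU_GET_VERSION);
 *	pmu_wait_complete(&my_req);
 */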