1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Device driver for the PMU in Apple PowerBooks and PowerMacs.
4 *
5 * The VIA (versatile interface adapter) interfaces to the PMU,
6 * a 6805 microprocessor core whose primary function is to control
7 * battery charging and system power on the PowerBook 3400 and 2400.
8 * The PMU also controls the ADB (Apple Desktop Bus) which connects
9 * to the keyboard and mouse, as well as the non-volatile RAM
10 * and the RTC (real time clock) chip.
11 *
12 * Copyright (C) 1998 Paul Mackerras and Fabio Riccardi.
13 * Copyright (C) 2001-2002 Benjamin Herrenschmidt
14 * Copyright (C) 2006-2007 Johannes Berg
15 *
16 * THIS DRIVER IS BECOMING A TOTAL MESS !
17 * - Cleanup atomically disabling reply to PMU events after
18 * a sleep or a freq. switch
19 *
20 */
21 #include <linux/stdarg.h>
22 #include <linux/mutex.h>
23 #include <linux/types.h>
24 #include <linux/errno.h>
25 #include <linux/kernel.h>
26 #include <linux/delay.h>
27 #include <linux/sched/signal.h>
28 #include <linux/miscdevice.h>
29 #include <linux/blkdev.h>
30 #include <linux/pci.h>
31 #include <linux/slab.h>
32 #include <linux/poll.h>
33 #include <linux/adb.h>
34 #include <linux/pmu.h>
35 #include <linux/cuda.h>
36 #include <linux/module.h>
37 #include <linux/spinlock.h>
38 #include <linux/pm.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/device.h>
44 #include <linux/syscore_ops.h>
45 #include <linux/freezer.h>
46 #include <linux/syscalls.h>
47 #include <linux/suspend.h>
48 #include <linux/cpu.h>
49 #include <linux/compat.h>
50 #include <linux/of_address.h>
51 #include <linux/of_irq.h>
52 #include <linux/uaccess.h>
53 #include <linux/pgtable.h>
54 #include <asm/machdep.h>
55 #include <asm/io.h>
56 #include <asm/sections.h>
57 #include <asm/irq.h>
58 #ifdef CONFIG_PPC_PMAC
59 #include <asm/pmac_feature.h>
60 #include <asm/pmac_pfunc.h>
61 #include <asm/pmac_low_i2c.h>
62 #include <asm/mmu_context.h>
63 #include <asm/cputable.h>
64 #include <asm/time.h>
65 #include <asm/backlight.h>
66 #else
67 #include <asm/macintosh.h>
68 #include <asm/macints.h>
69 #include <asm/mac_via.h>
70 #endif
71
72 #include "via-pmu-event.h"
73
74 /* Some compile options */
75 #undef DEBUG_SLEEP
76
77 /* How many iterations between battery polls */
78 #define BATTERY_POLLING_COUNT 2
79
80 static DEFINE_MUTEX(pmu_info_proc_mutex);
81
82 /* VIA registers - spaced 0x200 bytes apart */
83 #define RS 0x200 /* skip between registers */
84 #define B 0 /* B-side data */
85 #define A RS /* A-side data */
86 #define DIRB (2*RS) /* B-side direction (1=output) */
87 #define DIRA (3*RS) /* A-side direction (1=output) */
88 #define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
89 #define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
90 #define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
91 #define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
92 #define T2CL (8*RS) /* Timer 2 ctr/latch (low 8 bits) */
93 #define T2CH (9*RS) /* Timer 2 counter (high 8 bits) */
94 #define SR (10*RS) /* Shift register */
95 #define ACR (11*RS) /* Auxiliary control register */
96 #define PCR (12*RS) /* Peripheral control register */
97 #define IFR (13*RS) /* Interrupt flag register */
98 #define IER (14*RS) /* Interrupt enable register */
99 #define ANH (15*RS) /* A-side data, no handshake */
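
/*
 * Illustrative sketch (not extra driver code): with the 0x200 spacing
 * above, VIA register N lives at offset N * RS from the mapped base and
 * is always accessed through the MMIO helpers, as find_via_pmu() does
 * when masking and clearing all interrupts:
 *
 *	out_8(&via1[IER], IER_CLR | 0x7f);
 *	out_8(&via1[IFR], 0x7f);
 */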
100
101 /* Bits in B data register: both active low */
102 #ifdef CONFIG_PPC_PMAC
103 #define TACK 0x08 /* Transfer acknowledge (input) */
104 #define TREQ 0x10 /* Transfer request (output) */
105 #else
106 #define TACK 0x02
107 #define TREQ 0x04
108 #endif
109
110 /* Bits in ACR */
111 #define SR_CTRL 0x1c /* Shift register control bits */
112 #define SR_EXT 0x0c /* Shift on external clock */
113 #define SR_OUT 0x10 /* Shift out if 1 */
114
115 /* Bits in IFR and IER */
116 #define IER_SET 0x80 /* set bits in IER */
117 #define IER_CLR 0 /* clear bits in IER */
118 #define SR_INT 0x04 /* Shift register full/empty */
119 #define CB2_INT 0x08
120 #define CB1_INT 0x10 /* transition on CB1 input */
121
122 static volatile enum pmu_state {
123 uninitialized = 0,
124 idle,
125 sending,
126 intack,
127 reading,
128 reading_intr,
129 locked,
130 } pmu_state;
131
132 static volatile enum int_data_state {
133 int_data_empty,
134 int_data_fill,
135 int_data_ready,
136 int_data_flush
137 } int_data_state[2] = { int_data_empty, int_data_empty };
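
/*
 * Interrupt replies are double-buffered: each of the two 32-byte
 * interrupt_data[] slots cycles empty -> fill -> ready -> flush as
 * via_pmu_interrupt() fills it and pmu_handle_data() consumes it.
 */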
138
139 static struct adb_request *current_req;
140 static struct adb_request *last_req;
141 static struct adb_request *req_awaiting_reply;
142 static unsigned char interrupt_data[2][32];
143 static int interrupt_data_len[2];
144 static int int_data_last;
145 static unsigned char *reply_ptr;
146 static int data_index;
147 static int data_len;
148 static volatile int adb_int_pending;
149 static volatile int disable_poll;
150 static int pmu_kind = PMU_UNKNOWN;
151 static int pmu_fully_inited;
152 static int pmu_has_adb;
153 #ifdef CONFIG_PPC_PMAC
154 static volatile unsigned char __iomem *via1;
155 static volatile unsigned char __iomem *via2;
156 static struct device_node *vias;
157 static struct device_node *gpio_node;
158 #endif
159 static unsigned char __iomem *gpio_reg;
160 static int gpio_irq = 0;
161 static int gpio_irq_enabled = -1;
162 static volatile int pmu_suspended;
163 static DEFINE_SPINLOCK(pmu_lock);
164 static u8 pmu_intr_mask;
165 static int pmu_version;
166 static int drop_interrupts;
167 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
168 static int option_lid_wakeup = 1;
169 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
170 static unsigned long async_req_locks;
171
172 #define NUM_IRQ_STATS 13
173 static unsigned int pmu_irq_stats[NUM_IRQ_STATS];
174
175 static struct proc_dir_entry *proc_pmu_root;
176 static struct proc_dir_entry *proc_pmu_info;
177 static struct proc_dir_entry *proc_pmu_irqstats;
178 static struct proc_dir_entry *proc_pmu_options;
179 static int option_server_mode;
180
181 int pmu_battery_count;
182 static int pmu_cur_battery;
183 unsigned int pmu_power_flags = PMU_PWR_AC_PRESENT;
184 struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
185 static int query_batt_timer = BATTERY_POLLING_COUNT;
186 static struct adb_request batt_req;
187 static struct proc_dir_entry *proc_pmu_batt[PMU_MAX_BATTERIES];
188
189 int asleep;
190
191 #ifdef CONFIG_ADB
192 static int adb_dev_map;
193 static int pmu_adb_flags;
194
195 static int pmu_probe(void);
196 static int pmu_init(void);
197 static int pmu_send_request(struct adb_request *req, int sync);
198 static int pmu_adb_autopoll(int devs);
199 static int pmu_adb_reset_bus(void);
200 #endif /* CONFIG_ADB */
201
202 static int init_pmu(void);
203 static void pmu_start(void);
204 static irqreturn_t via_pmu_interrupt(int irq, void *arg);
205 static irqreturn_t gpio1_interrupt(int irq, void *arg);
206 #ifdef CONFIG_PROC_FS
207 static int pmu_info_proc_show(struct seq_file *m, void *v);
208 static int pmu_irqstats_proc_show(struct seq_file *m, void *v);
209 static int pmu_battery_proc_show(struct seq_file *m, void *v);
210 #endif
211 static void pmu_pass_intr(unsigned char *data, int len);
212 static const struct proc_ops pmu_options_proc_ops;
213
214 #ifdef CONFIG_ADB
215 const struct adb_driver via_pmu_driver = {
216 .name = "PMU",
217 .probe = pmu_probe,
218 .init = pmu_init,
219 .send_request = pmu_send_request,
220 .autopoll = pmu_adb_autopoll,
221 .poll = pmu_poll_adb,
222 .reset_bus = pmu_adb_reset_bus,
223 };
224 #endif /* CONFIG_ADB */
225
226 extern void low_sleep_handler(void);
227 extern void enable_kernel_altivec(void);
228 extern void enable_kernel_fp(void);
229
230 #ifdef DEBUG_SLEEP
231 int pmu_polled_request(struct adb_request *req);
232 void pmu_blink(int n);
233 #endif
234
235 /*
236 * This table indicates for each PMU opcode:
237 * - the number of data bytes to be sent with the command, or -1
238 * if a length byte should be sent,
239 * - the number of response bytes which the PMU will return, or
240 * -1 if it will send a length byte.
241 */
242 static const s8 pmu_data_len[256][2] = {
243 /* 0 1 2 3 4 5 6 7 */
244 /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
245 /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
246 /*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
247 /*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0},
248 /*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},
249 /*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1},
250 /*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
251 /*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0},
252 /*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
253 /*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1},
254 /*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0},
255 /*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},
256 /*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
257 /*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1},
258 /*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
259 /*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1},
260 /*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
261 /*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
262 /*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
263 /*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
264 /*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},
265 /*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
266 /*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
267 /*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
268 /*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
269 /*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
270 /*d0*/ { 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
271 /*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1},
272 /*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0},
273 /*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0},
274 /*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},
275 /*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},
276 };
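
/*
 * Illustrative example (a sketch, not extra driver code): for a command
 * with a fixed send length, pmu_queue_request() below requires nbytes to
 * equal pmu_data_len[opcode][0] + 1.  PMU_SET_INTR_MASK takes a single
 * mask byte, so callers in this file issue it as:
 *
 *	struct adb_request req;
 *
 *	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
 *	pmu_wait_complete(&req);
 */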
277
278 static char *pbook_type[] = {
279 "Unknown PowerBook",
280 "PowerBook 2400/3400/3500(G3)",
281 "PowerBook G3 Series",
282 "1999 PowerBook G3",
283 "Core99"
284 };
285
int __init find_via_pmu(void)
287 {
288 #ifdef CONFIG_PPC_PMAC
289 int err;
290 u64 taddr;
291 struct resource res;
292
293 if (pmu_state != uninitialized)
294 return 1;
295 vias = of_find_node_by_name(NULL, "via-pmu");
296 if (vias == NULL)
297 return 0;
298
299 err = of_address_to_resource(vias, 0, &res);
300 if (err) {
301 printk(KERN_ERR "via-pmu: Error getting \"reg\" property !\n");
302 goto fail;
303 }
304 taddr = res.start;
305
306 pmu_has_adb = 1;
307
308 pmu_intr_mask = PMU_INT_PCEJECT |
309 PMU_INT_SNDBRT |
310 PMU_INT_ADB |
311 PMU_INT_TICK;
312
313 if (of_node_name_eq(vias->parent, "ohare") ||
314 of_device_is_compatible(vias->parent, "ohare"))
315 pmu_kind = PMU_OHARE_BASED;
316 else if (of_device_is_compatible(vias->parent, "paddington"))
317 pmu_kind = PMU_PADDINGTON_BASED;
318 else if (of_device_is_compatible(vias->parent, "heathrow"))
319 pmu_kind = PMU_HEATHROW_BASED;
320 else if (of_device_is_compatible(vias->parent, "Keylargo")
321 || of_device_is_compatible(vias->parent, "K2-Keylargo")) {
322 struct device_node *gpiop;
323 struct device_node *adbp;
324
325 pmu_kind = PMU_KEYLARGO_BASED;
326 adbp = of_find_node_by_type(NULL, "adb");
327 pmu_has_adb = (adbp != NULL);
328 of_node_put(adbp);
329 pmu_intr_mask = PMU_INT_PCEJECT |
330 PMU_INT_SNDBRT |
331 PMU_INT_ADB |
332 PMU_INT_TICK |
333 PMU_INT_ENVIRONMENT;
334
335 gpiop = of_find_node_by_name(NULL, "gpio");
336 if (gpiop) {
337 if (!of_address_to_resource(gpiop, 0, &res))
338 gpio_reg = ioremap(res.start, 0x10);
339 of_node_put(gpiop);
340 }
341 if (gpio_reg == NULL) {
342 printk(KERN_ERR "via-pmu: Can't find GPIO reg !\n");
343 goto fail;
344 }
345 } else
346 pmu_kind = PMU_UNKNOWN;
347
348 via1 = via2 = ioremap(taddr, 0x2000);
349 if (via1 == NULL) {
350 printk(KERN_ERR "via-pmu: Can't map address !\n");
351 goto fail_via_remap;
352 }
353
354 out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */
355 out_8(&via1[IFR], 0x7f); /* clear IFR */
356
357 pmu_state = idle;
358
359 if (!init_pmu())
360 goto fail_init;
361
362 sys_ctrler = SYS_CTRLER_PMU;
363
364 return 1;
365
366 fail_init:
367 iounmap(via1);
368 via1 = via2 = NULL;
369 fail_via_remap:
370 iounmap(gpio_reg);
371 gpio_reg = NULL;
372 fail:
373 of_node_put(vias);
374 vias = NULL;
375 pmu_state = uninitialized;
376 return 0;
377 #else
378 if (macintosh_config->adb_type != MAC_ADB_PB2)
379 return 0;
380
381 pmu_kind = PMU_UNKNOWN;
382
383 pmu_has_adb = 1;
384
385 pmu_intr_mask = PMU_INT_PCEJECT |
386 PMU_INT_SNDBRT |
387 PMU_INT_ADB |
388 PMU_INT_TICK;
389
390 pmu_state = idle;
391
392 if (!init_pmu()) {
393 pmu_state = uninitialized;
394 return 0;
395 }
396
397 return 1;
398 #endif /* !CONFIG_PPC_PMAC */
399 }
400
401 #ifdef CONFIG_ADB
static int pmu_probe(void)
403 {
404 return pmu_state == uninitialized ? -ENODEV : 0;
405 }
406
static int pmu_init(void)
408 {
409 return pmu_state == uninitialized ? -ENODEV : 0;
410 }
411 #endif /* CONFIG_ADB */
412
413 /*
414 * We can't wait until pmu_init gets called, that happens too late.
415 * It happens after IDE and SCSI initialization, which can take a few
416 * seconds, and by that time the PMU could have given up on us and
417 * turned us off.
418 * Thus this is called with arch_initcall rather than device_initcall.
419 */
static int __init via_pmu_start(void)
421 {
422 unsigned int __maybe_unused irq;
423
424 if (pmu_state == uninitialized)
425 return -ENODEV;
426
427 batt_req.complete = 1;
428
429 #ifdef CONFIG_PPC_PMAC
430 irq = irq_of_parse_and_map(vias, 0);
431 if (!irq) {
432 printk(KERN_ERR "via-pmu: can't map interrupt\n");
433 return -ENODEV;
434 }
/* We set IRQF_NO_SUSPEND because we don't want the interrupt
 * to be disabled between the two passes of driver suspend; we
 * control our own disabling for that one.
 */
439 if (request_irq(irq, via_pmu_interrupt, IRQF_NO_SUSPEND,
440 "VIA-PMU", (void *)0)) {
441 printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
442 return -ENODEV;
443 }
444
445 if (pmu_kind == PMU_KEYLARGO_BASED) {
446 gpio_node = of_find_node_by_name(NULL, "extint-gpio1");
447 if (gpio_node == NULL)
448 gpio_node = of_find_node_by_name(NULL,
449 "pmu-interrupt");
450 if (gpio_node)
451 gpio_irq = irq_of_parse_and_map(gpio_node, 0);
452
453 if (gpio_irq) {
454 if (request_irq(gpio_irq, gpio1_interrupt,
455 IRQF_NO_SUSPEND, "GPIO1 ADB",
456 (void *)0))
457 printk(KERN_ERR "pmu: can't get irq %d"
458 " (GPIO1)\n", gpio_irq);
459 else
460 gpio_irq_enabled = 1;
461 }
462 }
463
464 /* Enable interrupts */
465 out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
466 #else
467 if (request_irq(IRQ_MAC_ADB_SR, via_pmu_interrupt, IRQF_NO_SUSPEND,
468 "VIA-PMU-SR", NULL)) {
469 pr_err("%s: couldn't get SR irq\n", __func__);
470 return -ENODEV;
471 }
472 if (request_irq(IRQ_MAC_ADB_CL, via_pmu_interrupt, IRQF_NO_SUSPEND,
473 "VIA-PMU-CL", NULL)) {
474 pr_err("%s: couldn't get CL irq\n", __func__);
475 free_irq(IRQ_MAC_ADB_SR, NULL);
476 return -ENODEV;
477 }
478 #endif /* !CONFIG_PPC_PMAC */
479
480 pmu_fully_inited = 1;
481
/* Make sure the PMU settles down before continuing. This is _very_ important
 * since the IDE probe may shut interrupts down for quite a bit of time. If
 * a PMU communication is pending while this happens, the PMU may time out.
 * Note that on Core99 machines, the PMU keeps sending us environment
 * messages; we should find a way to either fix IDE or make it call
 * pmu_suspend() before masking interrupts. This can also happen while
 * scrolling with some fbdevs.
 */
490 do {
491 pmu_poll();
492 } while (pmu_state != idle);
493
494 return 0;
495 }
496
497 arch_initcall(via_pmu_start);
498
499 /*
500 * This has to be done after pci_init, which is a subsys_initcall.
501 */
static int __init via_pmu_dev_init(void)
503 {
504 if (pmu_state == uninitialized)
505 return -ENODEV;
506
507 #ifdef CONFIG_PMAC_BACKLIGHT
508 /* Initialize backlight */
509 pmu_backlight_init();
510 #endif
511
512 #ifdef CONFIG_PPC32
513 if (of_machine_is_compatible("AAPL,3400/2400") ||
514 of_machine_is_compatible("AAPL,3500")) {
515 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO,
516 NULL, PMAC_MB_INFO_MODEL, 0);
517 pmu_battery_count = 1;
518 if (mb == PMAC_TYPE_COMET)
519 pmu_batteries[0].flags |= PMU_BATT_TYPE_COMET;
520 else
521 pmu_batteries[0].flags |= PMU_BATT_TYPE_HOOPER;
522 } else if (of_machine_is_compatible("AAPL,PowerBook1998") ||
523 of_machine_is_compatible("PowerBook1,1")) {
524 pmu_battery_count = 2;
525 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART;
526 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
527 } else {
528 struct device_node* prim =
529 of_find_node_by_name(NULL, "power-mgt");
530 const u32 *prim_info = NULL;
531 if (prim)
532 prim_info = of_get_property(prim, "prim-info", NULL);
533 if (prim_info) {
/* Other fields here are still unknown */
535 pmu_battery_count = (prim_info[6] >> 16) & 0xff;
536 pmu_batteries[0].flags |= PMU_BATT_TYPE_SMART;
537 if (pmu_battery_count > 1)
538 pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART;
539 }
540 of_node_put(prim);
541 }
542 #endif /* CONFIG_PPC32 */
543
544 /* Create /proc/pmu */
545 proc_pmu_root = proc_mkdir("pmu", NULL);
546 if (proc_pmu_root) {
547 long i;
548
549 for (i=0; i<pmu_battery_count; i++) {
550 char title[16];
551 sprintf(title, "battery_%ld", i);
552 proc_pmu_batt[i] = proc_create_single_data(title, 0,
553 proc_pmu_root, pmu_battery_proc_show,
554 (void *)i);
555 }
556
557 proc_pmu_info = proc_create_single("info", 0, proc_pmu_root,
558 pmu_info_proc_show);
559 proc_pmu_irqstats = proc_create_single("interrupts", 0,
560 proc_pmu_root, pmu_irqstats_proc_show);
561 proc_pmu_options = proc_create("options", 0600, proc_pmu_root,
562 &pmu_options_proc_ops);
563 }
564 return 0;
565 }
566
567 device_initcall(via_pmu_dev_init);
568
569 static int
init_pmu(void)
571 {
572 int timeout;
573 struct adb_request req;
574
575 /* Negate TREQ. Set TACK to input and TREQ to output. */
576 out_8(&via2[B], in_8(&via2[B]) | TREQ);
577 out_8(&via2[DIRB], (in_8(&via2[DIRB]) | TREQ) & ~TACK);
578
579 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
580 timeout = 100000;
581 while (!req.complete) {
582 if (--timeout < 0) {
583 printk(KERN_ERR "init_pmu: no response from PMU\n");
584 return 0;
585 }
586 udelay(10);
587 pmu_poll();
588 }
589
590 /* ack all pending interrupts */
591 timeout = 100000;
592 interrupt_data[0][0] = 1;
593 while (interrupt_data[0][0] || pmu_state != idle) {
594 if (--timeout < 0) {
595 printk(KERN_ERR "init_pmu: timed out acking intrs\n");
596 return 0;
597 }
598 if (pmu_state == idle)
599 adb_int_pending = 1;
600 via_pmu_interrupt(0, NULL);
601 udelay(10);
602 }
603
604 /* Tell PMU we are ready. */
605 if (pmu_kind == PMU_KEYLARGO_BASED) {
606 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
607 while (!req.complete)
608 pmu_poll();
609 }
610
611 /* Read PMU version */
612 pmu_request(&req, NULL, 1, PMU_GET_VERSION);
613 pmu_wait_complete(&req);
614 if (req.reply_len > 0)
615 pmu_version = req.reply[0];
616
617 /* Read server mode setting */
618 if (pmu_kind == PMU_KEYLARGO_BASED) {
619 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS,
620 PMU_PWR_GET_POWERUP_EVENTS);
621 pmu_wait_complete(&req);
622 if (req.reply_len == 2) {
623 if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT)
624 option_server_mode = 1;
625 printk(KERN_INFO "via-pmu: Server Mode is %s\n",
626 option_server_mode ? "enabled" : "disabled");
627 }
628 }
629
630 printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
631 PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
632
633 return 1;
634 }
635
636 int
pmu_get_model(void)
638 {
639 return pmu_kind;
640 }
641
static void pmu_set_server_mode(int server_mode)
643 {
644 struct adb_request req;
645
646 if (pmu_kind != PMU_KEYLARGO_BASED)
647 return;
648
649 option_server_mode = server_mode;
650 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, PMU_PWR_GET_POWERUP_EVENTS);
651 pmu_wait_complete(&req);
652 if (req.reply_len < 2)
653 return;
654 if (server_mode)
655 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS,
656 PMU_PWR_SET_POWERUP_EVENTS,
657 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT);
658 else
659 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS,
660 PMU_PWR_CLR_POWERUP_EVENTS,
661 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT);
662 pmu_wait_complete(&req);
663 }
664
/* This new version of the code for 2400/3400/3500 PowerBooks
 * is inspired by the implementation in gkrellm-pmu.
 */
668 static void
done_battery_state_ohare(struct adb_request* req)
670 {
671 #ifdef CONFIG_PPC_PMAC
672 /* format:
673 * [0] : flags
674 * 0x01 : AC indicator
675 * 0x02 : charging
676 * 0x04 : battery exist
677 * 0x08 :
678 * 0x10 :
679 * 0x20 : full charged
680 * 0x40 : pcharge reset
681 * 0x80 : battery exist
682 *
683 * [1][2] : battery voltage
684 * [3] : CPU temperature
685 * [4] : battery temperature
686 * [5] : current
687 * [6][7] : pcharge
688 * --tkoba
689 */
690 unsigned int bat_flags = PMU_BATT_TYPE_HOOPER;
691 long pcharge, charge, vb, vmax, lmax;
692 long vmax_charging, vmax_charged;
693 long amperage, voltage, time, max;
694 int mb = pmac_call_feature(PMAC_FTR_GET_MB_INFO,
695 NULL, PMAC_MB_INFO_MODEL, 0);
696
697 if (req->reply[0] & 0x01)
698 pmu_power_flags |= PMU_PWR_AC_PRESENT;
699 else
700 pmu_power_flags &= ~PMU_PWR_AC_PRESENT;
701
702 if (mb == PMAC_TYPE_COMET) {
703 vmax_charged = 189;
704 vmax_charging = 213;
705 lmax = 6500;
706 } else {
707 vmax_charged = 330;
708 vmax_charging = 330;
709 lmax = 6500;
710 }
711 vmax = vmax_charged;
712
713 /* If battery installed */
714 if (req->reply[0] & 0x04) {
715 bat_flags |= PMU_BATT_PRESENT;
716 if (req->reply[0] & 0x02)
717 bat_flags |= PMU_BATT_CHARGING;
718 vb = (req->reply[1] << 8) | req->reply[2];
719 voltage = (vb * 265 + 72665) / 10;
720 amperage = req->reply[5];
721 if ((req->reply[0] & 0x01) == 0) {
722 if (amperage > 200)
723 vb += ((amperage - 200) * 15)/100;
724 } else if (req->reply[0] & 0x02) {
725 vb = (vb * 97) / 100;
726 vmax = vmax_charging;
727 }
728 charge = (100 * vb) / vmax;
729 if (req->reply[0] & 0x40) {
730 pcharge = (req->reply[6] << 8) + req->reply[7];
731 if (pcharge > lmax)
732 pcharge = lmax;
733 pcharge *= 100;
734 pcharge = 100 - pcharge / lmax;
735 if (pcharge < charge)
736 charge = pcharge;
737 }
738 if (amperage > 0)
739 time = (charge * 16440) / amperage;
740 else
741 time = 0;
742 max = 100;
743 amperage = -amperage;
744 } else
745 charge = max = amperage = voltage = time = 0;
746
747 pmu_batteries[pmu_cur_battery].flags = bat_flags;
748 pmu_batteries[pmu_cur_battery].charge = charge;
749 pmu_batteries[pmu_cur_battery].max_charge = max;
750 pmu_batteries[pmu_cur_battery].amperage = amperage;
751 pmu_batteries[pmu_cur_battery].voltage = voltage;
752 pmu_batteries[pmu_cur_battery].time_remaining = time;
753 #endif /* CONFIG_PPC_PMAC */
754
755 clear_bit(0, &async_req_locks);
756 }
757
758 static void
done_battery_state_smart(struct adb_request* req)
760 {
761 /* format:
762 * [0] : format of this structure (known: 3,4,5)
763 * [1] : flags
764 *
765 * format 3 & 4:
766 *
767 * [2] : charge
768 * [3] : max charge
769 * [4] : current
770 * [5] : voltage
771 *
772 * format 5:
773 *
774 * [2][3] : charge
775 * [4][5] : max charge
776 * [6][7] : current
777 * [8][9] : voltage
778 */
779
780 unsigned int bat_flags = PMU_BATT_TYPE_SMART;
781 int amperage;
782 unsigned int capa, max, voltage;
783
784 if (req->reply[1] & 0x01)
785 pmu_power_flags |= PMU_PWR_AC_PRESENT;
786 else
787 pmu_power_flags &= ~PMU_PWR_AC_PRESENT;
788
789
790 capa = max = amperage = voltage = 0;
791
792 if (req->reply[1] & 0x04) {
793 bat_flags |= PMU_BATT_PRESENT;
794 switch(req->reply[0]) {
795 case 3:
796 case 4: capa = req->reply[2];
797 max = req->reply[3];
798 amperage = *((signed char *)&req->reply[4]);
799 voltage = req->reply[5];
800 break;
801 case 5: capa = (req->reply[2] << 8) | req->reply[3];
802 max = (req->reply[4] << 8) | req->reply[5];
803 amperage = *((signed short *)&req->reply[6]);
804 voltage = (req->reply[8] << 8) | req->reply[9];
805 break;
806 default:
807 pr_warn("pmu.c: unrecognized battery info, "
808 "len: %d, %4ph\n", req->reply_len,
809 req->reply);
810 break;
811 }
812 }
813
814 if ((req->reply[1] & 0x01) && (amperage > 0))
815 bat_flags |= PMU_BATT_CHARGING;
816
817 pmu_batteries[pmu_cur_battery].flags = bat_flags;
818 pmu_batteries[pmu_cur_battery].charge = capa;
819 pmu_batteries[pmu_cur_battery].max_charge = max;
820 pmu_batteries[pmu_cur_battery].amperage = amperage;
821 pmu_batteries[pmu_cur_battery].voltage = voltage;
822 if (amperage) {
823 if ((req->reply[1] & 0x01) && (amperage > 0))
824 pmu_batteries[pmu_cur_battery].time_remaining
825 = ((max-capa) * 3600) / amperage;
826 else
827 pmu_batteries[pmu_cur_battery].time_remaining
828 = (capa * 3600) / (-amperage);
829 } else
830 pmu_batteries[pmu_cur_battery].time_remaining = 0;
831
832 pmu_cur_battery = (pmu_cur_battery + 1) % pmu_battery_count;
833
834 clear_bit(0, &async_req_locks);
835 }
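
/*
 * Worked example for the time_remaining estimate above (assuming the
 * PMU reports charge in mAh and current in mA, which this code does not
 * guarantee): charging at 500 with max - capa == 1000 gives
 * (1000 * 3600) / 500 == 7200, i.e. roughly two hours until full.
 */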
836
837 static void
query_battery_state(void)
839 {
840 if (test_and_set_bit(0, &async_req_locks))
841 return;
842 if (pmu_kind == PMU_OHARE_BASED)
843 pmu_request(&batt_req, done_battery_state_ohare,
844 1, PMU_BATTERY_STATE);
845 else
846 pmu_request(&batt_req, done_battery_state_smart,
847 2, PMU_SMART_BATTERY_STATE, pmu_cur_battery+1);
848 }
849
850 #ifdef CONFIG_PROC_FS
static int pmu_info_proc_show(struct seq_file *m, void *v)
852 {
853 seq_printf(m, "PMU driver version : %d\n", PMU_DRIVER_VERSION);
854 seq_printf(m, "PMU firmware version : %02x\n", pmu_version);
855 seq_printf(m, "AC Power : %d\n",
856 ((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0);
857 seq_printf(m, "Battery count : %d\n", pmu_battery_count);
858
859 return 0;
860 }
861
static int pmu_irqstats_proc_show(struct seq_file *m, void *v)
863 {
864 int i;
865 static const char *irq_names[NUM_IRQ_STATS] = {
866 "Unknown interrupt (type 0)",
867 "Unknown interrupt (type 1)",
868 "PC-Card eject button",
869 "Sound/Brightness button",
870 "ADB message",
871 "Battery state change",
872 "Environment interrupt",
873 "Tick timer",
874 "Ghost interrupt (zero len)",
875 "Empty interrupt (empty mask)",
876 "Max irqs in a row",
877 "Total CB1 triggered events",
878 "Total GPIO1 triggered events",
879 };
880
881 for (i = 0; i < NUM_IRQ_STATS; i++) {
882 seq_printf(m, " %2u: %10u (%s)\n",
883 i, pmu_irq_stats[i], irq_names[i]);
884 }
885 return 0;
886 }
887
static int pmu_battery_proc_show(struct seq_file *m, void *v)
889 {
890 long batnum = (long)m->private;
891
892 seq_putc(m, '\n');
893 seq_printf(m, "flags : %08x\n", pmu_batteries[batnum].flags);
894 seq_printf(m, "charge : %d\n", pmu_batteries[batnum].charge);
895 seq_printf(m, "max_charge : %d\n", pmu_batteries[batnum].max_charge);
896 seq_printf(m, "current : %d\n", pmu_batteries[batnum].amperage);
897 seq_printf(m, "voltage : %d\n", pmu_batteries[batnum].voltage);
898 seq_printf(m, "time rem. : %d\n", pmu_batteries[batnum].time_remaining);
899 return 0;
900 }
901
static int pmu_options_proc_show(struct seq_file *m, void *v)
903 {
904 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
905 if (pmu_kind == PMU_KEYLARGO_BASED &&
906 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
907 seq_printf(m, "lid_wakeup=%d\n", option_lid_wakeup);
908 #endif
909 if (pmu_kind == PMU_KEYLARGO_BASED)
910 seq_printf(m, "server_mode=%d\n", option_server_mode);
911
912 return 0;
913 }
914
static int pmu_options_proc_open(struct inode *inode, struct file *file)
916 {
917 return single_open(file, pmu_options_proc_show, NULL);
918 }
919
static ssize_t pmu_options_proc_write(struct file *file,
921 const char __user *buffer, size_t count, loff_t *pos)
922 {
923 char tmp[33];
924 char *label, *val;
925 size_t fcount = count;
926
927 if (!count)
928 return -EINVAL;
929 if (count > 32)
930 count = 32;
931 if (copy_from_user(tmp, buffer, count))
932 return -EFAULT;
933 tmp[count] = 0;
934
935 label = tmp;
936 while(*label == ' ')
937 label++;
938 val = label;
939 while(*val && (*val != '=')) {
940 if (*val == ' ')
941 *val = 0;
942 val++;
943 }
944 if ((*val) == 0)
945 return -EINVAL;
946 *(val++) = 0;
947 while(*val == ' ')
948 val++;
949 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
950 if (pmu_kind == PMU_KEYLARGO_BASED &&
951 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) >= 0)
952 if (!strcmp(label, "lid_wakeup"))
953 option_lid_wakeup = ((*val) == '1');
954 #endif
955 if (pmu_kind == PMU_KEYLARGO_BASED && !strcmp(label, "server_mode")) {
956 int new_value;
957 new_value = ((*val) == '1');
958 if (new_value != option_server_mode)
959 pmu_set_server_mode(new_value);
960 }
961 return fcount;
962 }
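
/*
 * Example interaction from userland (illustrative values; which options
 * appear depends on the machine, see pmu_options_proc_show() above):
 *
 *	# cat /proc/pmu/options
 *	lid_wakeup=1
 *	server_mode=0
 *	# echo server_mode=1 > /proc/pmu/options
 */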
963
964 static const struct proc_ops pmu_options_proc_ops = {
965 .proc_open = pmu_options_proc_open,
966 .proc_read = seq_read,
967 .proc_lseek = seq_lseek,
968 .proc_release = single_release,
969 .proc_write = pmu_options_proc_write,
970 };
971 #endif
972
973 #ifdef CONFIG_ADB
974 /* Send an ADB command */
static int pmu_send_request(struct adb_request *req, int sync)
976 {
977 int i, ret;
978
979 if (pmu_state == uninitialized || !pmu_fully_inited) {
980 req->complete = 1;
981 return -ENXIO;
982 }
983
984 ret = -EINVAL;
985
986 switch (req->data[0]) {
987 case PMU_PACKET:
988 for (i = 0; i < req->nbytes - 1; ++i)
989 req->data[i] = req->data[i+1];
990 --req->nbytes;
991 if (pmu_data_len[req->data[0]][1] != 0) {
992 req->reply[0] = ADB_RET_OK;
993 req->reply_len = 1;
994 } else
995 req->reply_len = 0;
996 ret = pmu_queue_request(req);
997 break;
998 case CUDA_PACKET:
999 switch (req->data[1]) {
1000 case CUDA_GET_TIME:
1001 if (req->nbytes != 2)
1002 break;
1003 req->data[0] = PMU_READ_RTC;
1004 req->nbytes = 1;
1005 req->reply_len = 3;
1006 req->reply[0] = CUDA_PACKET;
1007 req->reply[1] = 0;
1008 req->reply[2] = CUDA_GET_TIME;
1009 ret = pmu_queue_request(req);
1010 break;
1011 case CUDA_SET_TIME:
1012 if (req->nbytes != 6)
1013 break;
1014 req->data[0] = PMU_SET_RTC;
1015 req->nbytes = 5;
1016 for (i = 1; i <= 4; ++i)
1017 req->data[i] = req->data[i+1];
1018 req->reply_len = 3;
1019 req->reply[0] = CUDA_PACKET;
1020 req->reply[1] = 0;
1021 req->reply[2] = CUDA_SET_TIME;
1022 ret = pmu_queue_request(req);
1023 break;
1024 }
1025 break;
1026 case ADB_PACKET:
1027 if (!pmu_has_adb)
1028 return -ENXIO;
1029 for (i = req->nbytes - 1; i > 1; --i)
1030 req->data[i+2] = req->data[i];
1031 req->data[3] = req->nbytes - 2;
1032 req->data[2] = pmu_adb_flags;
1033 /*req->data[1] = req->data[1];*/
1034 req->data[0] = PMU_ADB_CMD;
1035 req->nbytes += 2;
1036 req->reply_expected = 1;
1037 req->reply_len = 0;
1038 ret = pmu_queue_request(req);
1039 break;
1040 }
1041 if (ret) {
1042 req->complete = 1;
1043 return ret;
1044 }
1045
1046 if (sync)
1047 while (!req->complete)
1048 pmu_poll();
1049
1050 return 0;
1051 }
1052
1053 /* Enable/disable autopolling */
static int __pmu_adb_autopoll(int devs)
1055 {
1056 struct adb_request req;
1057
1058 if (devs) {
1059 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86,
1060 adb_dev_map >> 8, adb_dev_map);
1061 pmu_adb_flags = 2;
1062 } else {
1063 pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF);
1064 pmu_adb_flags = 0;
1065 }
1066 while (!req.complete)
1067 pmu_poll();
1068 return 0;
1069 }
1070
static int pmu_adb_autopoll(int devs)
1072 {
1073 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb)
1074 return -ENXIO;
1075
1076 adb_dev_map = devs;
1077 return __pmu_adb_autopoll(devs);
1078 }
1079
1080 /* Reset the ADB bus */
static int pmu_adb_reset_bus(void)
1082 {
1083 struct adb_request req;
1084 int save_autopoll = adb_dev_map;
1085
1086 if (pmu_state == uninitialized || !pmu_fully_inited || !pmu_has_adb)
1087 return -ENXIO;
1088
1089 /* anyone got a better idea?? */
1090 __pmu_adb_autopoll(0);
1091
1092 req.nbytes = 4;
1093 req.done = NULL;
1094 req.data[0] = PMU_ADB_CMD;
1095 req.data[1] = ADB_BUSRESET;
1096 req.data[2] = 0;
1097 req.data[3] = 0;
1098 req.data[4] = 0;
1099 req.reply_len = 0;
1100 req.reply_expected = 1;
1101 if (pmu_queue_request(&req) != 0) {
1102 printk(KERN_ERR "pmu_adb_reset_bus: pmu_queue_request failed\n");
1103 return -EIO;
1104 }
1105 pmu_wait_complete(&req);
1106
1107 if (save_autopoll != 0)
1108 __pmu_adb_autopoll(save_autopoll);
1109
1110 return 0;
1111 }
1112 #endif /* CONFIG_ADB */
1113
1114 /* Construct and send a pmu request */
1115 int
pmu_request(struct adb_request *req, void (*done)(struct adb_request *),
1117 int nbytes, ...)
1118 {
1119 va_list list;
1120 int i;
1121
1122 if (pmu_state == uninitialized)
1123 return -ENXIO;
1124
1125 if (nbytes < 0 || nbytes > 32) {
1126 printk(KERN_ERR "pmu_request: bad nbytes (%d)\n", nbytes);
1127 req->complete = 1;
1128 return -EINVAL;
1129 }
1130 req->nbytes = nbytes;
1131 req->done = done;
1132 va_start(list, nbytes);
1133 for (i = 0; i < nbytes; ++i)
1134 req->data[i] = va_arg(list, int);
1135 va_end(list);
1136 req->reply_len = 0;
1137 req->reply_expected = 0;
1138 return pmu_queue_request(req);
1139 }
1140
1141 int
pmu_queue_request(struct adb_request *req)
1143 {
1144 unsigned long flags;
1145 int nsend;
1146
1147 if (pmu_state == uninitialized) {
1148 req->complete = 1;
1149 return -ENXIO;
1150 }
1151 if (req->nbytes <= 0) {
1152 req->complete = 1;
1153 return 0;
1154 }
1155 nsend = pmu_data_len[req->data[0]][0];
1156 if (nsend >= 0 && req->nbytes != nsend + 1) {
1157 req->complete = 1;
1158 return -EINVAL;
1159 }
1160
1161 req->next = NULL;
1162 req->sent = 0;
1163 req->complete = 0;
1164
1165 spin_lock_irqsave(&pmu_lock, flags);
1166 if (current_req) {
1167 last_req->next = req;
1168 last_req = req;
1169 } else {
1170 current_req = req;
1171 last_req = req;
1172 if (pmu_state == idle)
1173 pmu_start();
1174 }
1175 spin_unlock_irqrestore(&pmu_lock, flags);
1176
1177 return 0;
1178 }
1179
1180 static inline void
wait_for_ack(void)
1182 {
/* Slightly increased the delay; I had one occurrence of the
 * message below being reported.
 */
1186 int timeout = 4000;
1187 while ((in_8(&via2[B]) & TACK) == 0) {
1188 if (--timeout < 0) {
1189 printk(KERN_ERR "PMU not responding (!ack)\n");
1190 return;
1191 }
1192 udelay(10);
1193 }
1194 }
1195
/* The new PMU seems to be very sensitive to these timings, so we make sure
 * PCI is flushed immediately */
1198 static inline void
send_byte(int x)
1200 {
1201 out_8(&via1[ACR], in_8(&via1[ACR]) | SR_OUT | SR_EXT);
1202 out_8(&via1[SR], x);
1203 out_8(&via2[B], in_8(&via2[B]) & ~TREQ); /* assert TREQ */
1204 (void)in_8(&via2[B]);
1205 }
1206
1207 static inline void
recv_byte(void)
1209 {
1210 out_8(&via1[ACR], (in_8(&via1[ACR]) & ~SR_OUT) | SR_EXT);
1211 in_8(&via1[SR]); /* resets SR */
1212 out_8(&via2[B], in_8(&via2[B]) & ~TREQ);
1213 (void)in_8(&via2[B]);
1214 }
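
/*
 * Rough shape of the per-byte handshake implemented by send_byte(),
 * recv_byte() and pmu_sr_intr() (a description, not a specification):
 * the driver loads or resets the shift register and asserts TREQ, the
 * PMU answers by pulling TACK low, and the shift-register interrupt
 * fires once the byte has been clocked; the driver then releases TREQ
 * and waits for TACK to go high again before the next byte.
 */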
1215
1216 static inline void
pmu_done(struct adb_request *req)
1218 {
1219 void (*done)(struct adb_request *) = req->done;
1220 mb();
1221 req->complete = 1;
/* Here, we assume that if the request has a done callback, the
 * struct adb_request will survive until req->complete is set to 1.
 */
1225 if (done)
1226 (*done)(req);
1227 }
1228
1229 static void
pmu_start(void)
1231 {
1232 struct adb_request *req;
1233
1234 /* assert pmu_state == idle */
1235 /* get the packet to send */
1236 req = current_req;
1237 if (!req || pmu_state != idle
1238 || (/*req->reply_expected && */req_awaiting_reply))
1239 return;
1240
1241 pmu_state = sending;
1242 data_index = 1;
1243 data_len = pmu_data_len[req->data[0]][0];
1244
1245 /* Sounds safer to make sure ACK is high before writing. This helped
1246 * kill a problem with ADB and some iBooks
1247 */
1248 wait_for_ack();
1249 /* set the shift register to shift out and send a byte */
1250 send_byte(req->data[0]);
1251 }
1252
1253 void
pmu_poll(void)
1255 {
1256 if (pmu_state == uninitialized)
1257 return;
1258 if (disable_poll)
1259 return;
1260 via_pmu_interrupt(0, NULL);
1261 }
1262
1263 void
pmu_poll_adb(void)
1265 {
1266 if (pmu_state == uninitialized)
1267 return;
1268 if (disable_poll)
1269 return;
1270 /* Kicks ADB read when PMU is suspended */
1271 adb_int_pending = 1;
1272 do {
1273 via_pmu_interrupt(0, NULL);
1274 } while (pmu_suspended && (adb_int_pending || pmu_state != idle
1275 || req_awaiting_reply));
1276 }
1277
1278 void
pmu_wait_complete(struct adb_request *req)
1280 {
1281 if (pmu_state == uninitialized)
1282 return;
1283 while((pmu_state != idle && pmu_state != locked) || !req->complete)
1284 via_pmu_interrupt(0, NULL);
1285 }
1286
/* This function loops until the PMU is idle and prevents it from
 * answering ADB interrupts. pmu_request can still be called.
 * This is done to avoid spurious shutdowns when we know we'll have
 * interrupts switched off for a long time.
 */
1292 void
pmu_suspend(void)
1294 {
1295 unsigned long flags;
1296
1297 if (pmu_state == uninitialized)
1298 return;
1299
1300 spin_lock_irqsave(&pmu_lock, flags);
1301 pmu_suspended++;
1302 if (pmu_suspended > 1) {
1303 spin_unlock_irqrestore(&pmu_lock, flags);
1304 return;
1305 }
1306
1307 do {
1308 spin_unlock_irqrestore(&pmu_lock, flags);
1309 if (req_awaiting_reply)
1310 adb_int_pending = 1;
1311 via_pmu_interrupt(0, NULL);
1312 spin_lock_irqsave(&pmu_lock, flags);
1313 if (!adb_int_pending && pmu_state == idle && !req_awaiting_reply) {
1314 if (gpio_irq >= 0)
1315 disable_irq_nosync(gpio_irq);
1316 out_8(&via1[IER], CB1_INT | IER_CLR);
1317 spin_unlock_irqrestore(&pmu_lock, flags);
1318 break;
1319 }
1320 } while (1);
1321 }
1322
1323 void
pmu_resume(void)
1325 {
1326 unsigned long flags;
1327
1328 if (pmu_state == uninitialized || pmu_suspended < 1)
1329 return;
1330
1331 spin_lock_irqsave(&pmu_lock, flags);
1332 pmu_suspended--;
1333 if (pmu_suspended > 0) {
1334 spin_unlock_irqrestore(&pmu_lock, flags);
1335 return;
1336 }
1337 adb_int_pending = 1;
1338 if (gpio_irq >= 0)
1339 enable_irq(gpio_irq);
1340 out_8(&via1[IER], CB1_INT | IER_SET);
1341 spin_unlock_irqrestore(&pmu_lock, flags);
1342 pmu_poll();
1343 }
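
/*
 * Typical pairing (a sketch of how the two routines above are meant to
 * be used): bracket a long interrupts-off section so the PMU is quiesced
 * first and event processing is kicked again afterwards.
 *
 *	pmu_suspend();
 *	... code that keeps interrupts masked for a long time ...
 *	pmu_resume();
 */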
1344
1345 /* Interrupt data could be the result data from an ADB cmd */
1346 static void
pmu_handle_data(unsigned char *data, int len)
1348 {
1349 unsigned char ints;
1350 int idx;
1351 int i = 0;
1352
1353 asleep = 0;
1354 if (drop_interrupts || len < 1) {
1355 adb_int_pending = 0;
1356 pmu_irq_stats[8]++;
1357 return;
1358 }
1359
1360 /* Get PMU interrupt mask */
1361 ints = data[0];
1362
1363 /* Record zero interrupts for stats */
1364 if (ints == 0)
1365 pmu_irq_stats[9]++;
1366
1367 /* Hack to deal with ADB autopoll flag */
1368 if (ints & PMU_INT_ADB)
1369 ints &= ~(PMU_INT_ADB_AUTO | PMU_INT_AUTO_SRQ_POLL);
1370
1371 next:
1372 if (ints == 0) {
1373 if (i > pmu_irq_stats[10])
1374 pmu_irq_stats[10] = i;
1375 return;
1376 }
1377 i++;
1378
1379 idx = ffs(ints) - 1;
1380 ints &= ~BIT(idx);
1381
1382 pmu_irq_stats[idx]++;
1383
1384 /* Note: for some reason, we get an interrupt with len=1,
1385 * data[0]==0 after each normal ADB interrupt, at least
1386 * on the Pismo. Still investigating... --BenH
1387 */
1388 switch (BIT(idx)) {
1389 case PMU_INT_ADB:
1390 if ((data[0] & PMU_INT_ADB_AUTO) == 0) {
1391 struct adb_request *req = req_awaiting_reply;
1392 if (!req) {
1393 printk(KERN_ERR "PMU: extra ADB reply\n");
1394 return;
1395 }
1396 req_awaiting_reply = NULL;
1397 if (len <= 2)
1398 req->reply_len = 0;
1399 else {
1400 memcpy(req->reply, data + 1, len - 1);
1401 req->reply_len = len - 1;
1402 }
1403 pmu_done(req);
1404 } else {
1405 #ifdef CONFIG_XMON
1406 if (len == 4 && data[1] == 0x2c) {
1407 extern int xmon_wants_key, xmon_adb_keycode;
1408 if (xmon_wants_key) {
1409 xmon_adb_keycode = data[2];
1410 return;
1411 }
1412 }
1413 #endif /* CONFIG_XMON */
1414 #ifdef CONFIG_ADB
1415 /*
1416 * XXX On the [23]400 the PMU gives us an up
1417 * event for keycodes 0x74 or 0x75 when the PC
1418 * card eject buttons are released, so we
1419 * ignore those events.
1420 */
1421 if (!(pmu_kind == PMU_OHARE_BASED && len == 4
1422 && data[1] == 0x2c && data[3] == 0xff
1423 && (data[2] & ~1) == 0xf4))
1424 adb_input(data+1, len-1, 1);
1425 #endif /* CONFIG_ADB */
1426 }
1427 break;
1428
1429 /* Sound/brightness button pressed */
1430 case PMU_INT_SNDBRT:
1431 #ifdef CONFIG_PMAC_BACKLIGHT
1432 if (len == 3)
1433 pmac_backlight_set_legacy_brightness_pmu(data[1] >> 4);
1434 #endif
1435 break;
1436
1437 /* Tick interrupt */
1438 case PMU_INT_TICK:
1439 /* Environment or tick interrupt, query batteries */
1440 if (pmu_battery_count) {
1441 if ((--query_batt_timer) == 0) {
1442 query_battery_state();
1443 query_batt_timer = BATTERY_POLLING_COUNT;
1444 }
1445 }
1446 break;
1447
1448 case PMU_INT_ENVIRONMENT:
1449 if (pmu_battery_count)
1450 query_battery_state();
1451 pmu_pass_intr(data, len);
1452 /* len == 6 is probably a bad check. But how do I
1453 * know what PMU versions send what events here? */
1454 if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) {
1455 via_pmu_event(PMU_EVT_POWER, !!(data[1]&8));
1456 via_pmu_event(PMU_EVT_LID, data[1]&1);
1457 }
1458 break;
1459
1460 default:
1461 pmu_pass_intr(data, len);
1462 }
1463 goto next;
1464 }
1465
1466 static struct adb_request*
pmu_sr_intr(void)
1468 {
1469 struct adb_request *req;
1470 int bite = 0;
1471
1472 if (in_8(&via2[B]) & TREQ) {
1473 printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via2[B]));
1474 return NULL;
1475 }
1476 /* The ack may not yet be low when we get the interrupt */
1477 while ((in_8(&via2[B]) & TACK) != 0)
1478 ;
1479
1480 /* if reading grab the byte, and reset the interrupt */
1481 if (pmu_state == reading || pmu_state == reading_intr)
1482 bite = in_8(&via1[SR]);
1483
1484 /* reset TREQ and wait for TACK to go high */
1485 out_8(&via2[B], in_8(&via2[B]) | TREQ);
1486 wait_for_ack();
1487
1488 switch (pmu_state) {
1489 case sending:
1490 req = current_req;
1491 if (data_len < 0) {
1492 data_len = req->nbytes - 1;
1493 send_byte(data_len);
1494 break;
1495 }
1496 if (data_index <= data_len) {
1497 send_byte(req->data[data_index++]);
1498 break;
1499 }
1500 req->sent = 1;
1501 data_len = pmu_data_len[req->data[0]][1];
1502 if (data_len == 0) {
1503 pmu_state = idle;
1504 current_req = req->next;
1505 if (req->reply_expected)
1506 req_awaiting_reply = req;
1507 else
1508 return req;
1509 } else {
1510 pmu_state = reading;
1511 data_index = 0;
1512 reply_ptr = req->reply + req->reply_len;
1513 recv_byte();
1514 }
1515 break;
1516
1517 case intack:
1518 data_index = 0;
1519 data_len = -1;
1520 pmu_state = reading_intr;
1521 reply_ptr = interrupt_data[int_data_last];
1522 recv_byte();
1523 if (gpio_irq >= 0 && !gpio_irq_enabled) {
1524 enable_irq(gpio_irq);
1525 gpio_irq_enabled = 1;
1526 }
1527 break;
1528
1529 case reading:
1530 case reading_intr:
1531 if (data_len == -1) {
1532 data_len = bite;
1533 if (bite > 32)
1534 printk(KERN_ERR "PMU: bad reply len %d\n", bite);
1535 } else if (data_index < 32) {
1536 reply_ptr[data_index++] = bite;
1537 }
1538 if (data_index < data_len) {
1539 recv_byte();
1540 break;
1541 }
1542
1543 if (pmu_state == reading_intr) {
1544 pmu_state = idle;
1545 int_data_state[int_data_last] = int_data_ready;
1546 interrupt_data_len[int_data_last] = data_len;
1547 } else {
1548 req = current_req;
/*
 * For PMU sleep and freq change requests, we lock the
 * PMU until it's explicitly unlocked. This avoids any
 * spurious event polling getting in the way.
 */
1554 current_req = req->next;
1555 req->reply_len += data_index;
1556 if (req->data[0] == PMU_SLEEP || req->data[0] == PMU_CPU_SPEED)
1557 pmu_state = locked;
1558 else
1559 pmu_state = idle;
1560 return req;
1561 }
1562 break;
1563
1564 default:
1565 printk(KERN_ERR "via_pmu_interrupt: unknown state %d?\n",
1566 pmu_state);
1567 }
1568 return NULL;
1569 }
1570
1571 static irqreturn_t
via_pmu_interrupt(int irq, void *arg)
1573 {
1574 unsigned long flags;
1575 int intr;
1576 int nloop = 0;
1577 int int_data = -1;
1578 struct adb_request *req = NULL;
1579 int handled = 0;
1580
1581 /* This is a bit brutal, we can probably do better */
1582 spin_lock_irqsave(&pmu_lock, flags);
1583 ++disable_poll;
1584
1585 for (;;) {
1586 /* On 68k Macs, VIA interrupts are dispatched individually.
1587 * Unless we are polling, the relevant IRQ flag has already
1588 * been cleared.
1589 */
1590 intr = 0;
1591 if (IS_ENABLED(CONFIG_PPC_PMAC) || !irq) {
1592 intr = in_8(&via1[IFR]) & (SR_INT | CB1_INT);
1593 out_8(&via1[IFR], intr);
1594 }
1595 #ifndef CONFIG_PPC_PMAC
1596 switch (irq) {
1597 case IRQ_MAC_ADB_CL:
1598 intr = CB1_INT;
1599 break;
1600 case IRQ_MAC_ADB_SR:
1601 intr = SR_INT;
1602 break;
1603 }
1604 #endif
1605 if (intr == 0)
1606 break;
1607 handled = 1;
1608 if (++nloop > 1000) {
1609 printk(KERN_DEBUG "PMU: stuck in intr loop, "
1610 "intr=%x, ier=%x pmu_state=%d\n",
1611 intr, in_8(&via1[IER]), pmu_state);
1612 break;
1613 }
1614 if (intr & CB1_INT) {
1615 adb_int_pending = 1;
1616 pmu_irq_stats[11]++;
1617 }
1618 if (intr & SR_INT) {
1619 req = pmu_sr_intr();
1620 if (req)
1621 break;
1622 }
1623 #ifndef CONFIG_PPC_PMAC
1624 break;
1625 #endif
1626 }
1627
1628 recheck:
1629 if (pmu_state == idle) {
1630 if (adb_int_pending) {
1631 if (int_data_state[0] == int_data_empty)
1632 int_data_last = 0;
1633 else if (int_data_state[1] == int_data_empty)
1634 int_data_last = 1;
1635 else
1636 goto no_free_slot;
1637 pmu_state = intack;
1638 int_data_state[int_data_last] = int_data_fill;
1639 /* Sounds safer to make sure ACK is high before writing.
1640 * This helped kill a problem with ADB and some iBooks
1641 */
1642 wait_for_ack();
1643 send_byte(PMU_INT_ACK);
1644 adb_int_pending = 0;
1645 } else if (current_req)
1646 pmu_start();
1647 }
1648 no_free_slot:
1649 /* Mark the oldest buffer for flushing */
1650 if (int_data_state[!int_data_last] == int_data_ready) {
1651 int_data_state[!int_data_last] = int_data_flush;
1652 int_data = !int_data_last;
1653 } else if (int_data_state[int_data_last] == int_data_ready) {
1654 int_data_state[int_data_last] = int_data_flush;
1655 int_data = int_data_last;
1656 }
1657 --disable_poll;
1658 spin_unlock_irqrestore(&pmu_lock, flags);
1659
1660 /* Deal with completed PMU requests outside of the lock */
1661 if (req) {
1662 pmu_done(req);
1663 req = NULL;
1664 }
1665
/* Deal with interrupt data outside of the lock */
1667 if (int_data >= 0) {
1668 pmu_handle_data(interrupt_data[int_data], interrupt_data_len[int_data]);
1669 spin_lock_irqsave(&pmu_lock, flags);
1670 ++disable_poll;
1671 int_data_state[int_data] = int_data_empty;
1672 int_data = -1;
1673 goto recheck;
1674 }
1675
1676 return IRQ_RETVAL(handled);
1677 }
1678
1679 void
pmu_unlock(void)
1681 {
1682 unsigned long flags;
1683
1684 spin_lock_irqsave(&pmu_lock, flags);
1685 if (pmu_state == locked)
1686 pmu_state = idle;
1687 adb_int_pending = 1;
1688 spin_unlock_irqrestore(&pmu_lock, flags);
1689 }
1690
1691
1692 static __maybe_unused irqreturn_t
gpio1_interrupt(int irq, void *arg)
1694 {
1695 unsigned long flags;
1696
1697 if ((in_8(gpio_reg + 0x9) & 0x02) == 0) {
1698 spin_lock_irqsave(&pmu_lock, flags);
1699 if (gpio_irq_enabled > 0) {
1700 disable_irq_nosync(gpio_irq);
1701 gpio_irq_enabled = 0;
1702 }
1703 pmu_irq_stats[12]++;
1704 adb_int_pending = 1;
1705 spin_unlock_irqrestore(&pmu_lock, flags);
1706 via_pmu_interrupt(0, NULL);
1707 return IRQ_HANDLED;
1708 }
1709 return IRQ_NONE;
1710 }
1711
1712 void
pmu_enable_irled(int on)
1714 {
1715 struct adb_request req;
1716
1717 if (pmu_state == uninitialized)
1718 return ;
1719 if (pmu_kind == PMU_KEYLARGO_BASED)
1720 return ;
1721
1722 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED |
1723 (on ? PMU_POW_ON : PMU_POW_OFF));
1724 pmu_wait_complete(&req);
1725 }
1726
1727 /* Offset between Unix time (1970-based) and Mac time (1904-based) */
1728 #define RTC_OFFSET 2082844800
1729
time64_t pmu_get_time(void)
1731 {
1732 struct adb_request req;
1733 u32 now;
1734
1735 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
1736 return 0;
1737 pmu_wait_complete(&req);
1738 if (req.reply_len != 4)
1739 pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
1740 now = (req.reply[0] << 24) + (req.reply[1] << 16) +
1741 (req.reply[2] << 8) + req.reply[3];
1742 return (time64_t)now - RTC_OFFSET;
1743 }
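
/*
 * Worked example: the PMU reports seconds since 1904-01-01, so a raw
 * reply of 0x7C25B080 (== 2082844800 == RTC_OFFSET) corresponds to the
 * Unix epoch and makes pmu_get_time() return 0.
 */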
1744
int pmu_set_rtc_time(struct rtc_time *tm)
1746 {
1747 u32 now;
1748 struct adb_request req;
1749
1750 now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
1751 if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
1752 now >> 24, now >> 16, now >> 8, now) < 0)
1753 return -ENXIO;
1754 pmu_wait_complete(&req);
1755 if (req.reply_len != 0)
1756 pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
1757 return 0;
1758 }
1759
1760 void
pmu_restart(void)
1762 {
1763 struct adb_request req;
1764
1765 if (pmu_state == uninitialized)
1766 return;
1767
1768 local_irq_disable();
1769
1770 drop_interrupts = 1;
1771
1772 if (pmu_kind != PMU_KEYLARGO_BASED) {
1773 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB |
1774 PMU_INT_TICK );
1775 while(!req.complete)
1776 pmu_poll();
1777 }
1778
1779 pmu_request(&req, NULL, 1, PMU_RESET);
1780 pmu_wait_complete(&req);
1781 for (;;)
1782 ;
1783 }
1784
1785 void
pmu_shutdown(void)
1787 {
1788 struct adb_request req;
1789
1790 if (pmu_state == uninitialized)
1791 return;
1792
1793 local_irq_disable();
1794
1795 drop_interrupts = 1;
1796
1797 if (pmu_kind != PMU_KEYLARGO_BASED) {
1798 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB |
1799 PMU_INT_TICK );
1800 pmu_wait_complete(&req);
1801 } else {
1802 /* Disable server mode on shutdown or we'll just
1803 * wake up again
1804 */
1805 pmu_set_server_mode(0);
1806 }
1807
1808 pmu_request(&req, NULL, 5, PMU_SHUTDOWN,
1809 'M', 'A', 'T', 'T');
1810 pmu_wait_complete(&req);
1811 for (;;)
1812 ;
1813 }
1814
1815 int
pmu_present(void)
1817 {
1818 return pmu_state != uninitialized;
1819 }
1820
1821 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
1822 /*
1823 * Put the powerbook to sleep.
1824 */
1825
1826 static u32 save_via[8];
1827 static int __fake_sleep;
1828
1829 static void
save_via_state(void)
1831 {
1832 save_via[0] = in_8(&via1[ANH]);
1833 save_via[1] = in_8(&via1[DIRA]);
1834 save_via[2] = in_8(&via1[B]);
1835 save_via[3] = in_8(&via1[DIRB]);
1836 save_via[4] = in_8(&via1[PCR]);
1837 save_via[5] = in_8(&via1[ACR]);
1838 save_via[6] = in_8(&via1[T1CL]);
1839 save_via[7] = in_8(&via1[T1CH]);
1840 }
1841 static void
restore_via_state(void)
1843 {
1844 out_8(&via1[ANH], save_via[0]);
1845 out_8(&via1[DIRA], save_via[1]);
1846 out_8(&via1[B], save_via[2]);
1847 out_8(&via1[DIRB], save_via[3]);
1848 out_8(&via1[PCR], save_via[4]);
1849 out_8(&via1[ACR], save_via[5]);
1850 out_8(&via1[T1CL], save_via[6]);
1851 out_8(&via1[T1CH], save_via[7]);
1852 out_8(&via1[IER], IER_CLR | 0x7f); /* disable all intrs */
1853 out_8(&via1[IFR], 0x7f); /* clear IFR */
1854 out_8(&via1[IER], IER_SET | SR_INT | CB1_INT);
1855 }
1856
1857 #define GRACKLE_PM (1<<7)
1858 #define GRACKLE_DOZE (1<<5)
1859 #define GRACKLE_NAP (1<<4)
1860 #define GRACKLE_SLEEP (1<<3)
1861
static int powerbook_sleep_grackle(void)
1863 {
1864 unsigned long save_l2cr;
1865 unsigned short pmcr1;
1866 struct adb_request req;
1867 struct pci_dev *grackle;
1868
1869 grackle = pci_get_domain_bus_and_slot(0, 0, 0);
1870 if (!grackle)
1871 return -ENODEV;
1872
1873 /* Turn off various things. Darwin does some retry tests here... */
1874 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE);
1875 pmu_wait_complete(&req);
1876 pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
1877 PMU_POW_OFF|PMU_POW_BACKLIGHT|PMU_POW_IRLED|PMU_POW_MEDIABAY);
1878 pmu_wait_complete(&req);
1879
1880 /* For 750, save backside cache setting and disable it */
1881 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
1882
1883 if (!__fake_sleep) {
1884 /* Ask the PMU to put us to sleep */
1885 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
1886 pmu_wait_complete(&req);
1887 }
1888
/* The VIA is not expected to be restored correctly across sleep, so save its state */
1890 save_via_state();
1891 /* We shut down some HW */
1892 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,1);
1893
1894 pci_read_config_word(grackle, 0x70, &pmcr1);
1895 /* Apparently, MacOS uses NAP mode for Grackle ??? */
1896 pmcr1 &= ~(GRACKLE_DOZE|GRACKLE_SLEEP);
1897 pmcr1 |= GRACKLE_PM|GRACKLE_NAP;
1898 pci_write_config_word(grackle, 0x70, pmcr1);
1899
1900 /* Call low-level ASM sleep handler */
1901 if (__fake_sleep)
1902 mdelay(5000);
1903 else
1904 low_sleep_handler();
1905
1906 /* We're awake again, stop grackle PM */
1907 pci_read_config_word(grackle, 0x70, &pmcr1);
1908 pmcr1 &= ~(GRACKLE_PM|GRACKLE_DOZE|GRACKLE_SLEEP|GRACKLE_NAP);
1909 pci_write_config_word(grackle, 0x70, pmcr1);
1910
1911 pci_dev_put(grackle);
1912
1913 /* Make sure the PMU is idle */
1914 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,0);
1915 restore_via_state();
1916
1917 /* Restore L2 cache */
1918 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
1919 _set_L2CR(save_l2cr);
1920
1921 /* Restore userland MMU context */
1922 switch_mmu_context(NULL, current->active_mm, NULL);
1923
1924 /* Power things up */
1925 pmu_unlock();
1926 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
1927 pmu_wait_complete(&req);
1928 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0,
1929 PMU_POW0_ON|PMU_POW0_HARD_DRIVE);
1930 pmu_wait_complete(&req);
1931 pmu_request(&req, NULL, 2, PMU_POWER_CTRL,
1932 PMU_POW_ON|PMU_POW_BACKLIGHT|PMU_POW_CHARGER|PMU_POW_IRLED|PMU_POW_MEDIABAY);
1933 pmu_wait_complete(&req);
1934
1935 return 0;
1936 }
1937
1938 static int
powerbook_sleep_Core99(void)
1940 {
1941 unsigned long save_l2cr;
1942 unsigned long save_l3cr;
1943 struct adb_request req;
1944
1945 if (pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,0,-1) < 0) {
1946 printk(KERN_ERR "Sleep mode not supported on this machine\n");
1947 return -ENOSYS;
1948 }
1949
1950 if (num_online_cpus() > 1 || cpu_is_offline(0))
1951 return -EAGAIN;
1952
1953 /* Stop environment and ADB interrupts */
1954 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
1955 pmu_wait_complete(&req);
1956
1957 /* Tell PMU what events will wake us up */
1958 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS,
1959 0xff, 0xff);
1960 pmu_wait_complete(&req);
1961 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_SET_WAKEUP_EVENTS,
1962 0, PMU_PWR_WAKEUP_KEY |
1963 (option_lid_wakeup ? PMU_PWR_WAKEUP_LID_OPEN : 0));
1964 pmu_wait_complete(&req);
1965
1966 /* Save the state of the L2 and L3 caches */
1967 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
1968 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
1969
1970 if (!__fake_sleep) {
1971 /* Ask the PMU to put us to sleep */
1972 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
1973 pmu_wait_complete(&req);
1974 }
1975
1976 	/* The VIA state is not preserved correctly across sleep, so save it now */
1977 save_via_state();
1978
1979 	/* Shut down various ASICs. There's a chance that we can no longer
1980 	 * talk to the PMU after this, which is why the sleep command is sent
1981 	 * to it beforehand. This still needs to be verified.
1982 	 */
1983 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1);
1984
1985 /* Call low-level ASM sleep handler */
1986 if (__fake_sleep)
1987 mdelay(5000);
1988 else
1989 low_sleep_handler();
1990
1991 /* Restore Apple core ASICs state */
1992 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0);
1993
1994 /* Restore VIA */
1995 restore_via_state();
1996
1997 	/* Tweak loops_per_jiffy until cpufreq re-adjusts the CPU frequency */
1998 loops_per_jiffy *= 2;
1999
2000 /* Restore video */
2001 pmac_call_early_video_resume();
2002
2003 /* Restore L2 cache */
2004 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
2005 _set_L2CR(save_l2cr);
2006 /* Restore L3 cache */
2007 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
2008 _set_L3CR(save_l3cr);
2009
2010 /* Restore userland MMU context */
2011 switch_mmu_context(NULL, current->active_mm, NULL);
2012
2013 /* Tell PMU we are ready */
2014 pmu_unlock();
2015 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
2016 pmu_wait_complete(&req);
2017 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
2018 pmu_wait_complete(&req);
2019
2020 /* Restore LPJ, cpufreq will adjust the cpu frequency */
2021 loops_per_jiffy /= 2;
2022
2023 return 0;
2024 }
2025
2026 #define PB3400_MEM_CTRL 0xf8000000
2027 #define PB3400_MEM_CTRL_SLEEP 0x70
2028
2029 static void __iomem *pb3400_mem_ctrl;
2030
2031 static void powerbook_sleep_init_3400(void)
2032 {
2033 /* map in the memory controller registers */
2034 pb3400_mem_ctrl = ioremap(PB3400_MEM_CTRL, 0x100);
2035 if (pb3400_mem_ctrl == NULL)
2036 		printk(KERN_WARNING "ioremap failed: sleep won't be possible\n");
2037 }
2038
2039 static int powerbook_sleep_3400(void)
2040 {
2041 int i, x;
2042 unsigned int hid0;
2043 unsigned long msr;
2044 struct adb_request sleep_req;
2045 unsigned int __iomem *mem_ctrl_sleep;
2046
2047 if (pb3400_mem_ctrl == NULL)
2048 return -ENOMEM;
2049 mem_ctrl_sleep = pb3400_mem_ctrl + PB3400_MEM_CTRL_SLEEP;
2050
2051 /* Set the memory controller to keep the memory refreshed
2052 while we're asleep */
2053 for (i = 0x403f; i >= 0x4000; --i) {
2054 out_be32(mem_ctrl_sleep, i);
2055 do {
2056 x = (in_be32(mem_ctrl_sleep) >> 16) & 0x3ff;
2057 } while (x == 0);
2058 if (x >= 0x100)
2059 break;
2060 }
2061
2062 /* Ask the PMU to put us to sleep */
2063 pmu_request(&sleep_req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T');
2064 pmu_wait_complete(&sleep_req);
2065 pmu_unlock();
2066
2067 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 1);
2068
2069 asleep = 1;
2070
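	/*
	 * Wake-up handshake: with MSR[POW] set the loop below sleeps the CPU,
	 * which only wakes to take interrupts. The PMU interrupt path is
	 * expected to clear 'asleep' once the wake event arrives, which is
	 * what lets us fall out of the loop and is why interrupts are
	 * re-enabled here.
	 */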
2071 /* Put the CPU into sleep mode */
2072 hid0 = mfspr(SPRN_HID0);
2073 hid0 = (hid0 & ~(HID0_NAP | HID0_DOZE)) | HID0_SLEEP;
2074 mtspr(SPRN_HID0, hid0);
2075 local_irq_enable();
2076 msr = mfmsr() | MSR_POW;
2077 while (asleep) {
2078 mb();
2079 mtmsr(msr);
2080 isync();
2081 }
2082 local_irq_disable();
2083
2084 /* OK, we're awake again, start restoring things */
2085 out_be32(mem_ctrl_sleep, 0x3f);
2086 pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, 0);
2087
2088 return 0;
2089 }
2090
2091 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
2092
2093 /*
2094 * Support for /dev/pmu device
2095 */
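/*
 * Userspace receives PMU event packets by read()ing the character device,
 * one packet per call. A minimal client sketch, illustrative only and
 * assuming the usual /dev/pmu node created for the misc device below:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/pmu", O_RDONLY);
 *	unsigned char pkt[16];                   (matches rb_entry.data below)
 *	ssize_t n = read(fd, pkt, sizeof(pkt));  (blocks until an event arrives)
 */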
2096 #define RB_SIZE 0x10
2097 struct pmu_private {
2098 struct list_head list;
2099 int rb_get;
2100 int rb_put;
2101 struct rb_entry {
2102 unsigned short len;
2103 unsigned char data[16];
2104 } rb_buf[RB_SIZE];
2105 wait_queue_head_t wait;
2106 spinlock_t lock;
2107 #if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
2108 int backlight_locker;
2109 #endif
2110 };
2111
2112 static LIST_HEAD(all_pmu_pvt);
2113 static DEFINE_SPINLOCK(all_pvt_lock);
2114
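/*
 * Broadcast one PMU event packet to every open /dev/pmu client. Each
 * client has its own ring buffer, and one slot is always left free so
 * that rb_get == rb_put means "empty"; if advancing rb_put would collide
 * with rb_get, the buffer is full and the packet is dropped for that
 * client only.
 */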
2115 static void
2116 pmu_pass_intr(unsigned char *data, int len)
2117 {
2118 struct pmu_private *pp;
2119 struct list_head *list;
2120 int i;
2121 unsigned long flags;
2122
2123 if (len > sizeof(pp->rb_buf[0].data))
2124 len = sizeof(pp->rb_buf[0].data);
2125 spin_lock_irqsave(&all_pvt_lock, flags);
2126 	list_for_each(list, &all_pmu_pvt) {
2127 pp = list_entry(list, struct pmu_private, list);
2128 spin_lock(&pp->lock);
2129 i = pp->rb_put + 1;
2130 if (i >= RB_SIZE)
2131 i = 0;
2132 if (i != pp->rb_get) {
2133 struct rb_entry *rp = &pp->rb_buf[pp->rb_put];
2134 rp->len = len;
2135 memcpy(rp->data, data, len);
2136 pp->rb_put = i;
2137 wake_up_interruptible(&pp->wait);
2138 }
2139 spin_unlock(&pp->lock);
2140 }
2141 spin_unlock_irqrestore(&all_pvt_lock, flags);
2142 }
2143
2144 static int
2145 pmu_open(struct inode *inode, struct file *file)
2146 {
2147 struct pmu_private *pp;
2148 unsigned long flags;
2149
2150 pp = kmalloc(sizeof(struct pmu_private), GFP_KERNEL);
2151 if (!pp)
2152 return -ENOMEM;
2153 pp->rb_get = pp->rb_put = 0;
2154 spin_lock_init(&pp->lock);
2155 init_waitqueue_head(&pp->wait);
2156 mutex_lock(&pmu_info_proc_mutex);
2157 spin_lock_irqsave(&all_pvt_lock, flags);
2158 #if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
2159 pp->backlight_locker = 0;
2160 #endif
2161 list_add(&pp->list, &all_pmu_pvt);
2162 spin_unlock_irqrestore(&all_pvt_lock, flags);
2163 file->private_data = pp;
2164 mutex_unlock(&pmu_info_proc_mutex);
2165 return 0;
2166 }
2167
2168 static ssize_t
2169 pmu_read(struct file *file, char __user *buf,
2170 size_t count, loff_t *ppos)
2171 {
2172 struct pmu_private *pp = file->private_data;
2173 DECLARE_WAITQUEUE(wait, current);
2174 unsigned long flags;
2175 int ret = 0;
2176
2177 if (count < 1 || !pp)
2178 return -EINVAL;
2179
2180 spin_lock_irqsave(&pp->lock, flags);
2181 add_wait_queue(&pp->wait, &wait);
2182 set_current_state(TASK_INTERRUPTIBLE);
2183
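	/*
	 * At most one event packet is returned per read(). The lock is
	 * dropped around copy_to_user() since it may fault and sleep; the
	 * interrupt-side writer only ever advances rb_put, so the entry
	 * being copied stays valid until rb_get is advanced below.
	 */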
2184 for (;;) {
2185 ret = -EAGAIN;
2186 if (pp->rb_get != pp->rb_put) {
2187 int i = pp->rb_get;
2188 struct rb_entry *rp = &pp->rb_buf[i];
2189 ret = rp->len;
2190 spin_unlock_irqrestore(&pp->lock, flags);
2191 if (ret > count)
2192 ret = count;
2193 if (ret > 0 && copy_to_user(buf, rp->data, ret))
2194 ret = -EFAULT;
2195 if (++i >= RB_SIZE)
2196 i = 0;
2197 spin_lock_irqsave(&pp->lock, flags);
2198 pp->rb_get = i;
2199 }
2200 if (ret >= 0)
2201 break;
2202 if (file->f_flags & O_NONBLOCK)
2203 break;
2204 ret = -ERESTARTSYS;
2205 if (signal_pending(current))
2206 break;
2207 spin_unlock_irqrestore(&pp->lock, flags);
2208 schedule();
2209 spin_lock_irqsave(&pp->lock, flags);
2210 }
2211 __set_current_state(TASK_RUNNING);
2212 remove_wait_queue(&pp->wait, &wait);
2213 spin_unlock_irqrestore(&pp->lock, flags);
2214
2215 return ret;
2216 }
2217
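/*
 * /dev/pmu has no write-based command interface: pmu_write() accepts the
 * call but reports zero bytes consumed. Userspace requests go through the
 * ioctl() handler further down instead.
 */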
2218 static ssize_t
2219 pmu_write(struct file *file, const char __user *buf,
2220 size_t count, loff_t *ppos)
2221 {
2222 return 0;
2223 }
2224
2225 static __poll_t
2226 pmu_fpoll(struct file *filp, poll_table *wait)
2227 {
2228 struct pmu_private *pp = filp->private_data;
2229 __poll_t mask = 0;
2230 unsigned long flags;
2231
2232 if (!pp)
2233 return 0;
2234 poll_wait(filp, &pp->wait, wait);
2235 spin_lock_irqsave(&pp->lock, flags);
2236 if (pp->rb_get != pp->rb_put)
2237 mask |= EPOLLIN;
2238 spin_unlock_irqrestore(&pp->lock, flags);
2239 return mask;
2240 }
2241
2242 static int
2243 pmu_release(struct inode *inode, struct file *file)
2244 {
2245 struct pmu_private *pp = file->private_data;
2246 unsigned long flags;
2247
2248 if (pp) {
2249 file->private_data = NULL;
2250 spin_lock_irqsave(&all_pvt_lock, flags);
2251 list_del(&pp->list);
2252 spin_unlock_irqrestore(&all_pvt_lock, flags);
2253
2254 #if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT)
2255 if (pp->backlight_locker)
2256 pmac_backlight_enable();
2257 #endif
2258
2259 kfree(pp);
2260 }
2261 return 0;
2262 }
2263
2264 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
2265 static void pmac_suspend_disable_irqs(void)
2266 {
2267 /* Call platform functions marked "on sleep" */
2268 pmac_pfunc_i2c_suspend();
2269 pmac_pfunc_base_suspend();
2270 }
2271
2272 static int powerbook_sleep(suspend_state_t state)
2273 {
2274 int error = 0;
2275
2276 /* Wait for completion of async requests */
2277 while (!batt_req.complete)
2278 pmu_poll();
2279
2280 	/* Give up the lazy FPU & vec state so we don't have to back it
2281 	 * up from the low-level code
2282 	 */
2283 enable_kernel_fp();
2284
2285 #ifdef CONFIG_ALTIVEC
2286 if (cpu_has_feature(CPU_FTR_ALTIVEC))
2287 enable_kernel_altivec();
2288 #endif /* CONFIG_ALTIVEC */
2289
2290 switch (pmu_kind) {
2291 case PMU_OHARE_BASED:
2292 error = powerbook_sleep_3400();
2293 break;
2294 case PMU_HEATHROW_BASED:
2295 case PMU_PADDINGTON_BASED:
2296 error = powerbook_sleep_grackle();
2297 break;
2298 case PMU_KEYLARGO_BASED:
2299 error = powerbook_sleep_Core99();
2300 break;
2301 default:
2302 return -ENOSYS;
2303 }
2304
2305 if (error)
2306 return error;
2307
2308 mdelay(100);
2309
2310 return 0;
2311 }
2312
2313 static void pmac_suspend_enable_irqs(void)
2314 {
2315 /* Force a poll of ADB interrupts */
2316 adb_int_pending = 1;
2317 via_pmu_interrupt(0, NULL);
2318
2319 mdelay(10);
2320
2321 /* Call platform functions marked "on wake" */
2322 pmac_pfunc_base_resume();
2323 pmac_pfunc_i2c_resume();
2324 }
2325
2326 static int pmu_sleep_valid(suspend_state_t state)
2327 {
2328 return state == PM_SUSPEND_MEM
2329 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
2330 }
2331
2332 static const struct platform_suspend_ops pmu_pm_ops = {
2333 .enter = powerbook_sleep,
2334 .valid = pmu_sleep_valid,
2335 };
2336
2337 static int __init register_pmu_pm_ops(void)
2338 {
2339 if (pmu_kind == PMU_OHARE_BASED)
2340 powerbook_sleep_init_3400();
2341 ppc_md.suspend_disable_irqs = pmac_suspend_disable_irqs;
2342 ppc_md.suspend_enable_irqs = pmac_suspend_enable_irqs;
2343 suspend_set_ops(&pmu_pm_ops);
2344
2345 return 0;
2346 }
2347
2348 device_initcall(register_pmu_pm_ops);
2349 #endif
2350
2351 static int pmu_ioctl(struct file *filp,
2352 u_int cmd, u_long arg)
2353 {
2354 __u32 __user *argp = (__u32 __user *)arg;
2355 int error = -EINVAL;
2356
2357 switch (cmd) {
2358 #ifdef CONFIG_PPC_PMAC
2359 case PMU_IOC_SLEEP:
2360 if (!capable(CAP_SYS_ADMIN))
2361 return -EACCES;
2362 return pm_suspend(PM_SUSPEND_MEM);
2363 case PMU_IOC_CAN_SLEEP:
2364 if (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) < 0)
2365 return put_user(0, argp);
2366 else
2367 return put_user(1, argp);
2368 #endif
2369
2370 #ifdef CONFIG_PMAC_BACKLIGHT_LEGACY
2371 	/* Compatibility ioctls for backlight */
2372 case PMU_IOC_GET_BACKLIGHT:
2373 {
2374 int brightness;
2375
2376 brightness = pmac_backlight_get_legacy_brightness();
2377 if (brightness < 0)
2378 return brightness;
2379 else
2380 return put_user(brightness, argp);
2381
2382 }
2383 case PMU_IOC_SET_BACKLIGHT:
2384 {
2385 int brightness;
2386
2387 error = get_user(brightness, argp);
2388 if (error)
2389 return error;
2390
2391 return pmac_backlight_set_legacy_brightness(brightness);
2392 }
2393 #ifdef CONFIG_INPUT_ADBHID
2394 case PMU_IOC_GRAB_BACKLIGHT: {
2395 struct pmu_private *pp = filp->private_data;
2396
2397 if (pp->backlight_locker)
2398 return 0;
2399
2400 pp->backlight_locker = 1;
2401 pmac_backlight_disable();
2402
2403 return 0;
2404 }
2405 #endif /* CONFIG_INPUT_ADBHID */
2406 #endif /* CONFIG_PMAC_BACKLIGHT_LEGACY */
2407
2408 case PMU_IOC_GET_MODEL:
2409 return put_user(pmu_kind, argp);
2410 case PMU_IOC_HAS_ADB:
2411 return put_user(pmu_has_adb, argp);
2412 }
2413 return error;
2414 }
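/*
 * A minimal userspace sketch of the query ioctls handled above,
 * illustrative only and assuming the usual /dev/pmu node plus the uapi
 * definitions from <linux/pmu.h>:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pmu.h>
 *
 *	int fd = open("/dev/pmu", O_RDONLY);
 *	__u32 model, has_adb;
 *	ioctl(fd, PMU_IOC_GET_MODEL, &model);    (returns pmu_kind)
 *	ioctl(fd, PMU_IOC_HAS_ADB, &has_adb);    (nonzero if the PMU drives ADB)
 */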
2415
2416 static long pmu_unlocked_ioctl(struct file *filp,
2417 u_int cmd, u_long arg)
2418 {
2419 int ret;
2420
2421 mutex_lock(&pmu_info_proc_mutex);
2422 ret = pmu_ioctl(filp, cmd, arg);
2423 mutex_unlock(&pmu_info_proc_mutex);
2424
2425 return ret;
2426 }
2427
2428 #ifdef CONFIG_COMPAT
2429 #define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
2430 #define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
2431 #define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t)
2432 #define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t)
2433 #define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t)
2434 #define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
2435
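/*
 * The native PMU_IOC_* numbers encode a size_t in the ioctl size field,
 * so 32-bit userspace on a 64-bit kernel generates different command
 * values. Translate those back to the native commands and convert the
 * argument pointer with compat_ptr().
 */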
2436 static long compat_pmu_ioctl(struct file *filp, u_int cmd, u_long arg)
2437 {
2438 switch (cmd) {
2439 case PMU_IOC_SLEEP:
2440 break;
2441 case PMU_IOC_GET_BACKLIGHT32:
2442 cmd = PMU_IOC_GET_BACKLIGHT;
2443 break;
2444 case PMU_IOC_SET_BACKLIGHT32:
2445 cmd = PMU_IOC_SET_BACKLIGHT;
2446 break;
2447 case PMU_IOC_GET_MODEL32:
2448 cmd = PMU_IOC_GET_MODEL;
2449 break;
2450 case PMU_IOC_HAS_ADB32:
2451 cmd = PMU_IOC_HAS_ADB;
2452 break;
2453 case PMU_IOC_CAN_SLEEP32:
2454 cmd = PMU_IOC_CAN_SLEEP;
2455 break;
2456 case PMU_IOC_GRAB_BACKLIGHT32:
2457 cmd = PMU_IOC_GRAB_BACKLIGHT;
2458 break;
2459 default:
2460 return -ENOIOCTLCMD;
2461 }
2462 return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2463 }
2464 #endif
2465
2466 static const struct file_operations pmu_device_fops = {
2467 .read = pmu_read,
2468 .write = pmu_write,
2469 .poll = pmu_fpoll,
2470 .unlocked_ioctl = pmu_unlocked_ioctl,
2471 #ifdef CONFIG_COMPAT
2472 .compat_ioctl = compat_pmu_ioctl,
2473 #endif
2474 .open = pmu_open,
2475 .release = pmu_release,
2476 .llseek = noop_llseek,
2477 };
2478
2479 static struct miscdevice pmu_device = {
2480 PMU_MINOR, "pmu", &pmu_device_fops
2481 };
2482
2483 static int pmu_device_init(void)
2484 {
2485 if (pmu_state == uninitialized)
2486 return 0;
2487 if (misc_register(&pmu_device) < 0)
2488 printk(KERN_ERR "via-pmu: cannot register misc device.\n");
2489 return 0;
2490 }
2491 device_initcall(pmu_device_init);
2492
2493
2494 #ifdef DEBUG_SLEEP
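/*
 * Polled (interrupt-free) PMU access, compiled only with DEBUG_SLEEP.
 * Bytes are clocked through the VIA shift register by hand, using the
 * TREQ/TACK lines for the handshake, instead of going through the normal
 * interrupt-driven state machine.
 */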
2495 static inline void
2496 polled_handshake(void)
2497 {
2498 via2[B] &= ~TREQ; eieio();
2499 while ((via2[B] & TACK) != 0)
2500 ;
2501 via2[B] |= TREQ; eieio();
2502 while ((via2[B] & TACK) == 0)
2503 ;
2504 }
2505
2506 static inline void
2507 polled_send_byte(int x)
2508 {
2509 via1[ACR] |= SR_OUT | SR_EXT; eieio();
2510 via1[SR] = x; eieio();
2511 polled_handshake();
2512 }
2513
2514 static inline int
2515 polled_recv_byte(void)
2516 {
2517 int x;
2518
2519 via1[ACR] = (via1[ACR] & ~SR_OUT) | SR_EXT; eieio();
2520 x = via1[SR]; eieio();
2521 polled_handshake();
2522 x = via1[SR]; eieio();
2523 return x;
2524 }
2525
2526 int
2527 pmu_polled_request(struct adb_request *req)
2528 {
2529 unsigned long flags;
2530 int i, l, c;
2531
2532 req->complete = 1;
2533 c = req->data[0];
2534 l = pmu_data_len[c][0];
2535 if (l >= 0 && req->nbytes != l + 1)
2536 return -EINVAL;
2537
2538 local_irq_save(flags);
2539 while (pmu_state != idle)
2540 pmu_poll();
2541
2542 while ((via2[B] & TACK) == 0)
2543 ;
2544 polled_send_byte(c);
2545 if (l < 0) {
2546 l = req->nbytes - 1;
2547 polled_send_byte(l);
2548 }
2549 for (i = 1; i <= l; ++i)
2550 polled_send_byte(req->data[i]);
2551
2552 l = pmu_data_len[c][1];
2553 if (l < 0)
2554 l = polled_recv_byte();
2555 for (i = 0; i < l; ++i)
2556 req->reply[i + req->reply_len] = polled_recv_byte();
2557
2558 if (req->done)
2559 (*req->done)(req);
2560
2561 local_irq_restore(flags);
2562 return 0;
2563 }
2564
2565 /* N.B. This doesn't work on the 3400 */
2566 void pmu_blink(int n)
2567 {
2568 struct adb_request req;
2569
2570 memset(&req, 0, sizeof(req));
2571
2572 for (; n > 0; --n) {
2573 req.nbytes = 4;
2574 req.done = NULL;
2575 req.data[0] = 0xee;
2576 req.data[1] = 4;
2577 req.data[2] = 0;
2578 req.data[3] = 1;
2579 req.reply[0] = ADB_RET_OK;
2580 req.reply_len = 1;
2581 req.reply_expected = 0;
2582 pmu_polled_request(&req);
2583 mdelay(50);
2584 req.nbytes = 4;
2585 req.done = NULL;
2586 req.data[0] = 0xee;
2587 req.data[1] = 4;
2588 req.data[2] = 0;
2589 req.data[3] = 0;
2590 req.reply[0] = ADB_RET_OK;
2591 req.reply_len = 1;
2592 req.reply_expected = 0;
2593 pmu_polled_request(&req);
2594 mdelay(50);
2595 }
2596 mdelay(50);
2597 }
2598 #endif /* DEBUG_SLEEP */
2599
2600 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
2601 int pmu_sys_suspended;
2602
2603 static int pmu_syscore_suspend(void)
2604 {
2605 /* Suspend PMU event interrupts */
2606 pmu_suspend();
2607 pmu_sys_suspended = 1;
2608
2609 #ifdef CONFIG_PMAC_BACKLIGHT
2610 /* Tell backlight code not to muck around with the chip anymore */
2611 pmu_backlight_set_sleep(1);
2612 #endif
2613
2614 return 0;
2615 }
2616
2617 static void pmu_syscore_resume(void)
2618 {
2619 struct adb_request req;
2620
2621 if (!pmu_sys_suspended)
2622 return;
2623
2624 /* Tell PMU we are ready */
2625 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
2626 pmu_wait_complete(&req);
2627
2628 #ifdef CONFIG_PMAC_BACKLIGHT
2629 /* Tell backlight code it can use the chip again */
2630 pmu_backlight_set_sleep(0);
2631 #endif
2632 /* Resume PMU event interrupts */
2633 pmu_resume();
2634 pmu_sys_suspended = 0;
2635 }
2636
2637 static struct syscore_ops pmu_syscore_ops = {
2638 .suspend = pmu_syscore_suspend,
2639 .resume = pmu_syscore_resume,
2640 };
2641
2642 static int pmu_syscore_register(void)
2643 {
2644 register_syscore_ops(&pmu_syscore_ops);
2645
2646 return 0;
2647 }
2648 subsys_initcall(pmu_syscore_register);
2649 #endif /* CONFIG_SUSPEND && CONFIG_PPC32 */
2650
2651 EXPORT_SYMBOL(pmu_request);
2652 EXPORT_SYMBOL(pmu_queue_request);
2653 EXPORT_SYMBOL(pmu_poll);
2654 EXPORT_SYMBOL(pmu_poll_adb);
2655 EXPORT_SYMBOL(pmu_wait_complete);
2656 EXPORT_SYMBOL(pmu_suspend);
2657 EXPORT_SYMBOL(pmu_resume);
2658 EXPORT_SYMBOL(pmu_unlock);
2659 #if defined(CONFIG_PPC32)
2660 EXPORT_SYMBOL(pmu_enable_irled);
2661 EXPORT_SYMBOL(pmu_battery_count);
2662 EXPORT_SYMBOL(pmu_batteries);
2663 EXPORT_SYMBOL(pmu_power_flags);
2664 #endif /* CONFIG_PPC32 */
2665
2666