/*
 * ec.c - ACPI Embedded Controller Driver (v2.1)
 *
 * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI : EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

/* EC commands */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};

#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */

enum {
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_GPE_STORM,		/* GPE storm detected */
	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
					 * OpReg are installed */
	EC_FLAGS_BLOCKED,		/* Transactions are blocked */
};

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

/*
 * If the number of false interrupts per transaction exceeds this
 * threshold, a GPE storm is assumed to have happened and the GPE is
 * disabled for normal transactions.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered as a GPE storm");

struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	bool done;
};

struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);

static int EC_FLAGS_MSI;		/* Out-of-spec MSI controller */
static int EC_FLAGS_VALIDATE_ECDT;	/* ASUSTek ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN;	/* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME;	/* Needs acpi_ec_clear() on boot/resume */

/* --------------------------------------------------------------------------
                             Transaction Management
   -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);
	pr_debug("---> status = 0x%2.2x\n", x);
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);
	pr_debug("---> data = 0x%2.2x\n", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	pr_debug("<--- command = 0x%2.2x\n", command);
	outb(command, ec->command_addr);
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	pr_debug("<--- data = 0x%2.2x\n", data);
	outb(data, ec->data_addr);
}

static int ec_transaction_done(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (!ec->curr || ec->curr->done)
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->done = false;
	acpi_ec_write_cmd(ec, ec->curr->command);
}

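/*
 * advance_transaction() moves the current transaction forward by one
 * step, based on the status byte supplied by the caller:
 *
 *  - while write bytes remain and IBF is clear, the next byte is pushed
 *    into the data register;
 *  - while read bytes remain and OBF is set, the next byte is pulled
 *    from the data register, and the transaction is marked done once
 *    the last byte has been read;
 *  - a command with no data left to transfer is done as soon as IBF
 *    clears.
 *
 * Any other state observed from interrupt context without the SCI bit
 * set is counted as a spurious GPE for storm detection.
 */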
static void advance_transaction(struct acpi_ec *ec, u8 status)
{
	unsigned long flags;
	struct transaction *t;

	spin_lock_irqsave(&ec->lock, flags);
	t = ec->curr;
	if (!t)
		goto unlock;
	if (t->wlen > t->wi) {
		if ((status & ACPI_EC_FLAG_IBF) == 0)
			acpi_ec_write_data(ec, t->wdata[t->wi++]);
		else
			goto err;
	} else if (t->rlen > t->ri) {
		if ((status & ACPI_EC_FLAG_OBF) == 1) {
			t->rdata[t->ri++] = acpi_ec_read_data(ec);
			if (t->rlen == t->ri)
				t->done = true;
		} else
			goto err;
	} else if (t->wlen == t->wi &&
		   (status & ACPI_EC_FLAG_IBF) == 0)
		t->done = true;
	goto unlock;
err:
	/*
	 * If the SCI bit is set, this is not a false IRQ;
	 * otherwise an unhandled IRQ would be counted as a false one.
	 */
	if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
		++t->irq_count;

unlock:
	spin_unlock_irqrestore(&ec->lock, flags);
}

static int acpi_ec_sync_query(struct acpi_ec *ec);

static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
	if (state & ACPI_EC_FLAG_SCI) {
		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
			return acpi_ec_sync_query(ec);
	}
	return 0;
}

static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
		do {
			/* don't sleep with disabled interrupts */
			if (EC_FLAGS_MSI || irqs_disabled()) {
				udelay(ACPI_EC_MSI_UDELAY);
				if (ec_transaction_done(ec))
					return 0;
			} else {
				if (wait_event_timeout(ec->wait,
						ec_transaction_done(ec),
						msecs_to_jiffies(1)))
					return 0;
			}
			advance_transaction(ec, acpi_ec_read_status(ec));
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	if (EC_FLAGS_MSI)
		udelay(ACPI_EC_MSI_UDELAY);
	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* following two actions should be kept atomic */
	ec->curr = t;
	start_transaction(ec);
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY)
		clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
	spin_unlock_irqrestore(&ec->lock, tmp);
	ret = ec_poll(ec);
	spin_lock_irqsave(&ec->lock, tmp);
	ec->curr = NULL;
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}

static int ec_check_ibf0(struct acpi_ec *ec)
{
	u8 status = acpi_ec_read_status(ec);
	return (status & ACPI_EC_FLAG_IBF) == 0;
}

static int ec_wait_ibf0(struct acpi_ec *ec)
{
	unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
	/* interrupt wait manually if GPE mode is not active */
	while (time_before(jiffies, delay))
		if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
				       msecs_to_jiffies(1)))
			return 0;
	return -ETIME;
}

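/*
 * acpi_ec_transaction() is the serialized entry point for EC accesses:
 * it takes ec->mutex, refuses to run while transactions are blocked,
 * optionally acquires the ACPI global lock, and, when a GPE storm has
 * been detected, keeps the EC GPE disabled for the duration of the
 * transaction so that it is carried out in polling mode.
 */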
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);
	mutex_lock(&ec->mutex);
	if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
		status = -EINVAL;
		goto unlock;
	}
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}
	if (ec_wait_ibf0(ec)) {
		pr_err("input buffer is not empty, aborting transaction\n");
		status = -ETIME;
		goto end;
	}
	pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
		 t->command, t->wdata ? t->wdata[0] : 0);
	/* disable GPE during transaction if storm is detected */
	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
		/* It has to be disabled, so that it doesn't trigger. */
		acpi_disable_gpe(NULL, ec->gpe);
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	/* check if we received SCI during transaction */
	ec_check_sci_sync(ec, acpi_ec_read_status(ec));
	if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
		msleep(1);
		/* It is safe to enable the GPE outside of the transaction. */
		acpi_enable_gpe(NULL, ec->gpe);
	} else if (t->irq_count > ec_storm_threshold) {
		pr_info("GPE storm detected(%d GPEs), transactions will use polling mode\n",
			t->irq_count);
		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
	}
	pr_debug("transaction end\n");
end:
	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}

int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	} else
		return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);
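
/*
 * ec_read(), ec_write() and ec_transaction() above operate on the first
 * EC that was registered (first_ec) and are the interface other drivers
 * use to access the EC address space.  An illustrative (hypothetical)
 * caller, reading an arbitrary EC offset 0x60:
 *
 *	u8 val;
 *
 *	if (!ec_read(0x60, &val))
 *		pr_debug("EC offset 0x60 reads 0x%02x\n", val);
 */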

static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);

/*
 * Clears stale _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query_unlocked(ec, &value);
		if (status || !value)
			break;
	}

	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	set_bit(EC_FLAGS_BLOCKED, &ec->flags);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Allow transactions to be carried out again */
	clear_bit(EC_FLAGS_BLOCKED, &ec->flags);

	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);

	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions_early(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags);
}

static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};
	if (!ec || !data)
		return -EINVAL;
	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction_unlocked(ec, &t);
	if (result)
		return result;
	if (!d)
		return -ENODATA;
	*data = d;
	return 0;
}

/* --------------------------------------------------------------------------
                                Event Management
   -------------------------------------------------------------------------- */
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
	    kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (query_bit == handler->query_bit) {
			list_del(&handler->node);
			kfree(handler);
		}
	}
	mutex_unlock(&ec->mutex);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);

static void acpi_ec_run(void *cxt)
{
	struct acpi_ec_query_handler *handler = cxt;

	if (!handler)
		return;
	pr_debug("start query execution\n");
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	pr_debug("stop query execution\n");
	kfree(handler);
}

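/*
 * acpi_ec_sync_query() reads the pending query (notification) byte from
 * the EC and, if a handler has been registered for that value, hands a
 * copy of the handler to acpi_os_execute() for deferred execution;
 * acpi_ec_run() then either calls the handler's callback or evaluates
 * the matching _Qxx method, and frees the copy.
 */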
static int acpi_ec_sync_query(struct acpi_ec *ec)
{
	u8 value = 0;
	int status;
	struct acpi_ec_query_handler *handler, *copy;

	status = acpi_ec_query_unlocked(ec, &value);
	if (status)
		return status;
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			/* have custom handler for this bit */
			copy = kmalloc(sizeof(*handler), GFP_KERNEL);
			if (!copy)
				return -ENOMEM;
			memcpy(copy, handler, sizeof(*copy));
			pr_debug("push query execution (0x%2x) on queue\n",
				 value);
			return acpi_os_execute((copy->func) ?
				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
				acpi_ec_run, copy);
		}
	}
	return 0;
}

static void acpi_ec_gpe_query(void *ec_cxt)
{
	struct acpi_ec *ec = ec_cxt;

	if (!ec)
		return;
	mutex_lock(&ec->mutex);
	acpi_ec_sync_query(ec);
	mutex_unlock(&ec->mutex);
}

static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
	if (state & ACPI_EC_FLAG_SCI) {
		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
			pr_debug("push gpe query to the queue\n");
			return acpi_os_execute(OSL_NOTIFY_HANDLER,
					       acpi_ec_gpe_query, ec);
		}
	}
	return 0;
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	struct acpi_ec *ec = data;
	u8 status = acpi_ec_read_status(ec);

	pr_debug("~~~> interrupt, status:0x%02x\n", status);

	advance_transaction(ec, status);
	if (ec_transaction_done(ec) &&
	    (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
		wake_up(&ec->wait);
		ec_check_sci(ec, acpi_ec_read_status(ec));
	}
	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
}

/* --------------------------------------------------------------------------
                          Address Space Management
   -------------------------------------------------------------------------- */

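/*
 * acpi_ec_space_handler() implements the EmbeddedControl operation
 * region for AML: each access is split into single-byte reads or
 * writes, and burst mode is enabled around multi-byte accesses (and on
 * MSI hardware) to speed up the back-to-back transfers.
 */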
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (EC_FLAGS_MSI || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (EC_FLAGS_MSI || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}

/* --------------------------------------------------------------------------
                               Driver Interface
   -------------------------------------------------------------------------- */
static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static struct acpi_ec *make_acpi_ec(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
	if (!ec)
		return NULL;
	ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) {
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	}
	return AE_OK;
}

static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;

	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;

	/* Get GPE bit assignment (EC events). */
	/* TODO: Add support for _GPE returning a package */
	status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
	if (ACPI_FAILURE(status))
		return status;
	ec->gpe = tmp;
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}

static int ec_install_handlers(struct acpi_ec *ec)
{
	acpi_status status;

	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return 0;
	status = acpi_install_gpe_handler(NULL, ec->gpe,
					  ACPI_GPE_EDGE_TRIGGERED,
					  &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_enable_gpe(NULL, ec->gpe);
	status = acpi_install_address_space_handler(ec->handle,
						    ACPI_ADR_SPACE_EC,
						    &acpi_ec_space_handler,
						    NULL, ec);
	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_FOUND) {
			/*
			 * Maybe the OS failed to evaluate the _REG object.
			 * The AE_NOT_FOUND error is ignored and the OS
			 * continues to initialize the EC.
			 */
			pr_err("Failed to evaluate the _REG object of the EC device. A broken BIOS is suspected.\n");
		} else {
			acpi_disable_gpe(NULL, ec->gpe);
			acpi_remove_gpe_handler(NULL, ec->gpe,
						&acpi_ec_gpe_handler);
			return -ENODEV;
		}
	}

	set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
	return 0;
}

static void ec_remove_handlers(struct acpi_ec *ec)
{
	acpi_disable_gpe(NULL, ec->gpe);
	if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
				ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
		pr_err("failed to remove space handler\n");
	if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				&acpi_ec_gpe_handler)))
		pr_err("failed to remove gpe handler\n");
	clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}

static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	/* Check for boot EC */
	if (boot_ec &&
	    (boot_ec->handle == device->handle ||
	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
		ec = boot_ec;
		boot_ec = NULL;
	} else {
		ec = make_acpi_ec();
		if (!ec)
			return -ENOMEM;
	}
	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		kfree(ec);
		return -EINVAL;
	}

	/* Find and register all query methods */
	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
			    acpi_ec_register_query_methods, NULL, ec, NULL);

	if (!first_ec)
		first_ec = ec;
	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
		ec->gpe, ec->command_addr, ec->data_addr);

	ret = ec_install_handlers(ec);

	/* EC is fully operational, allow queries */
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);

	/* Clear stale _Q events if hardware might require that */
	if (EC_FLAGS_CLEAR_ON_RESUME) {
		mutex_lock(&ec->mutex);
		acpi_ec_clear(ec);
		mutex_unlock(&ec->mutex);
	}
	return ret;
}

static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;
	struct acpi_ec_query_handler *handler, *tmp;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	ec_remove_handlers(ec);
	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		list_del(&handler->node);
		kfree(handler);
	}
	mutex_unlock(&ec->mutex);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec == first_ec)
		first_ec = NULL;
	kfree(ec);
	return 0;
}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

int __init acpi_boot_ec_enable(void)
{
	if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
		return 0;
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
	return -EFAULT;
}

static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};

/* Some BIOSes do not survive an early DSDT scan, so skip it */
static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
{
	EC_FLAGS_SKIP_DSDT_SCAN = 1;
	return 0;
}

/* ASUSTek often supplies us with broken ECDTs, so validate them */
static int ec_validate_ecdt(const struct dmi_system_id *id)
{
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}

/* MSI ECs need special treatment, so enable the workarounds */
static int ec_flag_msi(const struct dmi_system_id *id)
{
	pr_debug("Detected MSI hardware, enabling workarounds.\n");
	EC_FLAGS_MSI = 1;
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}

/*
 * The Clevo M720 notebook actually works OK with IRQ mode, if we lift
 * the GPE storm threshold back to 20.
 */
static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
{
	pr_debug("Setting the EC GPE storm threshold to 20\n");
	ec_storm_threshold = 20;
	return 0;
}

/*
 * On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. If too many events accumulate, these ECs stop reporting GPEs until
 * they are manually polled (e.g. Samsung Series 5/9 notebooks).
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	return 0;
}

static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
	ec_skip_dsdt_scan, "Compal JFL92", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
	{
	ec_flag_msi, "MSI hardware", {
	DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
	{
	ec_flag_msi, "Quanta hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
	DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
	{
	ec_flag_msi, "Quanta hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
	DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
	{
	ec_enlarge_storm_threshold, "CLEVO hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
	DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
	{
	ec_skip_dsdt_scan, "HP Folio 13", {
	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};

int __init acpi_ec_ecdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *saved_ec = NULL;
	struct acpi_table_ecdt *ecdt_ptr;

	boot_ec = make_acpi_ec();
	if (!boot_ec)
		return -ENOMEM;
	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_SUCCESS(status)) {
		pr_info("EC description table found, configuring boot EC\n");
		boot_ec->command_addr = ecdt_ptr->control.address;
		boot_ec->data_addr = ecdt_ptr->data.address;
		boot_ec->gpe = ecdt_ptr->gpe;
		boot_ec->handle = ACPI_ROOT_OBJECT;
		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
				&boot_ec->handle);
		/* Don't trust the ECDT if it comes from ASUSTek */
		if (!EC_FLAGS_VALIDATE_ECDT)
			goto install;
		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
		if (!saved_ec)
			return -ENOMEM;
		/* fall through */
	}

	if (EC_FLAGS_SKIP_DSDT_SCAN)
		return -ENODEV;

	/*
	 * This workaround is needed only on some broken machines, which
	 * require an early EC but fail to provide an ECDT.
	 */
	pr_debug("Look up EC in DSDT\n");
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
				  boot_ec, NULL);
	/* Check that acpi_get_devices actually found something */
	if (ACPI_FAILURE(status) || !boot_ec->handle)
		goto error;
	if (saved_ec) {
		/* try to find a good ECDT from ASUSTek */
		if (saved_ec->command_addr != boot_ec->command_addr ||
		    saved_ec->data_addr != boot_ec->data_addr ||
		    saved_ec->gpe != boot_ec->gpe ||
		    saved_ec->handle != boot_ec->handle)
			pr_info("ASUSTek keeps feeding us with broken "
				"ECDT tables, which are very hard to work around. "
				"Trying to use DSDT EC info instead. Please send "
				"output of acpidump to linux-acpi@vger.kernel.org\n");
		kfree(saved_ec);
		saved_ec = NULL;
	} else {
		/*
		 * We really need to limit this workaround; the only ASUS
		 * that needs it has a fake EC._INI method, so use that as
		 * a flag. Keep the boot_ec struct as it will be needed soon.
		 */
		if (!dmi_name_in_vendors("ASUS") ||
		    !acpi_has_method(boot_ec->handle, "_INI"))
			return -ENODEV;
	}
install:
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
error:
	kfree(boot_ec);
	boot_ec = NULL;
	return -ENODEV;
}

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
};

int __init acpi_ec_init(void)
{
	int result = 0;

	/* Now register the driver for the EC */
	result = acpi_bus_register_driver(&acpi_ec_driver);
	if (result < 0)
		return -ENODEV;

	return result;
}

/* The EC driver is currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	return;
}
#endif	/* 0 */