// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ec.c - ACPI Embedded Controller Driver (v3)
 *
 * Copyright (C) 2001-2015 Intel Corporation
 * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
 *         2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 *         2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *         2004       Luming Yu <luming.yu@intel.com>
 *         2001, 2002 Andy Grover <andrew.grover@intel.com>
 *         2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
 */

/* Uncomment next line to get verbose printout */
/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: EC: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <asm/io.h>

#include "internal.h"

#define ACPI_EC_CLASS			"embedded_controller"
#define ACPI_EC_DEVICE_NAME		"Embedded Controller"
#define ACPI_EC_FILE_INFO		"info"

/* EC status register */
#define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
#define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
#define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
#define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */

/*
 * The SCI_EVT clearing timing is not defined by the ACPI specification.
 * This leads to lots of practical timing issues for the host EC driver.
 * The following variations are defined (from the target EC firmware's
 * perspective):
 * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
 *         target can clear SCI_EVT at any time so long as the host can see
 *         the indication by reading the status register (EC_SC). So the
 *         host should re-check SCI_EVT after the first time the SCI_EVT
 *         indication is seen, which is the same time the query request
 *         (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
 *         at any later time could indicate another event. Normally such
 *         EC firmware implements an event queue and returns 0x00 to
 *         indicate "no outstanding event".
 * QUERY: After seeing the query request (QR_EC) written to the command
 *        register (EC_CMD) by the host and having prepared the responding
 *        event value in the data register (EC_DATA), the target can safely
 *        clear SCI_EVT because the target can confirm that the current
 *        event is being handled by the host. The host then should check
 *        SCI_EVT right after reading the event response from the data
 *        register (EC_DATA).
 * EVENT: After seeing the event response read from the data register
 *        (EC_DATA) by the host, the target can clear SCI_EVT. As the
 *        target requires time to notice the change in the data register
 *        (EC_DATA), the host may be required to wait an additional guarding
 *        time before checking SCI_EVT again. Such guarding may not be
 *        necessary if the host is notified via another IRQ.
 */
#define ACPI_EC_EVT_TIMING_STATUS	0x00
#define ACPI_EC_EVT_TIMING_QUERY	0x01
#define ACPI_EC_EVT_TIMING_EVENT	0x02
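
/*
 * In short, the three modes differ in when the firmware clears SCI_EVT and
 * hence in when the host must re-check it:
 *
 *   Mode    SCI_EVT cleared...                 Host re-checks SCI_EVT...
 *   STATUS  any time after the host reads      as QR_EC is written to EC_CMD
 *           the indication from EC_SC
 *   QUERY   once QR_EC is seen in EC_CMD       right after reading EC_DATA
 *   EVENT   once the host reads EC_DATA        after an additional guard delay
 */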

/* EC commands */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,
	ACPI_EC_COMMAND_WRITE = 0x81,
	ACPI_EC_BURST_ENABLE = 0x82,
	ACPI_EC_BURST_DISABLE = 0x83,
	ACPI_EC_COMMAND_QUERY = 0x84,
};
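
/*
 * For reference, a minimal sketch (not compiled, mirroring the ACPI spec's
 * EC protocol) of how ACPI_EC_COMMAND_READ maps onto the two I/O ports.
 * The driver below implements the same sequence asynchronously in
 * advance_transaction(); the busy-wait helpers here are illustrative only.
 */
#if 0
static u8 ec_read_byte_polled(u16 cmd_port, u16 data_port, u8 address)
{
	while (inb(cmd_port) & ACPI_EC_FLAG_IBF)	/* wait for input buffer empty */
		cpu_relax();
	outb(ACPI_EC_COMMAND_READ, cmd_port);		/* RD_EC */

	while (inb(cmd_port) & ACPI_EC_FLAG_IBF)	/* wait until the address is consumed */
		cpu_relax();
	outb(address, data_port);			/* EC address space offset */

	while (!(inb(cmd_port) & ACPI_EC_FLAG_OBF))	/* wait for the result byte */
		cpu_relax();
	return inb(data_port);
}
#endif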

#define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL	550	/* Wait 550us between EC accesses when polling */
#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
					 * when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES	16	/* Maximum number of parallel queries */

enum {
	EC_FLAGS_QUERY_ENABLED,			/* Query is enabled */
	EC_FLAGS_QUERY_PENDING,			/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,		/* Guard for SCI_EVT check */
	EC_FLAGS_EVENT_HANDLER_INSTALLED,	/* Event handler installed */
	EC_FLAGS_EC_HANDLER_INSTALLED,		/* OpReg handler installed */
	EC_FLAGS_QUERY_METHODS_INSTALLED,	/* _Qxx handlers installed */
	EC_FLAGS_STARTED,			/* Driver is started */
	EC_FLAGS_STOPPED,			/* Driver is stopped */
	EC_FLAGS_EVENTS_MASKED,			/* Events masked */
};

#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */

/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
module_param(ec_delay, uint, 0644);
MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");

static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
module_param(ec_max_queries, uint, 0644);
MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");

static bool ec_busy_polling __read_mostly;
module_param(ec_busy_polling, bool, 0644);
MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");

static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
module_param(ec_polling_guard, uint, 0644);
MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");

static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;

/*
 * If the number of false interrupts observed during one transaction
 * exceeds this threshold, the driver assumes that a GPE storm has
 * occurred and disables the GPE while the transaction is in progress.
 */
static unsigned int ec_storm_threshold __read_mostly = 8;
module_param(ec_storm_threshold, uint, 0644);
MODULE_PARM_DESC(ec_storm_threshold, "Maximum number of false GPEs not considered a GPE storm");

static bool ec_freeze_events __read_mostly;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disable event handling during suspend/resume");

static bool ec_no_wakeup __read_mostly;
module_param(ec_no_wakeup, bool, 0644);
MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");

struct acpi_ec_query_handler {
	struct list_head node;
	acpi_ec_query_func func;
	acpi_handle handle;
	void *data;
	u8 query_bit;
	struct kref kref;
};

struct transaction {
	const u8 *wdata;
	u8 *rdata;
	unsigned short irq_count;
	u8 command;
	u8 wi;
	u8 ri;
	u8 wlen;
	u8 rlen;
	u8 flags;
};

struct acpi_ec_query {
	struct transaction transaction;
	struct work_struct work;
	struct acpi_ec_query_handler *handler;
};

static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);

struct acpi_ec *first_ec;
EXPORT_SYMBOL(first_ec);

static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt;
static struct workqueue_struct *ec_query_wq;

static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Uses the ECDT GPE instead of the DSDT one */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */

/* --------------------------------------------------------------------------
 *                           Logging/Debugging
 * -------------------------------------------------------------------------- */

/*
 * Marker strings used by the developers to track the boundaries of the EC
 * handling stages in the logs.
 */
#ifdef DEBUG
#define EC_DBG_SEP	" "
#define EC_DBG_DRV	"+++++"
#define EC_DBG_STM	"====="
#define EC_DBG_REQ	"*****"
#define EC_DBG_EVT	"#####"
#else
#define EC_DBG_SEP	""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)

#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
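
/*
 * With DEBUG defined, the filter markers bracket each message, so e.g.
 * ec_log_drv("EC started") prints "ACPI: EC: +++++ EC started +++++";
 * without DEBUG the markers are empty and only the plain message appears.
 */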

/* --------------------------------------------------------------------------
 *                           Device Flags
 * -------------------------------------------------------------------------- */

static bool acpi_ec_started(struct acpi_ec *ec)
{
	return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	       !test_bit(EC_FLAGS_STOPPED, &ec->flags);
}

static bool acpi_ec_event_enabled(struct acpi_ec *ec)
{
	/*
	 * During the OSPM early stages (boot/resume), event handling
	 * shouldn't be enabled; only EC transactions are allowed to be
	 * performed.
	 */
	if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		return false;
	/*
	 * However, disabling the event handling is experimental for the
	 * late stage (suspend), and is controlled by the boot parameter
	 * "ec_freeze_events":
	 * 1. true:  The EC event handling is disabled before entering
	 *           the noirq stage.
	 * 2. false: The EC event handling is automatically disabled as
	 *           soon as the EC driver is stopped.
	 */
	if (ec_freeze_events)
		return acpi_ec_started(ec);
	else
		return test_bit(EC_FLAGS_STARTED, &ec->flags);
}

static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}

/* --------------------------------------------------------------------------
 *                           EC Registers
 * -------------------------------------------------------------------------- */

static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
{
	u8 x = inb(ec->command_addr);

	ec_dbg_raw("EC_SC(R) = 0x%2.2x "
		   "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
		   x,
		   !!(x & ACPI_EC_FLAG_SCI),
		   !!(x & ACPI_EC_FLAG_BURST),
		   !!(x & ACPI_EC_FLAG_CMD),
		   !!(x & ACPI_EC_FLAG_IBF),
		   !!(x & ACPI_EC_FLAG_OBF));
	return x;
}

static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
{
	u8 x = inb(ec->data_addr);

	ec->timestamp = jiffies;
	ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
	return x;
}

static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
{
	ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
	outb(command, ec->command_addr);
	ec->timestamp = jiffies;
}

static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
{
	ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
	outb(data, ec->data_addr);
	ec->timestamp = jiffies;
}

#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static const char *acpi_ec_cmd_string(u8 cmd)
{
	switch (cmd) {
	case 0x80:
		return "RD_EC";
	case 0x81:
		return "WR_EC";
	case 0x82:
		return "BE_EC";
	case 0x83:
		return "BD_EC";
	case 0x84:
		return "QR_EC";
	}
	return "UNKNOWN";
}
#else
#define acpi_ec_cmd_string(cmd)		"UNDEF"
#endif
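
/*
 * As a worked example, a status read of EC_SC = 0x21 decodes via the flag
 * masks above as SCI_EVT=1 (0x20) and OBF=1 (0x01): the EC has a pending
 * query event and a byte waiting in its output buffer.
 */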

/* --------------------------------------------------------------------------
 *                           GPE Registers
 * -------------------------------------------------------------------------- */

static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
{
	acpi_event_status gpe_status = 0;

	(void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
	return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
}

static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger the GPE, so
		 * the software needs to manually trigger a pseudo GPE event
		 * on EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}

static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
{
	if (close)
		acpi_disable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
	}
}

static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear the GPE in any context.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands would then be sent without the GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}

/* --------------------------------------------------------------------------
 *                           Transaction Management
 * -------------------------------------------------------------------------- */

static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed = false;

	ec->reference_count--;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)
		wake_up(&ec->wait);
}

static void acpi_ec_mask_events(struct acpi_ec *ec)
{
	if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		if (ec->gpe >= 0)
			acpi_ec_disable_gpe(ec, false);
		else
			disable_irq_nosync(ec->irq);

		ec_dbg_drv("Polling enabled");
		set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
	}
}

static void acpi_ec_unmask_events(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
		clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
		if (ec->gpe >= 0)
			acpi_ec_enable_gpe(ec, false);
		else
			enable_irq(ec->irq);

		ec_dbg_drv("Polling disabled");
	}
}
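
/*
 * The reference count thus doubles as a GPE enable count for GPE-based
 * ECs: starting the driver holds one reference (keeping the GPE enabled
 * for SCI_EVT), each in-flight transaction holds another, and only when
 * the count drops back to the driver's own reference is the EC considered
 * flushed and any waiter in acpi_ec_stop() woken up.
 */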

/*
 * acpi_ec_submit_flushable_request() - Increase the reference count unless
 *                                      the EC is being stopped (flushed)
 * @ec: the EC device
 *
 * This function must be used before taking a new action that should hold
 * the reference count. If this function returns false, then the action
 * must be discarded or it will prevent the flush operation from being
 * completed.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}

static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	acpi_ec_mask_events(ec);
	if (!acpi_ec_event_enabled(ec))
		return;
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		schedule_work(&ec->work);
	}
}

static void acpi_ec_complete_query(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
		ec_dbg_evt("Command(%s) unblocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
	acpi_ec_unmask_events(ec);
}

static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec);
}

static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event blocked");
}

/*
 * Process _Q events that might have accumulated in the EC.
 * Run with locked ec mutex.
 */
static void acpi_ec_clear(struct acpi_ec *ec)
{
	int i, status;
	u8 value = 0;

	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
		status = acpi_ec_query(ec, &value);
		if (status || !value)
			break;
	}
	if (unlikely(i == ACPI_EC_CLEAR_MAX))
		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
	else
		pr_info("%d stale EC events cleared\n", i);
}

static void acpi_ec_enable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec))
		__acpi_ec_enable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	/* Drain additional events if hardware requires that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
}

#ifdef CONFIG_PM_SLEEP
static bool acpi_ec_query_flushed(struct acpi_ec *ec)
{
	bool flushed;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = !ec->nr_pending_queries;
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void __acpi_ec_flush_event(struct acpi_ec *ec)
{
	/*
	 * When ec_freeze_events is true, the events have to be flushed
	 * here, before entering the noirq stage.
	 */
	wait_event(ec->wait, acpi_ec_query_flushed(ec));
	if (ec_query_wq)
		flush_workqueue(ec_query_wq);
}

static void acpi_ec_disable_event(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	__acpi_ec_disable_event(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	__acpi_ec_flush_event(ec);
}

void acpi_ec_flush_work(void)
{
	if (first_ec)
		__acpi_ec_flush_event(first_ec);

	flush_scheduled_work();
}
#endif /* CONFIG_PM_SLEEP */

static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	bool guarded = true;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If the firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when SCI_EVT will be cleared by the firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
	 * flagged, which means the SCI_EVT check has just been performed.
	 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
	 * guarding should have already been performed (via
	 * EC_FLAGS_QUERY_GUARDING) and should not be applied, so that the
	 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into the
	 * ACPI_EC_COMMAND_POLL state immediately.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		guarded = false;
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}

static int ec_transaction_polled(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static int ec_transaction_completed(struct acpi_ec *ec)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ec->lock, flags);
	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
		ret = 1;
	spin_unlock_irqrestore(&ec->lock, flags);
	return ret;
}

static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
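
/*
 * A transaction thus advances NONE -> POLL -> COMPLETE, and for QR_EC the
 * transition doubles as the point where the pending-query bookkeeping is
 * resolved per the assumed SCI_EVT clearing timing: "status" completes the
 * query on POLL, "query" on COMPLETE, and "event" only arms the guard so
 * that a later SCI_EVT re-check in advance_transaction() completes it.
 */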

static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	if (ec->gpe >= 0)
		acpi_ec_clear_gpe(ec);

	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement has been
	 * detected, so the next QR_EC submission is allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If the SCI bit is set, do not treat this as a false IRQ;
	 * otherwise an IRQ that simply had nothing to do would be counted
	 * as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_mask_events(ec);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}

static void start_transaction(struct acpi_ec *ec)
{
	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
	ec->curr->flags = 0;
}

static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec->busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling:
			 * 1. Wait for the transaction to be completed by
			 *    the GPE handler after the transaction enters
			 *    the ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters the ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}

static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}

static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_unmask_events(ec);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}

static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = acpi_ec_transaction_unlocked(ec, t);

	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}

static int acpi_ec_burst_enable(struct acpi_ec *ec)
{
	u8 d;
	struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
				.wdata = NULL, .rdata = &d,
				.wlen = 0, .rlen = 1};

	return acpi_ec_transaction(ec, &t);
}

static int acpi_ec_burst_disable(struct acpi_ec *ec)
{
	struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
				.wdata = NULL, .rdata = NULL,
				.wlen = 0, .rlen = 0};

	return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
				acpi_ec_transaction(ec, &t) : 0;
}

static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	int result;
	u8 d;
	struct transaction t = {.command = ACPI_EC_COMMAND_READ,
				.wdata = &address, .rdata = &d,
				.wlen = 1, .rlen = 1};

	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
	u8 wdata[2] = { address, data };
	struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
				.wdata = wdata, .rdata = NULL,
				.wlen = 2, .rlen = 0};

	return acpi_ec_transaction(ec, &t);
}

int ec_read(u8 addr, u8 *val)
{
	int err;
	u8 temp_data;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_read(first_ec, addr, &temp_data);

	if (!err) {
		*val = temp_data;
		return 0;
	}
	return err;
}
EXPORT_SYMBOL(ec_read);

int ec_write(u8 addr, u8 val)
{
	int err;

	if (!first_ec)
		return -ENODEV;

	err = acpi_ec_write(first_ec, addr, val);

	return err;
}
EXPORT_SYMBOL(ec_write);

int ec_transaction(u8 command,
		   const u8 *wdata, unsigned wdata_len,
		   u8 *rdata, unsigned rdata_len)
{
	struct transaction t = {.command = command,
				.wdata = wdata, .rdata = rdata,
				.wlen = wdata_len, .rlen = rdata_len};

	if (!first_ec)
		return -ENODEV;

	return acpi_ec_transaction(first_ec, &t);
}
EXPORT_SYMBOL(ec_transaction);

/* Get the handle to the EC device */
acpi_handle ec_get_handle(void)
{
	if (!first_ec)
		return NULL;
	return first_ec->handle;
}
EXPORT_SYMBOL(ec_get_handle);
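
/*
 * A minimal sketch (not compiled) of how a platform driver might consume
 * the exported helpers above; the register offset (0x2F) and the enable
 * bit are hypothetical and entirely firmware-specific.
 */
#if 0
static int example_toggle_fan_bit(void)
{
	u8 reg;
	int err = ec_read(0x2F, &reg);		/* hypothetical EC address */

	if (err)
		return err;
	return ec_write(0x2F, reg | 0x01);	/* hypothetical enable bit */
}
#endif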

static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static bool acpi_ec_stopped(struct acpi_ec *ec)
{
	unsigned long flags;
	bool flushed;

	spin_lock_irqsave(&ec->lock, flags);
	flushed = acpi_ec_flushed(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
	return flushed;
}

static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		} else if (!ec_freeze_events)
			__acpi_ec_disable_event(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec_log_drv("interrupt blocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	ec->busy_polling = ec_busy_polling;
	ec->polling_guard = ec_polling_guard;
	ec_log_drv("interrupt unblocked");
	spin_unlock_irqrestore(&ec->lock, flags);
}

void acpi_ec_block_transactions(void)
{
	struct acpi_ec *ec = first_ec;

	if (!ec)
		return;

	mutex_lock(&ec->mutex);
	/* Prevent transactions from being carried out */
	acpi_ec_stop(ec, true);
	mutex_unlock(&ec->mutex);
}

void acpi_ec_unblock_transactions(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the
	 * mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}

/* --------------------------------------------------------------------------
 *                             Event Management
 * -------------------------------------------------------------------------- */

static struct acpi_ec_query_handler *
acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
{
	if (handler)
		kref_get(&handler->kref);
	return handler;
}

static struct acpi_ec_query_handler *
acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
{
	struct acpi_ec_query_handler *handler;
	bool found = false;

	mutex_lock(&ec->mutex);
	list_for_each_entry(handler, &ec->list, node) {
		if (value == handler->query_bit) {
			found = true;
			break;
		}
	}
	mutex_unlock(&ec->mutex);
	return found ? acpi_ec_get_query_handler(handler) : NULL;
}

static void acpi_ec_query_handler_release(struct kref *kref)
{
	struct acpi_ec_query_handler *handler =
		container_of(kref, struct acpi_ec_query_handler, kref);

	kfree(handler);
}

static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
{
	kref_put(&handler->kref, acpi_ec_query_handler_release);
}

int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
			      acpi_handle handle, acpi_ec_query_func func,
			      void *data)
{
	struct acpi_ec_query_handler *handler =
		kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);

	if (!handler)
		return -ENOMEM;

	handler->query_bit = query_bit;
	handler->handle = handle;
	handler->func = func;
	handler->data = data;
	mutex_lock(&ec->mutex);
	kref_init(&handler->kref);
	list_add(&handler->node, &ec->list);
	mutex_unlock(&ec->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);

static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
					  bool remove_all, u8 query_bit)
{
	struct acpi_ec_query_handler *handler, *tmp;
	LIST_HEAD(free_list);

	mutex_lock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &ec->list, node) {
		if (remove_all || query_bit == handler->query_bit) {
			list_del_init(&handler->node);
			list_add(&handler->node, &free_list);
		}
	}
	mutex_unlock(&ec->mutex);
	list_for_each_entry_safe(handler, tmp, &free_list, node)
		acpi_ec_put_query_handler(handler);
}

void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
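
/*
 * A minimal sketch (not compiled) of registering a callback for a specific
 * _Q event via the exported API above; the 0x42 query bit and the handler
 * body are hypothetical. When a func is supplied, it is called instead of
 * evaluating the corresponding _Qxx method.
 */
#if 0
static int example_q_handler(void *data)	/* acpi_ec_query_func signature */
{
	pr_info("EC query 0x42 fired\n");	/* hypothetical event */
	return 0;
}

static int example_register(void)
{
	if (!first_ec)
		return -ENODEV;
	return acpi_ec_add_query_handler(first_ec, 0x42, NULL,
					 example_q_handler, NULL);
}
#endif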

static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
{
	struct acpi_ec_query *q;
	struct transaction *t;

	q = kzalloc(sizeof(struct acpi_ec_query), GFP_KERNEL);
	if (!q)
		return NULL;
	INIT_WORK(&q->work, acpi_ec_event_processor);
	t = &q->transaction;
	t->command = ACPI_EC_COMMAND_QUERY;
	t->rdata = pval;
	t->rlen = 1;
	return q;
}

static void acpi_ec_delete_query(struct acpi_ec_query *q)
{
	if (q) {
		if (q->handler)
			acpi_ec_put_query_handler(q->handler);
		kfree(q);
	}
}

static void acpi_ec_event_processor(struct work_struct *work)
{
	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
	struct acpi_ec_query_handler *handler = q->handler;

	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
	if (handler->func)
		handler->func(handler->data);
	else if (handler->handle)
		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
	acpi_ec_delete_query(q);
}

static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	struct acpi_ec_query *q;

	q = acpi_ec_create_query(&value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clears the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	if (!value)
		result = -ENODATA;
	if (result)
		goto err_exit;

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on
	 * Windows:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before schedule_work() to make it appear
	 * before any other log entries emitted during the work queue
	 * execution.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);
	if (!queue_work(ec_query_wq, &q->work)) {
		ec_dbg_evt("Query(0x%02x) overlapped", value);
		result = -EBUSY;
	}

err_exit:
	if (result)
		acpi_ec_delete_query(q);
	if (data)
		*data = value;
	return result;
}

static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of SCI_EVT if no one else is doing
			 * so, i.e. if there is no transaction in flight.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}

static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exiting, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	acpi_ec_check_event(ec);
}

static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);
}

static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	acpi_ec_handle_interrupt(data);
	return ACPI_INTERRUPT_HANDLED;
}

static irqreturn_t acpi_ec_irq_handler(int irq, void *data)
{
	acpi_ec_handle_interrupt(data);
	return IRQ_HANDLED;
}
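
/*
 * To summarize the event path implemented above: a GPE or GpioInt fires,
 * advance_transaction() sees SCI_EVT set and calls acpi_ec_submit_query(),
 * which schedules acpi_ec_event_handler(); that issues QR_EC via
 * acpi_ec_query() to fetch the event number, and finally the matching
 * handler or _Qxx method runs on the ec_query_wq workqueue.
 */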

/* --------------------------------------------------------------------------
 *                           Address Space Management
 * -------------------------------------------------------------------------- */

static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}

/* --------------------------------------------------------------------------
 *                             Driver Interface
 * -------------------------------------------------------------------------- */

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context);

static void acpi_ec_free(struct acpi_ec *ec)
{
	if (first_ec == ec)
		first_ec = NULL;
	if (boot_ec == ec)
		boot_ec = NULL;
	kfree(ec);
}

static struct acpi_ec *acpi_ec_alloc(void)
{
	struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);

	if (!ec)
		return NULL;
	mutex_init(&ec->mutex);
	init_waitqueue_head(&ec->wait);
	INIT_LIST_HEAD(&ec->list);
	spin_lock_init(&ec->lock);
	INIT_WORK(&ec->work, acpi_ec_event_handler);
	ec->timestamp = jiffies;
	ec->busy_polling = true;
	ec->polling_guard = 0;
	ec->gpe = -1;
	ec->irq = -1;
	return ec;
}

static acpi_status
acpi_ec_register_query_methods(acpi_handle handle, u32 level,
			       void *context, void **return_value)
{
	char node_name[5];
	struct acpi_buffer buffer = { sizeof(node_name), node_name };
	struct acpi_ec *ec = context;
	int value = 0;
	acpi_status status;

	status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);

	if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
		acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
	return AE_OK;
}

static acpi_status
ec_parse_device(acpi_handle handle, u32 level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depends on it */
	ec->command_addr = ec->data_addr = 0;

	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;
	if (ec->data_addr == 0 || ec->command_addr == 0)
		return AE_OK;

	if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
		/*
		 * Always inherit the GPE number setting from the ECDT
		 * EC.
		 */
		ec->gpe = boot_ec->gpe;
	} else {
		/* Get GPE bit assignment (EC events). */
		/* TODO: Add support for _GPE returning a package */
		status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
		if (ACPI_SUCCESS(status))
			ec->gpe = tmp;

		/*
		 * Errors are non-fatal, allowing for ACPI Reduced Hardware
		 * platforms which use GpioInt instead of GPE.
		 */
	}
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}

static void install_gpe_event_handler(struct acpi_ec *ec)
{
	acpi_status status =
		acpi_install_gpe_raw_handler(NULL, ec->gpe,
					     ACPI_GPE_EDGE_TRIGGERED,
					     &acpi_ec_gpe_handler,
					     ec);
	if (ACPI_SUCCESS(status)) {
		/* This is not fatal as we can poll EC events */
		set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
		acpi_ec_leave_noirq(ec);
		if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
		    ec->reference_count >= 1)
			acpi_ec_enable_gpe(ec, true);
	}
}

/* ACPI reduced hardware platforms use a GpioInt specified in _CRS. */
static int install_gpio_irq_event_handler(struct acpi_ec *ec,
					  struct acpi_device *device)
{
	int irq = acpi_dev_gpio_irq_get(device, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = request_irq(irq, acpi_ec_irq_handler, IRQF_SHARED,
			  "ACPI EC", ec);

	/*
	 * Unlike the GPE case, we treat errors here as fatal, we'll only
	 * implement GPIO polling if we find a case that needs it.
	 */
	if (ret < 0)
		return ret;

	ec->irq = irq;
	set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
	acpi_ec_leave_noirq(ec);

	return 0;
}

/*
 * Note: This function returns an error code only when the address space
 *       handler is not installed, which means "not able to handle
 *       transactions".
 */
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
			       bool handle_events)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler(ec->handle,
							    ACPI_ADR_SPACE_EC,
							    &acpi_ec_space_handler,
							    NULL, ec);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {
				/*
				 * Maybe the OS failed to evaluate the _REG
				 * object. The AE_NOT_FOUND error is
				 * ignored and the OS continues to
				 * initialize the EC.
				 */
				pr_err("Failed to evaluate the _REG object of the EC device; a broken BIOS is suspected\n");
			} else {
				acpi_ec_stop(ec, false);
				return -ENODEV;
			}
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (!handle_events)
		return 0;

	if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		/* Find and register all query methods */
		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
				    acpi_ec_register_query_methods,
				    NULL, ec, NULL);
		set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
	if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0) {
			install_gpe_event_handler(ec);
		} else if (device) {
			int ret = install_gpio_irq_event_handler(ec, device);

			if (ret)
				return ret;
		} else { /* No GPE and no GpioInt? */
			return -ENODEV;
		}
	}
	/* EC is fully operational, allow queries */
	acpi_ec_enable_event(ec);

	return 0;
}

static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
				 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
			pr_err("failed to remove space handler\n");
		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	/*
	 * Stop handling the EC transactions after removing the operation
	 * region handler. This is required because _REG(DISCONNECT)
	 * invoked during the removal can result in new EC transactions.
	 *
	 * Flush the EC requests and thus disable the GPE before removing
	 * the GPE handler. This is required by the current ACPICA GPE
	 * core. The ACPICA GPE core will automatically disable a GPE when
	 * it is indicated but there is no way to handle it. So the drivers
	 * must disable the GPEs prior to removing the GPE handlers.
	 */
	acpi_ec_stop(ec, false);

	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
		if (ec->gpe >= 0 &&
		    ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
				 &acpi_ec_gpe_handler)))
			pr_err("failed to remove gpe handler\n");

		if (ec->irq >= 0)
			free_irq(ec->irq, ec);

		clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
	}
	if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
		acpi_ec_remove_query_handlers(ec, true, 0);
		clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
	}
}

static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device,
			 bool handle_events)
{
	int ret;

	ret = ec_install_handlers(ec, device, handle_events);
	if (ret)
		return ret;

	/* First EC capable of handling transactions */
	if (!first_ec) {
		first_ec = ec;
		acpi_handle_info(first_ec->handle, "Used as first EC\n");
	}

	acpi_handle_info(ec->handle,
			 "GPE=0x%x, IRQ=%d, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
			 ec->gpe, ec->irq, ec->command_addr, ec->data_addr);
	return ret;
}

static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
{
	struct acpi_table_ecdt *ecdt_ptr;
	acpi_status status;
	acpi_handle handle;

	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return false;

	status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
	if (ACPI_FAILURE(status))
		return false;

	*phandle = handle;
	return true;
}

static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	bool dep_update = true;
	acpi_status status;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
		boot_ec_is_ecdt = true;
		ec = boot_ec;
		dep_update = false;
	} else {
		ec = acpi_ec_alloc();
		if (!ec)
			return -ENOMEM;

		status = ec_parse_device(device->handle, 0, ec, NULL);
		if (status != AE_CTRL_TERMINATE) {
			ret = -EINVAL;
			goto err_alloc;
		}

		if (boot_ec && ec->command_addr == boot_ec->command_addr &&
		    ec->data_addr == boot_ec->data_addr) {
			boot_ec_is_ecdt = false;
			/*
			 * Trust the PNP0C09 namespace location rather than
			 * the ECDT ID. But trust the ECDT GPE rather than
			 * _GPE because of ASUS quirks, so do not change
			 * boot_ec->gpe to ec->gpe.
			 */
			boot_ec->handle = ec->handle;
			acpi_handle_debug(ec->handle, "duplicated.\n");
			acpi_ec_free(ec);
			ec = boot_ec;
		}
	}

	ret = acpi_ec_setup(ec, device, true);
	if (ret)
		goto err_query;

	if (ec == boot_ec)
		acpi_handle_info(boot_ec->handle,
				 "Boot %s EC used to handle transactions and events\n",
				 boot_ec_is_ecdt ? "ECDT" : "DSDT");

	device->driver_data = ec;

	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	if (dep_update) {
		/* Reprobe devices depending on the EC */
		acpi_walk_dep_device_list(ec->handle);
	}
	acpi_handle_debug(ec->handle, "enumerated.\n");
	return 0;

err_query:
	if (ec != boot_ec)
		acpi_ec_remove_query_handlers(ec, true, 0);
err_alloc:
	if (ec != boot_ec)
		acpi_ec_free(ec);
	return ret;
}

static int acpi_ec_remove(struct acpi_device *device)
{
	struct acpi_ec *ec;

	if (!device)
		return -EINVAL;

	ec = acpi_driver_data(device);
	release_region(ec->data_addr, 1);
	release_region(ec->command_addr, 1);
	device->driver_data = NULL;
	if (ec != boot_ec) {
		ec_remove_handlers(ec);
		acpi_ec_free(ec);
	}
	return 0;
}

static acpi_status
ec_parse_io_ports(struct acpi_resource *resource, void *context)
{
	struct acpi_ec *ec = context;

	if (resource->type != ACPI_RESOURCE_TYPE_IO)
		return AE_OK;

	/*
	 * The first address region returned is the data port, and
	 * the second address region returned is the status/command
	 * port.
	 */
	if (ec->data_addr == 0)
		ec->data_addr = resource->data.io.minimum;
	else if (ec->command_addr == 0)
		ec->command_addr = resource->data.io.minimum;
	else
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{ACPI_ECDT_HID, 0},
	{"", 0},
};

/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
 */
void __init acpi_ec_dsdt_probe(void)
{
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/*
	 * If a platform has an ECDT, there is no need to proceed as the
	 * following probe is not a part of the ACPI device enumeration,
	 * executing _STA is not safe, and thus this probe risks picking
	 * up an invalid EC device.
	 */
	if (boot_ec)
		return;

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	/*
	 * At this point, the namespace is initialized, so start to find
	 * the namespace objects.
	 */
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
	if (ACPI_FAILURE(status) || !ec->handle) {
		acpi_ec_free(ec);
		return;
	}

	/*
	 * When the DSDT EC is available, always re-configure the boot EC
	 * to have _REG evaluated. _REG can only be evaluated after the
	 * namespace initialization.
	 * At this point, the GPE is not fully initialized, so do not
	 * handle the events yet.
	 */
	ret = acpi_ec_setup(ec, NULL, false);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;

	acpi_handle_info(ec->handle,
			 "Boot DSDT EC used to handle transactions\n");
}
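
/*
 * To recap the boot-time sequence: acpi_ec_ecdt_probe() (further below)
 * sets up a transaction-only boot EC from the ECDT before the namespace
 * exists, acpi_ec_dsdt_probe() (above) re-configures it from the PNP0C09
 * node once the namespace is ready, and acpi_ec_add()/acpi_ec_ecdt_start()
 * finally enable event handling when the GPE infrastructure is available.
 */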

/*
 * If the DSDT EC is not functioning, we still need to prepare a fully
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

	if (!boot_ec)
		return -ENODEV;
	/* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
	if (!boot_ec_is_ecdt)
		return -ENODEV;

	/*
	 * At this point, the namespace and the GPE is initialized, so
	 * start to find the namespace objects and handle the events.
	 *
	 * Note: ec->handle can be valid if this function is called after
	 * acpi_ec_add(), hence the fast path.
	 */
	if (boot_ec->handle == ACPI_ROOT_OBJECT) {
		if (!acpi_ec_ecdt_get_handle(&handle))
			return -ENODEV;
		boot_ec->handle = handle;
	}

	/* Register to ACPI bus with PM ops attached */
	return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}

#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set; in that case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif

/*
 * On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}
/*
 * Some DSDTs contain a wrong GPE setting:
 * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
 * https://bugzilla.kernel.org/show_bug.cgi?id=195651
 */
static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing to ignore the DSDT GPE setting.\n");
	EC_FLAGS_IGNORE_DSDT_GPE = 1;
	return 0;
}

static const struct dmi_system_id ec_dmi_table[] __initconst = {
	{
		ec_correct_ecdt, "MSI MS-171F", {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS FX502VD", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS FX502VE", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS GL702VMK", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS X550VXK", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
	{
		ec_honor_ecdt_gpe, "ASUS X580VD", {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
	{
		ec_clear_on_resume, "Samsung hardware", {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};

void __init acpi_ec_ecdt_probe(void)
{
	struct acpi_table_ecdt *ecdt_ptr;
	struct acpi_ec *ec;
	acpi_status status;
	int ret;

	/* Generate a boot ec context. */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return;

	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
		/*
		 * Asus X50GL:
		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
		 */
		return;
	}

	ec = acpi_ec_alloc();
	if (!ec)
		return;

	if (EC_FLAGS_CORRECT_ECDT) {
		ec->command_addr = ecdt_ptr->data.address;
		ec->data_addr = ecdt_ptr->control.address;
	} else {
		ec->command_addr = ecdt_ptr->control.address;
		ec->data_addr = ecdt_ptr->data.address;
	}

	/*
	 * Ignore the GPE value on Reduced Hardware platforms.
	 * Some products have this set to an erroneous value.
	 */
	if (!acpi_gbl_reduced_hardware)
		ec->gpe = ecdt_ptr->gpe;

	ec->handle = ACPI_ROOT_OBJECT;

	/*
	 * At this point, the namespace is not initialized, so do not find
	 * the namespace objects, or handle the events.
	 */
	ret = acpi_ec_setup(ec, NULL, false);
	if (ret) {
		acpi_ec_free(ec);
		return;
	}

	boot_ec = ec;
	boot_ec_is_ecdt = true;

	pr_info("Boot ECDT EC used to handle transactions\n");
}

#ifdef CONFIG_PM_SLEEP
static int acpi_ec_suspend(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	if (!pm_suspend_no_platform() && ec_freeze_events)
		acpi_ec_disable_event(ec);
	return 0;
}
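/*
 * Ordering note: .suspend runs before .suspend_noirq during suspend and
 * .resume runs after .resume_noirq during resume, so EC events are
 * disabled before the GPE is masked below and re-enabled only after the
 * GPE has been unmasked again.
 */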
static int acpi_ec_suspend_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	/*
	 * The SCI handler doesn't run at this point, so the GPE can be
	 * masked at the low level without side effects.
	 */
	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

	acpi_ec_enter_noirq(ec);

	return 0;
}

static int acpi_ec_resume_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	acpi_ec_leave_noirq(ec);

	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);

	return 0;
}

static int acpi_ec_resume(struct device *dev)
{
	struct acpi_ec *ec =
		acpi_driver_data(to_acpi_device(dev));

	acpi_ec_enable_event(ec);
	return 0;
}

void acpi_ec_mark_gpe_for_wake(void)
{
	if (first_ec && !ec_no_wakeup)
		acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
}
EXPORT_SYMBOL_GPL(acpi_ec_mark_gpe_for_wake);

void acpi_ec_set_gpe_wake_mask(u8 action)
{
	if (pm_suspend_no_platform() && first_ec && !ec_no_wakeup)
		acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
}

bool acpi_ec_dispatch_gpe(void)
{
	u32 ret;

	if (!first_ec)
		return false;

	ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
	if (ret == ACPI_INTERRUPT_HANDLED) {
		pm_pr_dbg("EC GPE dispatched\n");
		return true;
	}
	return false;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops acpi_ec_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};

static int param_set_event_clearing(const char *val,
				    const struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else {
		result = -EINVAL;
	}
	return result;
}

static int param_get_event_clearing(char *buffer,
				    const struct kernel_param *kp)
{
	switch (ec_event_clearing) {
	case ACPI_EC_EVT_TIMING_STATUS:
		return sprintf(buffer, "status");
	case ACPI_EC_EVT_TIMING_QUERY:
		return sprintf(buffer, "query");
	case ACPI_EC_EVT_TIMING_EVENT:
		return sprintf(buffer, "event");
	default:
		return sprintf(buffer, "invalid");
	}
}

/* Exposed as acpi.ec_event_clearing, like the other module parameters. */
module_param_call(ec_event_clearing, param_set_event_clearing,
		  param_get_event_clearing, NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");

static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
	},
	.drv.pm = &acpi_ec_pm,
};
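/*
 * _Qxx evaluations run from a dedicated workqueue. The third argument to
 * alloc_workqueue() below is max_active, so at most ec_max_queries query
 * handlers can be in flight at once.
 */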
static inline int acpi_ec_query_init(void)
{
	if (!ec_query_wq) {
		ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
		if (!ec_query_wq)
			return -ENODEV;
	}
	return 0;
}

static inline void acpi_ec_query_exit(void)
{
	if (ec_query_wq) {
		destroy_workqueue(ec_query_wq);
		ec_query_wq = NULL;
	}
}

/*
 * The X1 Carbon 6th is listed twice because firmware has shipped with
 * both the "Thinkpad" and "ThinkPad" capitalizations, and DMI matching
 * is case-sensitive.
 */
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
	{
		.ident = "Thinkpad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Yoga 3rd",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
		},
	},
	{ },
};

int __init acpi_ec_init(void)
{
	int result;
	int ecdt_fail, dsdt_fail;

	/* register workqueue for _Qxx evaluations */
	result = acpi_ec_query_init();
	if (result)
		return result;

	/*
	 * Disable EC wakeup on the following systems to prevent periodic
	 * wakeup from EC GPE.
	 */
	if (dmi_check_system(acpi_ec_no_wakeup)) {
		ec_no_wakeup = true;
		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
	}

	/* Drivers must be started after acpi_ec_query_init() */
	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
	/*
	 * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
	 * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
	 * settings but invalid DSDT settings.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=196847
	 */
	ecdt_fail = acpi_ec_ecdt_start();
	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}

/* EC driver currently not unloadable */
#if 0
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_query_exit();
}
#endif /* 0 */