/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#ifndef _MEI_DEV_H_
#define _MEI_DEV_H_

#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>

static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
{
	return memcmp(&u1, &u2, sizeof(uuid_le));
}

#include "hw.h"
#include "hbm.h"

#define MEI_SLOT_SIZE		sizeof(u32)
#define MEI_RD_MSG_BUF_SIZE	(128 * MEI_SLOT_SIZE)

/*
 * Number of Maximum MEI Clients
 */
#define MEI_CLIENTS_MAX 256

/*
 * maximum number of consecutive resets
 */
#define MEI_MAX_CONSEC_RESET 3

/*
 * Number of File descriptors/handles
 * that can be opened to the driver.
 *
 * Limit to 255: 256 Total Clients
 * minus internal client for MEI Bus Messages
 */
#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)

/* File state */
enum file_state {
	MEI_FILE_UNINITIALIZED = 0,
	MEI_FILE_INITIALIZING,
	MEI_FILE_CONNECTING,
	MEI_FILE_CONNECTED,
	MEI_FILE_DISCONNECTING,
	MEI_FILE_DISCONNECT_REPLY,
	MEI_FILE_DISCONNECT_REQUIRED,
	MEI_FILE_DISCONNECTED,
};

/* MEI device states */
enum mei_dev_state {
	MEI_DEV_UNINITIALIZED = 0,
	MEI_DEV_INITIALIZING,
	MEI_DEV_INIT_CLIENTS,
	MEI_DEV_ENABLED,
	MEI_DEV_RESETTING,
	MEI_DEV_DISABLED,
	MEI_DEV_POWERING_DOWN,
	MEI_DEV_POWER_DOWN,
	MEI_DEV_POWER_UP
};

/**
 * enum mei_dev_pxp_mode - MEI PXP mode state
 *
 * @MEI_DEV_PXP_DEFAULT: PCH based device, no initialization required
 * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware
 * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response
 * @MEI_DEV_PXP_READY: device initialized
 */
enum mei_dev_pxp_mode {
	MEI_DEV_PXP_DEFAULT = 0,
	MEI_DEV_PXP_INIT = 1,
	MEI_DEV_PXP_SETUP = 2,
	MEI_DEV_PXP_READY = 3,
};

/**
 * enum mei_dev_reset_to_pxp - reset to PXP mode performed
 *
 * @MEI_DEV_RESET_TO_PXP_DEFAULT: before reset
 * @MEI_DEV_RESET_TO_PXP_PERFORMED: reset performed
 * @MEI_DEV_RESET_TO_PXP_DONE: reset processed
 */
enum mei_dev_reset_to_pxp {
	MEI_DEV_RESET_TO_PXP_DEFAULT = 0,
	MEI_DEV_RESET_TO_PXP_PERFORMED = 1,
	MEI_DEV_RESET_TO_PXP_DONE = 2,
};

const char *mei_dev_state_str(int state);

enum mei_file_transaction_states {
	MEI_IDLE,
	MEI_WRITING,
	MEI_WRITE_COMPLETE,
};

/**
 * enum mei_cb_file_ops - file operation associated with the callback
 * @MEI_FOP_READ: read
 * @MEI_FOP_WRITE: write
 * @MEI_FOP_CONNECT: connect
 * @MEI_FOP_DISCONNECT: disconnect
 * @MEI_FOP_DISCONNECT_RSP: disconnect response
 * @MEI_FOP_NOTIFY_START: start notification
 * @MEI_FOP_NOTIFY_STOP: stop notification
 * @MEI_FOP_DMA_MAP: request client dma map
 * @MEI_FOP_DMA_UNMAP: request client dma unmap
 */
enum mei_cb_file_ops {
	MEI_FOP_READ = 0,
	MEI_FOP_WRITE,
	MEI_FOP_CONNECT,
	MEI_FOP_DISCONNECT,
	MEI_FOP_DISCONNECT_RSP,
	MEI_FOP_NOTIFY_START,
	MEI_FOP_NOTIFY_STOP,
	MEI_FOP_DMA_MAP,
	MEI_FOP_DMA_UNMAP,
};

/**
 * enum mei_cl_io_mode - io mode between driver and fw
 *
 * @MEI_CL_IO_TX_BLOCKING: send is blocking
 * @MEI_CL_IO_TX_INTERNAL: internal communication between driver and FW
 *
 * @MEI_CL_IO_RX_NONBLOCK: recv is non-blocking
 *
 * @MEI_CL_IO_SGL: send command with sgl list.
 */
enum mei_cl_io_mode {
	MEI_CL_IO_TX_BLOCKING = BIT(0),
	MEI_CL_IO_TX_INTERNAL = BIT(1),

	MEI_CL_IO_RX_NONBLOCK = BIT(2),

	MEI_CL_IO_SGL         = BIT(3),
};
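/*
 * Illustrative note (not part of the original header): these io mode bits
 * are intended to be OR-ed together and passed as the 'mode' argument of
 * the bus send/receive helpers declared later in this file, e.g. a blocking
 * send carrying an SGL command could use:
 *
 *	unsigned int mode = MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_SGL;
 */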
/*
 * Intel MEI message data struct
 */
struct mei_msg_data {
	size_t size;
	unsigned char *data;
};

struct mei_dma_data {
	u8 buffer_id;
	void *vaddr;
	dma_addr_t daddr;
	size_t size;
};

/**
 * struct mei_dma_dscr - dma address descriptor
 *
 * @vaddr: dma buffer virtual address
 * @daddr: dma buffer physical address
 * @size : dma buffer size
 */
struct mei_dma_dscr {
	void *vaddr;
	dma_addr_t daddr;
	size_t size;
};

/* Maximum number of processed FW status registers */
#define MEI_FW_STATUS_MAX 6
/* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */
#define MEI_FW_STATUS_STR_SZ (MEI_FW_STATUS_MAX * (8 + 1))

/*
 * struct mei_fw_status - storage of FW status data
 *
 * @count: number of actually available elements in array
 * @status: FW status registers
 */
struct mei_fw_status {
	int count;
	u32 status[MEI_FW_STATUS_MAX];
};

/**
 * struct mei_me_client - representation of me (fw) client
 *
 * @list: link in me client list
 * @refcnt: struct reference count
 * @props: client properties
 * @client_id: me client id
 * @tx_flow_ctrl_creds: flow control credits
 * @connect_count: number of connections to this client
 * @bus_added: added to bus
 */
struct mei_me_client {
	struct list_head list;
	struct kref refcnt;
	struct mei_client_properties props;
	u8 client_id;
	u8 tx_flow_ctrl_creds;
	u8 connect_count;
	u8 bus_added;
};

struct mei_cl;

/**
 * struct mei_cl_cb - file operation callback structure
 *
 * @list: link in callback queue
 * @cl: file client who is running this operation
 * @fop_type: file operation type
 * @buf: buffer for data associated with the callback
 * @buf_idx: last read index
 * @vtag: virtual tag
 * @fp: pointer to file structure
 * @status: io status of the cb
 * @internal: communication between driver and FW flag
 * @blocking: transmission blocking mode
 * @ext_hdr: extended header
 */
struct mei_cl_cb {
	struct list_head list;
	struct mei_cl *cl;
	enum mei_cb_file_ops fop_type;
	struct mei_msg_data buf;
	size_t buf_idx;
	u8 vtag;
	const struct file *fp;
	int status;
	u32 internal:1;
	u32 blocking:1;
	struct mei_ext_hdr *ext_hdr;
};

/**
 * struct mei_cl_vtag - file pointer to vtag mapping structure
 *
 * @list: link in map queue
 * @fp: file pointer
 * @vtag: corresponding vtag
 * @pending_read: the read is pending on this file
 */
struct mei_cl_vtag {
	struct list_head list;
	const struct file *fp;
	u8 vtag;
	u8 pending_read:1;
};

/**
 * struct mei_cl - me client host representation
 *    carried in file->private_data
 *
 * @link: link in the clients list
 * @dev: mei parent device
 * @state: file operation state
 * @tx_wait: wait queue for tx completion
 * @rx_wait: wait queue for rx completion
 * @wait: wait queue for management operation
 * @ev_wait: notification wait queue
 * @ev_async: event async notification
 * @status: connection status
 * @me_cl: fw client connected
 * @fp: file associated with client
 * @host_client_id: host id
 * @vtag_map: vtag map
 * @tx_flow_ctrl_creds: transmit flow credentials
 * @rx_flow_ctrl_creds: receive flow credentials
 * @timer_count: watchdog timer for operation completion
 * @notify_en: notification - enabled/disabled
 * @notify_ev: pending notification event
 * @tx_cb_queued: number of tx callbacks in queue
 * @writing_state: state of the tx
 * @rd_pending: pending read credits
 * @rd_completed_lock: protects rd_completed queue
 * @rd_completed: completed read
 * @dma: dma settings
 * @dma_mapped: dma buffer is currently mapped.
 *
 * @cldev: device on the mei client bus
 */
struct mei_cl {
	struct list_head link;
	struct mei_device *dev;
	enum file_state state;
	wait_queue_head_t tx_wait;
	wait_queue_head_t rx_wait;
	wait_queue_head_t wait;
	wait_queue_head_t ev_wait;
	struct fasync_struct *ev_async;
	int status;
	struct mei_me_client *me_cl;
	const struct file *fp;
	u8 host_client_id;
	struct list_head vtag_map;
	u8 tx_flow_ctrl_creds;
	u8 rx_flow_ctrl_creds;
	u8 timer_count;
	u8 notify_en;
	u8 notify_ev;
	u8 tx_cb_queued;
	enum mei_file_transaction_states writing_state;
	struct list_head rd_pending;
	spinlock_t rd_completed_lock; /* protects rd_completed queue */
	struct list_head rd_completed;
	struct mei_dma_data dma;
	u8 dma_mapped;

	struct mei_cl_device *cldev;
};

#define MEI_TX_QUEUE_LIMIT_DEFAULT 50
#define MEI_TX_QUEUE_LIMIT_MAX 255
#define MEI_TX_QUEUE_LIMIT_MIN 30
/**
 * struct mei_hw_ops - hw specific ops
 *
 * @host_is_ready : query for host readiness
 *
 * @hw_is_ready : query if hw is ready
 * @hw_reset : reset hw
 * @hw_start : start hw after reset
 * @hw_config : configure hw
 *
 * @fw_status : get fw status registers
 * @trc_status : get trc status register
 * @pg_state : power gating state of the device
 * @pg_in_transition : is device now in pg transition
 * @pg_is_enabled : is power gating enabled
 *
 * @intr_clear : clear pending interrupts
 * @intr_enable : enable interrupts
 * @intr_disable : disable interrupts
 * @synchronize_irq : synchronize irqs
 *
 * @hbuf_free_slots : query for write buffer empty slots
 * @hbuf_is_ready : query if write buffer is empty
 * @hbuf_depth : query for write buffer depth
 *
 * @write : write a message to FW
 *
 * @rdbuf_full_slots : query how many slots are filled
 *
 * @read_hdr : get first 4 bytes (header)
 * @read : read a buffer from the FW
 */
struct mei_hw_ops {

	bool (*host_is_ready)(struct mei_device *dev);

	bool (*hw_is_ready)(struct mei_device *dev);
	int (*hw_reset)(struct mei_device *dev, bool enable);
	int (*hw_start)(struct mei_device *dev);
	int (*hw_config)(struct mei_device *dev);

	int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
	int (*trc_status)(struct mei_device *dev, u32 *trc);

	enum mei_pg_state (*pg_state)(struct mei_device *dev);
	bool (*pg_in_transition)(struct mei_device *dev);
	bool (*pg_is_enabled)(struct mei_device *dev);

	void (*intr_clear)(struct mei_device *dev);
	void (*intr_enable)(struct mei_device *dev);
	void (*intr_disable)(struct mei_device *dev);
	void (*synchronize_irq)(struct mei_device *dev);

	int (*hbuf_free_slots)(struct mei_device *dev);
	bool (*hbuf_is_ready)(struct mei_device *dev);
	u32 (*hbuf_depth)(const struct mei_device *dev);
	int (*write)(struct mei_device *dev,
		     const void *hdr, size_t hdr_len,
		     const void *data, size_t data_len);

	int (*rdbuf_full_slots)(struct mei_device *dev);

	u32 (*read_hdr)(const struct mei_device *dev);
	int (*read)(struct mei_device *dev,
		    unsigned char *buf, unsigned long len);
};
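/*
 * Illustrative sketch (assumption, not from the original header): a hardware
 * backend exposes its callbacks through a constant ops table that is handed
 * to mei_device_init(); the my_* names below are placeholders:
 *
 *	static const struct mei_hw_ops my_hw_ops = {
 *		.host_is_ready = my_host_is_ready,
 *		.hw_is_ready   = my_hw_is_ready,
 *		.hw_reset      = my_hw_reset,
 *		.hw_start      = my_hw_start,
 *		.hw_config     = my_hw_config,
 *		.fw_status     = my_fw_status,
 *		...
 *	};
 */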
/* MEI bus API */
void mei_cl_bus_rescan_work(struct work_struct *work);
void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
		      unsigned int mode);
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
			      unsigned int mode, unsigned long timeout);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout);
bool mei_cl_bus_rx_event(struct mei_cl *cl);
bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
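/*
 * Illustrative usage (sketch, not part of the original header): a blocking
 * transmit of 'len' bytes from 'buf' on client 'cl' without a vtag could
 * look like the following; error handling is elided:
 *
 *	ssize_t ret;
 *
 *	ret = __mei_cl_send(cl, buf, len, 0, MEI_CL_IO_TX_BLOCKING);
 *	if (ret < 0)
 *		return ret;
 */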
/**
 * enum mei_pg_event - power gating transition events
 *
 * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
 * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
 * @MEI_PG_EVENT_RECEIVED: the driver received pg event
 * @MEI_PG_EVENT_INTR_WAIT: the driver is waiting for a pg event interrupt
 * @MEI_PG_EVENT_INTR_RECEIVED: the driver received pg event interrupt
 */
enum mei_pg_event {
	MEI_PG_EVENT_IDLE,
	MEI_PG_EVENT_WAIT,
	MEI_PG_EVENT_RECEIVED,
	MEI_PG_EVENT_INTR_WAIT,
	MEI_PG_EVENT_INTR_RECEIVED,
};

/**
 * enum mei_pg_state - device internal power gating state
 *
 * @MEI_PG_OFF: device is not power gated - it is active
 * @MEI_PG_ON: device is power gated - it is in lower power state
 */
enum mei_pg_state {
	MEI_PG_OFF = 0,
	MEI_PG_ON = 1,
};

const char *mei_pg_state_str(enum mei_pg_state state);

/**
 * struct mei_fw_version - MEI FW version struct
 *
 * @platform: platform identifier
 * @major: major version field
 * @minor: minor version field
 * @buildno: build number version field
 * @hotfix: hotfix number version field
 */
struct mei_fw_version {
	u8 platform;
	u8 major;
	u16 minor;
	u16 buildno;
	u16 hotfix;
};

#define MEI_MAX_FW_VER_BLOCKS 3

struct mei_dev_timeouts {
	unsigned long hw_ready; /* Timeout on ready message, in jiffies */
	int connect; /* HPS: at least 2 seconds, in seconds */
	unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */
	int client_init; /* HPS: Clients Enumeration Timeout, in seconds */
	unsigned long pgi; /* PG Isolation time response, in jiffies */
	unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */
	unsigned long hbm; /* HBM operation timeout, in jiffies */
	unsigned long mkhi_recv; /* receive timeout, in jiffies */
	unsigned long link_reset_wait; /* link reset wait timeout, in jiffies */
};

/**
 * struct mei_device - MEI private device struct
 *
 * @parent : device on a bus
 * @dev : device object
 * @cdev : character device pointer
 * @minor : minor number allocated for device
 *
 * @write_list : write pending list
 * @write_waiting_list : write completion list
 * @ctrl_wr_list : pending control write list
 * @ctrl_rd_list : pending control read list
 * @tx_queue_limit: tx queues per client limit
 *
 * @file_list : list of opened handles
 * @open_handle_count: number of opened handles
 *
 * @device_lock : big device lock
 * @timer_work : MEI timer delayed work (timeouts)
 *
 * @recvd_hw_ready : hw ready message received flag
 *
 * @wait_hw_ready : wait queue for receive HW ready message from FW
 * @wait_pg : wait queue for receive PG message from FW
 * @wait_hbm_start : wait queue for receive HBM start message from FW
 *
 * @reset_count : number of consecutive resets
 * @dev_state : device state
 * @wait_dev_state: wait queue for device state change
 * @hbm_state : state of host bus message protocol
 * @pxp_mode : PXP device mode
 * @init_clients_timer : HBM init handshake timeout
 *
 * @pg_event : power gating event
 * @pg_domain : runtime PM domain
 *
 * @rd_msg_buf : control messages buffer
 * @rd_msg_hdr : read message header storage
 * @rd_msg_hdr_count : how many dwords were already read from header
 *
 * @hbuf_is_ready : query if the host/write buffer is ready
 * @dr_dscr: DMA ring descriptors: TX, RX, and CTRL
 *
 * @version : HBM protocol version in use
 * @hbm_f_pg_supported : hbm feature pgi protocol
 * @hbm_f_dc_supported : hbm feature dynamic clients
 * @hbm_f_dot_supported : hbm feature disconnect on timeout
 * @hbm_f_ev_supported : hbm feature event notification
 * @hbm_f_fa_supported : hbm feature fixed address client
 * @hbm_f_ie_supported : hbm feature immediate reply to enum request
 * @hbm_f_os_supported : hbm feature support OS ver message
 * @hbm_f_dr_supported : hbm feature dma ring supported
 * @hbm_f_vt_supported : hbm feature vtag supported
 * @hbm_f_cap_supported : hbm feature capabilities message supported
 * @hbm_f_cd_supported : hbm feature client dma supported
 * @hbm_f_gsc_supported : hbm feature gsc supported
 *
 * @fw_ver : FW versions
 *
 * @fw_f_fw_ver_supported : fw feature: fw version supported
 * @fw_ver_received : fw version received
 *
 * @me_clients_rwsem: rw lock over me_clients list
 * @me_clients : list of FW clients
 * @me_clients_map : FW clients bit map
 * @host_clients_map : host clients id pool
 *
 * @allow_fixed_address: allow user space to connect a fixed client
 * @override_fixed_address: force allow fixed address behavior
 *
 * @timeouts: actual timeout values
 *
 * @reset_work : work item for the device reset
 * @bus_rescan_work : work item for the bus rescan
 *
 * @device_list : mei client bus list
 * @cl_bus_lock : client bus list lock
 *
 * @kind : kind of mei device
 *
 * @dbgfs_dir : debugfs mei root directory
 *
 * @gsc_reset_to_pxp : state of reset to the PXP mode
 *
 * @ops : hw specific operations
 * @hw : hw specific data
 */
struct mei_device {
	struct device *parent;
	struct device dev;
	struct cdev *cdev;
	int minor;

	struct list_head write_list;
	struct list_head write_waiting_list;
	struct list_head ctrl_wr_list;
	struct list_head ctrl_rd_list;
	u8 tx_queue_limit;

	struct list_head file_list;
	long open_handle_count;

	struct mutex device_lock;
	struct delayed_work timer_work;

	bool recvd_hw_ready;
	/*
	 * waiting queue for receive message from FW
	 */
	wait_queue_head_t wait_hw_ready;
	wait_queue_head_t wait_pg;
	wait_queue_head_t wait_hbm_start;

	/*
	 * mei device states
	 */
	unsigned long reset_count;
	enum mei_dev_state dev_state;
	wait_queue_head_t wait_dev_state;
	enum mei_hbm_state hbm_state;
	enum mei_dev_pxp_mode pxp_mode;
	u16 init_clients_timer;

	/*
	 * Power Gating support
	 */
	enum mei_pg_event pg_event;
#ifdef CONFIG_PM
	struct dev_pm_domain pg_domain;
#endif /* CONFIG_PM */

	unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];
	u32 rd_msg_hdr[MEI_RD_MSG_BUF_SIZE];
	int rd_msg_hdr_count;

	/* write buffer */
	bool hbuf_is_ready;

	struct mei_dma_dscr dr_dscr[DMA_DSCR_NUM];

	struct hbm_version version;
	unsigned int hbm_f_pg_supported:1;
	unsigned int hbm_f_dc_supported:1;
	unsigned int hbm_f_dot_supported:1;
	unsigned int hbm_f_ev_supported:1;
	unsigned int hbm_f_fa_supported:1;
	unsigned int hbm_f_ie_supported:1;
	unsigned int hbm_f_os_supported:1;
	unsigned int hbm_f_dr_supported:1;
	unsigned int hbm_f_vt_supported:1;
	unsigned int hbm_f_cap_supported:1;
	unsigned int hbm_f_cd_supported:1;
	unsigned int hbm_f_gsc_supported:1;

	struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];

	unsigned int fw_f_fw_ver_supported:1;
	unsigned int fw_ver_received:1;

	struct rw_semaphore me_clients_rwsem;
	struct list_head me_clients;
	DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
	DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);

	bool allow_fixed_address;
	bool override_fixed_address;

	struct mei_dev_timeouts timeouts;

	struct work_struct reset_work;
	struct work_struct bus_rescan_work;

	/* List of bus devices */
	struct list_head device_list;
	struct mutex cl_bus_lock;

	const char *kind;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */

	enum mei_dev_reset_to_pxp gsc_reset_to_pxp;

	const struct mei_hw_ops *ops;
	char hw[] __aligned(sizeof(void *));
};
static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
{
	return msecs_to_jiffies(sec * MSEC_PER_SEC);
}

/**
 * mei_data2slots - get slots number from a message length
 *
 * @length: size of the message in bytes
 *
 * Return: number of slots
 */
static inline u32 mei_data2slots(size_t length)
{
	return DIV_ROUND_UP(length, MEI_SLOT_SIZE);
}

/**
 * mei_hbm2slots - get slots number from an hbm message length,
 *     length + size of the mei message header
 *
 * @length: size of the message in bytes
 *
 * Return: number of slots
 */
static inline u32 mei_hbm2slots(size_t length)
{
	return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, MEI_SLOT_SIZE);
}

/**
 * mei_slots2data - get data in slots - bytes from slots
 *
 * @slots: number of available slots
 *
 * Return: number of bytes in slots
 */
static inline u32 mei_slots2data(int slots)
{
	return slots * MEI_SLOT_SIZE;
}
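/*
 * Worked example (illustrative): with MEI_SLOT_SIZE == sizeof(u32) == 4,
 * a 6 byte payload occupies mei_data2slots(6) == DIV_ROUND_UP(6, 4) == 2
 * slots; assuming the base struct mei_msg_hdr from hw.h occupies a single
 * 4 byte slot, the same payload needs mei_hbm2slots(6) == 3 slots, and
 * mei_slots2data(3) converts that back to 12 bytes of buffer space.
 */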
/*
 * mei init function prototypes
 */
void mei_device_init(struct mei_device *dev,
		     struct device *parent,
		     bool slow_fw,
		     const struct mei_hw_ops *hw_ops);
int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
int mei_restart(struct mei_device *dev);
void mei_stop(struct mei_device *dev);
void mei_cancel_work(struct mei_device *dev);

void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state);

int mei_dmam_ring_alloc(struct mei_device *dev);
void mei_dmam_ring_free(struct mei_device *dev);
bool mei_dma_ring_is_allocated(struct mei_device *dev);
void mei_dma_ring_reset(struct mei_device *dev);
void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len);
void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len);
u32 mei_dma_ring_empty_slots(struct mei_device *dev);

/*
 * MEI interrupt functions prototype
 */

void mei_timer(struct work_struct *work);
void mei_schedule_stall_timer(struct mei_device *dev);
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots);

int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list);
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);

/*
 * Register Access Function
 */

static inline int mei_hw_config(struct mei_device *dev)
{
	return dev->ops->hw_config(dev);
}

static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
{
	return dev->ops->pg_state(dev);
}

static inline bool mei_pg_in_transition(struct mei_device *dev)
{
	return dev->ops->pg_in_transition(dev);
}

static inline bool mei_pg_is_enabled(struct mei_device *dev)
{
	return dev->ops->pg_is_enabled(dev);
}

static inline int mei_hw_reset(struct mei_device *dev, bool enable)
{
	return dev->ops->hw_reset(dev, enable);
}

static inline int mei_hw_start(struct mei_device *dev)
{
	return dev->ops->hw_start(dev);
}

static inline void mei_clear_interrupts(struct mei_device *dev)
{
	dev->ops->intr_clear(dev);
}

static inline void mei_enable_interrupts(struct mei_device *dev)
{
	dev->ops->intr_enable(dev);
}

static inline void mei_disable_interrupts(struct mei_device *dev)
{
	dev->ops->intr_disable(dev);
}

static inline void mei_synchronize_irq(struct mei_device *dev)
{
	dev->ops->synchronize_irq(dev);
}

static inline bool mei_host_is_ready(struct mei_device *dev)
{
	return dev->ops->host_is_ready(dev);
}

static inline bool mei_hw_is_ready(struct mei_device *dev)
{
	return dev->ops->hw_is_ready(dev);
}

static inline bool mei_hbuf_is_ready(struct mei_device *dev)
{
	return dev->ops->hbuf_is_ready(dev);
}

static inline int mei_hbuf_empty_slots(struct mei_device *dev)
{
	return dev->ops->hbuf_free_slots(dev);
}

static inline u32 mei_hbuf_depth(const struct mei_device *dev)
{
	return dev->ops->hbuf_depth(dev);
}

static inline int mei_write_message(struct mei_device *dev,
				    const void *hdr, size_t hdr_len,
				    const void *data, size_t data_len)
{
	return dev->ops->write(dev, hdr, hdr_len, data, data_len);
}

static inline u32 mei_read_hdr(const struct mei_device *dev)
{
	return dev->ops->read_hdr(dev);
}

static inline void mei_read_slots(struct mei_device *dev,
				  unsigned char *buf, unsigned long len)
{
	dev->ops->read(dev, buf, len);
}

static inline int mei_count_full_read_slots(struct mei_device *dev)
{
	return dev->ops->rdbuf_full_slots(dev);
}

static inline int mei_trc_status(struct mei_device *dev, u32 *trc)
{
	if (dev->ops->trc_status)
		return dev->ops->trc_status(dev, trc);
	return -EOPNOTSUPP;
}

static inline int mei_fw_status(struct mei_device *dev,
				struct mei_fw_status *fw_status)
{
	return dev->ops->fw_status(dev, fw_status);
}

bool mei_hbuf_acquire(struct mei_device *dev);

bool mei_write_is_idle(struct mei_device *dev);

#if IS_ENABLED(CONFIG_DEBUG_FS)
void mei_dbgfs_register(struct mei_device *dev, const char *name);
void mei_dbgfs_deregister(struct mei_device *dev);
#else
static inline void mei_dbgfs_register(struct mei_device *dev, const char *name) {}
static inline void mei_dbgfs_deregister(struct mei_device *dev) {}
#endif /* CONFIG_DEBUG_FS */

int mei_register(struct mei_device *dev, struct device *parent);
void mei_deregister(struct mei_device *dev);

#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d dma=%1d ext=%1d internal=%1d comp=%1d"
#define MEI_HDR_PRM(hdr)                  \
	(hdr)->host_addr, (hdr)->me_addr, \
	(hdr)->length, (hdr)->dma_ring, (hdr)->extended, \
	(hdr)->internal, (hdr)->msg_complete
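/*
 * Illustrative use of the header format macros above (sketch, assuming a
 * struct mei_msg_hdr *mei_hdr in scope):
 *
 *	dev_dbg(dev->parent, MEI_HDR_FMT "\n", MEI_HDR_PRM(mei_hdr));
 */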
ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len);
/**
 * mei_fw_status_str - fetch and convert fw status registers to printable string
 *
 * @dev: the device structure
 * @buf: string buffer of minimal size MEI_FW_STATUS_STR_SZ
 * @len: buffer len must be >= MEI_FW_STATUS_STR_SZ
 *
 * Return: number of bytes written or < 0 on failure
 */
static inline ssize_t mei_fw_status_str(struct mei_device *dev,
					char *buf, size_t len)
{
	struct mei_fw_status fw_status;
	int ret;

	buf[0] = '\0';

	ret = mei_fw_status(dev, &fw_status);
	if (ret)
		return ret;

	ret = mei_fw_status2str(&fw_status, buf, MEI_FW_STATUS_STR_SZ);

	return ret;
}

/**
 * kind_is_gsc - checks whether the device is gsc
 *
 * @dev: the device structure
 *
 * Return: whether the device is gsc
 */
static inline bool kind_is_gsc(struct mei_device *dev)
{
	/* check kind for NULL because it may not be set, like at the first call to hw_start */
	return dev->kind && (strcmp(dev->kind, "gsc") == 0);
}

/**
 * kind_is_gscfi - checks whether the device is gscfi
 *
 * @dev: the device structure
 *
 * Return: whether the device is gscfi
 */
static inline bool kind_is_gscfi(struct mei_device *dev)
{
	/* check kind for NULL because it may not be set, like at the first call to hw_start */
	return dev->kind && (strcmp(dev->kind, "gscfi") == 0);
}
#endif