/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of
 * this kind, but only mechanisms to make the HW do something. It is not
 * completely stateless, but close to it.
 * We will have an implementation for each different supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 * 1) A helper function is called during the module initialization and
 *    registers the bus driver's ops with the transport's alloc function.
 * 2) The bus's probe calls the transport layer's allocation function.
 *    Of course this function is bus specific.
 * 3) These allocation functions spawn the upper layer, which will
 *    register mac80211.
 *
 * 4) At some point (e.g. mac80211's start call), the op_mode will call
 *    the following sequence:
 *    start_hw
 *    start_fw
 *
 * 5) Then when finished (or reset):
 *    stop_device
 *
 * 6) Eventually, the free function will be called.
 */

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
 *	SUSPEND and RESUME commands. We are in D3 mode when we set
 *	trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};
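
/*
 * Example (editor's sketch, not part of the original header): a synchronous
 * host command whose response the caller wants to inspect; the command id
 * is a placeholder. iwl_trans_send_cmd() and iwl_free_resp() are declared
 * further down in this header.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = cmd_id,
 *		.flags = CMD_WANT_SKB,
 *	};
 *
 *	if (!iwl_trans_send_cmd(trans, &hcmd)) {
 *		... use hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);
 *	}
 */
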
#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's limit; the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
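 *
 * Example (editor's sketch, not part of the original documentation): a
 * command with a small copied chunk followed by a large NOCOPY chunk; the
 * identifiers are placeholders:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = cmd_id,
 *		.data = { &small_cfg, big_buf },
 *		.len = { sizeof(small_cfg), big_buf_len },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *	};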
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that compose the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status - transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};
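
/*
 * Editor's note: in the helpers below, IWL_AMSDU_12K maps to a 16 KiB
 * buffer rather than 12 KiB, presumably because page allocations round up
 * to the next power of two; with 4 KiB pages:
 *
 *	iwl_trans_get_rb_size_order(IWL_AMSDU_12K) == get_order(16 * 1024) == 2
 */
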
static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};
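
/*
 * Example (editor's sketch): building the command name tables that an
 * op_mode passes through &struct iwl_trans_config @command_groups; the
 * command names are placeholders, and each array must be sorted by
 * command id (see iwl_cmd_groups_verify_sorted() below):
 *
 *	static const struct iwl_hcmd_names example_legacy_names[] = {
 *		HCMD_NAME(EXAMPLE_CMD_A),
 *		HCMD_NAME(EXAMPLE_CMD_B),
 *	};
 *
 *	static const struct iwl_hcmd_arr example_groups[] = {
 *		[0x0] = HCMD_ARR(example_legacy_names),
 *	};
 */
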
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on.
 *	May sleep.
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kicks a fw image.
 *	May sleep.
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep.
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not &start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set.
 * @tx: send an skb. The transport relies on the op_mode to zero
 *	the ieee80211_tx_info->driver_data.
 *	If the MPDU is an A-MSDU, all the CSUM will be taken care of (TCP
 *	CSUM and IP header in case of IPv4). If the MPDU is a single MSDU,
 *	the op_mode must compute the IP header checksum if it is IPv4.
 *	Must be atomic.
 * @reclaim: free packets until ssn. Returns a list of freed packets.
 *	Must be atomic.
 * @set_q_ptrs: set a queue's write pointer to a given value
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs.
 *	Must be atomic.
 * @txq_alloc: allocate a Tx queue on devices with dynamic queue management;
 *	returns the queue id or a negative error. May sleep.
 * @txq_free: free a Tx queue previously allocated by @txq_alloc
 * @rxq_dma_data: retrieve an RX queue's DMA data, see
 *	&struct iwl_trans_rxq_dma_data
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @sw_reset: trigger a software reset of the device
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. Must balance a previous
 *	successful call to grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 * @sync_nmi: trigger an NMI in the device and wait until it has been handled
 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
 *	context info.
 * @set_reduce_power: set the reduce power table data, analogous to @set_pnvm
 * @interrupts: disable/enable interrupts to transport
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask,
						 const struct iwl_dump_sanitize_ops *sanitize_ops,
						 void *sanitize_ctx);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
	int (*set_reduce_power)(struct iwl_trans *trans,
				const void *data, u32 len);
	void (*interrupts)(struct iwl_trans *trans, bool enable);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};
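
/*
 * Example (editor's sketch): the start flow from the "Life cycle of the
 * transport layer" DOC section above, expressed with the wrappers declared
 * below; error handling is elided and @fw is a placeholder image pointer:
 *
 *	ret = iwl_trans_start_hw(trans);
 *	ret = iwl_trans_start_fw(trans, fw, false);
 *	... wait for the fw's alive notification ...
 *	iwl_trans_fw_alive(trans, scd_base_addr);
 */
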
/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device. In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};
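
/*
 * Example (editor's sketch, following the CMD_SEND_IN_D3 documentation
 * above): an op_mode flips the platform PM mode around a WoWLAN suspend;
 * error handling is elided:
 *
 *	trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
 *	ret = iwl_trans_d3_suspend(trans, false, reset);
 *	...
 *	ret = iwl_trans_d3_resume(trans, &d3_status, false, reset);
 *	trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 */
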
/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for NMI interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: DMA (physical) address of the block/page
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: address of TCM error table
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. Uses &enum iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination, uses &enum iwl_fw_ini_buffer_location
 * @unsupported_region_msk: bitmap of unsupported regions
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than
 *	&IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the first TB must be 20 bytes.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an A-MPDU queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
 * frame descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (unless the HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0      | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
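 *
 * Example (editor's illustration): since n_window is a power of two, a HW
 * index is mapped into the SW window by masking, conceptually:
 *
 *	sw_idx = hw_idx & (n_window - 1);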
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans_txqs - transport tx queues data
 *
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;
};

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @wait_command_queue: wait queue for sync commands
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
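 *	Example (editor's sketch) of the intended pairing:
 *
 *		struct iwl_device_tx_cmd *dev_cmd =
 *			iwl_trans_alloc_tx_cmd(trans);
 *
 *		if (dev_cmd)
 *			iwl_trans_free_tx_cmd(trans, dev_cmd);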
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @txqs: transport tx queues data.
 */
struct iwl_trans {
	bool csme_own;
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 reduce_power_loaded:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask,
				     sanitize_ops, sanitize_ctx);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}
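
/*
 * Example (editor's sketch): pairing iwl_trans_txq_alloc() below with
 * iwl_trans_txq_free() above on devices that implement these ops; all
 * arguments are placeholders, and a negative return value is an error:
 *
 *	int qid = iwl_trans_txq_alloc(trans, flags, sta_id, tid,
 *				      cmd_id, size, wdg_timeout);
 *
 *	if (qid >= 0)
 *		iwl_trans_txq_free(trans, qid);
 */
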
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	/* No need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode, sync);
		trans->state = IWL_TRANS_NO_FW;
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
				     const void *data, u32 len)
{
	if (trans->ops->set_pnvm) {
		int ret = trans->ops->set_pnvm(trans, data, len);

		if (ret)
			return ret;
	}

	trans->pnvm_loaded = true;

	return 0;
}

static inline int
iwl_trans_set_reduce_power(struct iwl_trans *trans,
			   const void *data, u32 len)
{
	if (trans->ops->set_reduce_power) {
		int ret = trans->ops->set_reduce_power(trans, data, len);

		if (ret)
			return ret;
	}

	trans->reduce_power_loaded = true;
	return 0;
}
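
/*
 * Example (editor's sketch): the grab/release pairing documented in
 * &struct iwl_trans_ops; sleeping is not allowed inside the critical
 * section, and the register offset is a placeholder:
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		u32 val = iwl_trans_read_prph(trans, reg_ofs);
 *
 *		iwl_trans_release_nic_access(trans);
 *	}
 */
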
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	if (trans->ops->interrupts)
		trans->ops->interrupts(trans, enable);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */