1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 *
4 * Bluetooth support for Intel PCIe devices
5 *
6 * Copyright (C) 2024 Intel Corporation
7 */
8
/* Control and Status Register (BTINTEL_PCIE_CSR) offsets, relative to the
 * PCI BAR mapped at btintel_pcie_data->base_addr.
 */
#define BTINTEL_PCIE_CSR_BASE			(0x000)
#define BTINTEL_PCIE_CSR_FUNC_CTRL_REG		(BTINTEL_PCIE_CSR_BASE + 0x024)
#define BTINTEL_PCIE_CSR_HW_REV_REG		(BTINTEL_PCIE_CSR_BASE + 0x028)
#define BTINTEL_PCIE_CSR_RF_ID_REG		(BTINTEL_PCIE_CSR_BASE + 0x09C)
#define BTINTEL_PCIE_CSR_BOOT_STAGE_REG		(BTINTEL_PCIE_CSR_BASE + 0x108)
#define BTINTEL_PCIE_CSR_IPC_CONTROL_REG	(BTINTEL_PCIE_CSR_BASE + 0x10C)
#define BTINTEL_PCIE_CSR_IPC_STATUS_REG		(BTINTEL_PCIE_CSR_BASE + 0x110)
#define BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG	(BTINTEL_PCIE_CSR_BASE + 0x114)
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG	(BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG	(BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG	(BTINTEL_PCIE_CSR_BASE + 0x12C)
#define BTINTEL_PCIE_CSR_MBOX_1_REG		(BTINTEL_PCIE_CSR_BASE + 0x170)
#define BTINTEL_PCIE_CSR_MBOX_2_REG		(BTINTEL_PCIE_CSR_BASE + 0x174)
#define BTINTEL_PCIE_CSR_MBOX_3_REG		(BTINTEL_PCIE_CSR_BASE + 0x178)
#define BTINTEL_PCIE_CSR_MBOX_4_REG		(BTINTEL_PCIE_CSR_BASE + 0x17C)
#define BTINTEL_PCIE_CSR_MBOX_STATUS_REG	(BTINTEL_PCIE_CSR_BASE + 0x180)
#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG		(BTINTEL_PCIE_CSR_BASE + 0x440)
#define BTINTEL_PCIE_PRPH_DEV_RD_REG		(BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR	(BTINTEL_PCIE_CSR_BASE + 0x460)
29
/* Bit definitions for the BTINTEL_PCIE_CSR Function Control Register */
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA		(BIT(0))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT		(BIT(6))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT		(BIT(7))
/* MAC access status (read) vs. request (write) bit pair */
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS	(BIT(20))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ	(BIT(21))
/* Stop MAC Access disconnection request */
#define BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS	(BIT(22))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ		(BIT(23))

#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS	(BIT(28))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON	(BIT(29))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET		(BIT(31))
44
/* Bit values for the BTINTEL_PCIE_CSR_BOOT_STAGE register */
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM			(BIT(0))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML			(BIT(1))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW		(BIT(2))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ROM_LOCKDOWN	(BIT(10))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_IML_LOCKDOWN	(BIT(11))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_WARNING	(BIT(12))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ABORT_HANDLER	(BIT(13))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED	(BIT(14))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_MAC_ACCESS_ON	(BIT(16))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_ALIVE		(BIT(23))
#define BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY	(BIT(24))
57
/* Registers for MSI-X cause/mask handling and the IVAR mapping table */
#define BTINTEL_PCIE_CSR_MSIX_BASE		(0x2000)
#define BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES	(BTINTEL_PCIE_CSR_MSIX_BASE + 0x0800)
#define BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK	(BTINTEL_PCIE_CSR_MSIX_BASE + 0x0804)
#define BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES	(BTINTEL_PCIE_CSR_MSIX_BASE + 0x0808)
#define BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK	(BTINTEL_PCIE_CSR_MSIX_BASE + 0x080C)
#define BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST	(BTINTEL_PCIE_CSR_MSIX_BASE + 0x0810)
#define BTINTEL_PCIE_CSR_MSIX_AUTOMASK_EN	(BTINTEL_PCIE_CSR_MSIX_BASE + 0x0814)
#define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE		(BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880)
/* One IVAR entry per interrupt cause, indexed from IVAR_BASE */
#define BTINTEL_PCIE_CSR_MSIX_IVAR(cause)	(BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause))
68
/* IOSF Debug Register addresses (device-side, read via the PRPH window) */
#define BTINTEL_PCIE_DBGC_BASE_ADDR		(0xf3800300)
#define BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS	(BTINTEL_PCIE_DBGC_BASE_ADDR + 0x1C)
#define BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND	(BTINTEL_PCIE_DBGC_BASE_ADDR + 0x2C)

/* Debug-buffer status word layout: index in bits [27:24], offset in [23:0] */
#define BTINTEL_PCIE_DBG_IDX_BIT_MASK		0x0F
#define BTINTEL_PCIE_DBGC_DBG_BUF_IDX(data)	(((data) >> 24) & BTINTEL_PCIE_DBG_IDX_BIT_MASK)
#define BTINTEL_PCIE_DBG_OFFSET_BIT_MASK	0xFFFFFF

/* The DRAM buffer count, each buffer size, and
 * fragment buffer size
 */
#define BTINTEL_PCIE_DBGC_BUFFER_COUNT		16
#define BTINTEL_PCIE_DBGC_BUFFER_SIZE		(256 * 1024) /* 256 KB */

#define BTINTEL_PCIE_DBGC_FRAG_VERSION		1
#define BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT	BTINTEL_PCIE_DBGC_BUFFER_COUNT

/* Magic number(4), version(4), size of payload length(4) */
#define BTINTEL_PCIE_DBGC_FRAG_HEADER_SIZE	12

/* Num of alloc Dbg buff (4) + (LSB(4), MSB(4), Size(4)) for each buffer */
#define BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE	196
92
/* Causes for the FH (frame-handler) register interrupts */
enum msix_fh_int_causes {
	BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0	= BIT(0),	/* cause 0 */
	BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1	= BIT(1),	/* cause 1 */
};
98
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
	BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0	= BIT(0),	/* cause 32 */
	BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1	= BIT(1),	/* cause 33 */
	BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP	= BIT(3),	/* cause 35 */
};
105
/* PCIe device power states
 * D0:      Host-Device interface is active
 * D3_HOT:  Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
 * D3_COLD: Host-Device interface is inactive (as reflected by IPC_SLEEP_CONTROL_CSR_AD)
 * Note the gap: value 1 is intentionally unused.
 */
enum {
	BTINTEL_PCIE_STATE_D0 = 0,
	BTINTEL_PCIE_STATE_D3_HOT = 2,
	BTINTEL_PCIE_STATE_D3_COLD = 3,
};
116
/* Driver state flags — presumably bit numbers for btintel_pcie_data->flags
 * (see @flags: "driver state"); NOTE(review): confirm against the .c file's
 * set_bit()/test_bit() usage.
 */
enum {
	BTINTEL_PCIE_CORE_HALTED,
	BTINTEL_PCIE_HWEXP_INPROGRESS,
	BTINTEL_PCIE_COREDUMP_INPROGRESS,
	BTINTEL_PCIE_RECOVERY_IN_PROGRESS,
	BTINTEL_PCIE_SETUP_DONE
};
124
/* TLV types; names mirror the fields collected in
 * struct btintel_pcie_dump_header (CNVi/CNVR IDs, write pointer, wrap
 * counter, trigger reason, firmware SHA/build info, ...).
 */
enum btintel_pcie_tlv_type {
	BTINTEL_CNVI_BT,
	BTINTEL_WRITE_PTR,
	BTINTEL_WRAP_CTR,
	BTINTEL_TRIGGER_REASON,
	BTINTEL_FW_SHA,
	BTINTEL_CNVR_TOP,
	BTINTEL_CNVI_TOP,
	BTINTEL_DUMP_TIME,
	BTINTEL_FW_BUILD,
	BTINTEL_VENDOR,
	BTINTEL_DRIVER
};
138
/* Causes for the MBOX interrupts (one bit per mailbox register) */
enum msix_mbox_int_causes {
	BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX1	= BIT(0),	/* cause MBOX1 */
	BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX2	= BIT(1),	/* cause MBOX2 */
	BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX3	= BIT(2),	/* cause MBOX3 */
	BTINTEL_PCIE_CSR_MBOX_STATUS_MBOX4	= BIT(3),	/* cause MBOX4 */
};
146
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE	BIT(7)

/* Minimum and Maximum number of MSI-X Vector
 * Intel Bluetooth PCIe support only 1 vector
 */
#define BTINTEL_PCIE_MSIX_VEC_MAX	1
#define BTINTEL_PCIE_MSIX_VEC_MIN	1

/* Default poll time for MAC access during init */
#define BTINTEL_DEFAULT_MAC_ACCESS_TIMEOUT_US	200000

/* Default interrupt timeout in msec */
#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS	3000

/* Max retries for D-state (Dx) transition requests */
#define BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES	3

/* The number of descriptors in TX queues */
#define BTINTEL_PCIE_TX_DESCS_COUNT	32

/* The number of descriptors in RX queues */
#define BTINTEL_PCIE_RX_DESCS_COUNT	64
168
/* Queue numbers for TX and RX
 * The value doubles as the index into the IA (Index Array)
 */
enum {
	BTINTEL_PCIE_TXQ_NUM = 0,
	BTINTEL_PCIE_RXQ_NUM = 1,
	BTINTEL_PCIE_NUM_QUEUES = 2,
};
177
/* The size of each TX/RX DMA data buffer in bytes */
#define BTINTEL_PCIE_BUFFER_SIZE	4096

/* How long a TX submission waits for its completion interrupt (msec) */
#define BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS	500

/* Doorbell vector for TFD */
#define BTINTEL_PCIE_TX_DB_VEC	0

/* Doorbell vector for FRBD */
#define BTINTEL_PCIE_RX_DB_VEC	513

/* RBD buffer size mapping: encoding 0x04 selects 4K buffers
 * (matches @rbd_siz in struct ctx_info: "RBD Size { 0x4=4K }")
 */
#define BTINTEL_PCIE_RBD_SIZE_4K	0x04
191
/*
 * Struct for Context Information (v2)
 *
 * All members are write-only for host and read-only for device.
 *
 * @version: Version of context information
 * @size: Size of context information
 * @config: Config with which host wants peripheral to execute
 *	Subset of capability register published by device
 * @addr_tr_hia: Address of TR Head Index Array
 * @addr_tr_tia: Address of TR Tail Index Array
 * @addr_cr_hia: Address of CR Head Index Array
 * @addr_cr_tia: Address of CR Tail Index Array
 * @num_tr_ia: Number of entries in TR Index Arrays
 * @num_cr_ia: Number of entries in CR Index Arrays
 * @rbd_size: RBD Size { 0x4=4K }
 * @addr_tfdq: Address of TFD Queue(tx)
 * @addr_urbdq0: Address of URBD Queue(tx)
 * @num_tfdq: Number of TFD in TFD Queue(tx)
 * @num_urbdq0: Number of URBD in URBD Queue(tx)
 * @tfdq_db_vec: Queue number of TFD
 * @urbdq0_db_vec: Queue number of URBD
 * @addr_frbdq: Address of FRBD Queue(rx)
 * @addr_urbdq1: Address of URBD Queue(rx)
 * @num_frbdq: Number of FRBD in FRBD Queue(rx)
 * @frbdq_db_vec: Queue number of FRBD
 * @num_urbdq1: Number of URBD in URBD Queue(rx)
 * @urbdq_db_vec: Queue number of URBDQ1
 * @tr_msi_vec: Transfer Ring MSI-X Vector
 * @cr_msi_vec: Completion Ring MSI-X Vector
 * @dbgc_addr: DBGC first fragment address
 * @dbgc_size: DBGC buffer size
 * @early_enable: Early debug enable
 * @dbg_output_mode: Debug output mode
 *	Bit[4] DBGC O/P { 0=SRAM, 1=DRAM(not relevant for NPK) }
 *	Bit[5] DBGC I/P { 0=BDBG, 1=DBGI }
 *	Bits[6:7] DBGI O/P(relevant if bit[5] = 1)
 *	0=BT DBGC, 1=WiFi DBGC, 2=NPK }
 * @dbg_preset: Debug preset
 * @ext_addr: Address of context information extension
 * @ext_size: Size of context information part
 * @test_param: Test parameter word (semantics not visible in this header —
 *	NOTE(review): confirm against firmware interface spec)
 *
 * Total 38 DWords
 */
struct ctx_info {
	u16	version;
	u16	size;
	u32	config;
	u32	reserved_dw02;
	u32	reserved_dw03;
	u64	addr_tr_hia;
	u64	addr_tr_tia;
	u64	addr_cr_hia;
	u64	addr_cr_tia;
	u16	num_tr_ia;
	u16	num_cr_ia;
	u32	rbd_size:4,
		reserved_dw13:28;
	u64	addr_tfdq;
	u64	addr_urbdq0;
	u16	num_tfdq;
	u16	num_urbdq0;
	u16	tfdq_db_vec;
	u16	urbdq0_db_vec;
	u64	addr_frbdq;
	u64	addr_urbdq1;
	u16	num_frbdq;
	u16	frbdq_db_vec;
	u16	num_urbdq1;
	u16	urbdq_db_vec;
	u16	tr_msi_vec;
	u16	cr_msi_vec;
	u32	reserved_dw27;
	u64	dbgc_addr;
	u32	dbgc_size;
	u32	early_enable:1,
		reserved_dw31:3,
		dbg_output_mode:4,
		dbg_preset:8,
		reserved2_dw31:16;
	u64	ext_addr;
	u32	ext_size;
	u32	test_param;
	u32	reserved_dw36;
	u32	reserved_dw37;
} __packed;
278
/* Transfer Descriptor for TX
 * @type: Not in use. Set to 0x0
 * @size: Size of data in the buffer
 * @addr: DMA Address of buffer
 */
struct tfd {
	u8	type;
	u16	size;
	u8	reserved;
	u64	addr;
	u32	reserved1;
} __packed;
291
/* URB Descriptor for TX
 * @tfd_index: Index of TFD in TFDQ + 1
 * @num_txq: Queue index of TFD Queue
 * @cmpl_count: Completion count. Always 0x01
 * @immediate_cmpl: Immediate completion flag: Always 0x01
 */
struct urbd0 {
	u32	tfd_index:16,
		num_txq:8,
		cmpl_count:4,
		reserved:3,
		immediate_cmpl:1;
} __packed;
305
/* FRB Descriptor for RX
 * @tag: RX buffer tag (index of RX buffer queue)
 * @addr: DMA address of buffer
 */
struct frbd {
	u32	tag:16,
		reserved:16;
	u32	reserved2;
	u64	addr;
} __packed;
316
/* URB Descriptor for RX
 * @frbd_tag: Tag from FRBD
 * @status: Status
 */
struct urbd1 {
	u32	frbd_tag:16,
		status:1,
		reserved:14,
		fixed:1;
} __packed;
327
/* RFH header in RX packet
 * @packet_len: Length of the data in the buffer
 * @rxq: RX Queue number
 * @cmd_id: Command ID. Not in Use
 */
struct rfh_hdr {
	u64	packet_len:16,
		rxq:6,
		reserved:10,
		cmd_id:16,
		reserved1:16;
} __packed;
340
/* Internal data buffer
 * @data: virtual address of the data buffer
 * @data_p_addr: DMA (physical) address of the data buffer
 */
struct data_buf {
	u8		*data;
	dma_addr_t	data_p_addr;
};
349
/* Index Array — TR/CR head and tail index arrays shared with the device
 * (their DMA addresses are published via struct ctx_info).
 * @tr_hia_p_addr: DMA address of TR Head Index Array
 * @tr_hia: TR Head Index Array
 * @tr_tia_p_addr: DMA address of TR Tail Index Array
 * @tr_tia: TR Tail Index Array
 * @cr_hia_p_addr: DMA address of CR Head Index Array
 * @cr_hia: CR Head Index Array
 * @cr_tia_p_addr: DMA address of CR Tail Index Array
 * @cr_tia: CR Tail Index Array
 */
struct ia {
	dma_addr_t	tr_hia_p_addr;
	u16		*tr_hia;
	dma_addr_t	tr_tia_p_addr;
	u16		*tr_tia;
	dma_addr_t	cr_hia_p_addr;
	u16		*cr_hia;
	dma_addr_t	cr_tia_p_addr;
	u16		*cr_tia;
};
361
/* Structure for TX Queue
 * @count: Number of descriptors
 * @tfds_p_addr: DMA address of the TFD array
 * @tfds: Array of TFD
 * @urbd0s_p_addr: DMA address of the URBD0 array
 * @urbd0s: Array of URBD0
 * @buf_p_addr: DMA address of the backing data buffer area
 * @buf_v_addr: virtual address of the backing data buffer area
 * @bufs: Array of data_buf structure
 */
struct txq {
	u16	count;

	dma_addr_t	tfds_p_addr;
	struct tfd	*tfds;

	dma_addr_t	urbd0s_p_addr;
	struct urbd0	*urbd0s;

	dma_addr_t	buf_p_addr;
	void		*buf_v_addr;
	struct data_buf	*bufs;
};
381
/* Structure for RX Queue
 * @count: Number of descriptors
 * @frbds_p_addr: DMA address of the FRBD array
 * @frbds: Array of FRBD
 * @urbd1s_p_addr: DMA address of the URBD1 array
 * @urbd1s: Array of URBD1
 * @buf_p_addr: DMA address of the backing data buffer area
 * @buf_v_addr: virtual address of the backing data buffer area
 * @bufs: Array of data_buf structure
 */
struct rxq {
	u16	count;

	dma_addr_t	frbds_p_addr;
	struct frbd	*frbds;

	dma_addr_t	urbd1s_p_addr;
	struct urbd1	*urbd1s;

	dma_addr_t	buf_p_addr;
	void		*buf_v_addr;
	struct data_buf	*bufs;
};
401
/* Structure for DBGC DRAM Buffer bookkeeping
 * @count: Number of debug buffers
 * @frag_v_addr: virtual address of the fragment descriptor area
 * @frag_p_addr: DMA address of the fragment descriptor area
 * @frag_size: size of the fragment descriptor area
 * @buf_p_addr: DMA address of the debug buffer area
 * @buf_v_addr: virtual address of the debug buffer area
 * @bufs: Array of data_buf structure
 */
struct btintel_pcie_dbgc {
	u16	count;

	void		*frag_v_addr;
	dma_addr_t	frag_p_addr;
	u16		frag_size;

	dma_addr_t	buf_p_addr;
	void		*buf_v_addr;
	struct data_buf	*bufs;
};
417
/* Metadata cached by the driver and emitted as the coredump header.
 * Field names correspond to the TLV types in enum btintel_pcie_tlv_type.
 * @driver_name: name string placed in the dump
 * @cnvi_top/@cnvr_top/@cnvi_bt: CNVi/CNVR hardware IDs
 * @fw_timestamp/@fw_build_type/@fw_build_num/@fw_git_sha1: firmware build info
 * @write_ptr/@wrap_ctr: debug buffer write pointer and wrap-around counter
 * @trigger_reason: reason the dump was triggered
 * @state: dump/driver state at capture time
 */
struct btintel_pcie_dump_header {
	const char	*driver_name;
	u32		cnvi_top;
	u32		cnvr_top;
	u16		fw_timestamp;
	u8		fw_build_type;
	u32		fw_build_num;
	u32		fw_git_sha1;
	u32		cnvi_bt;
	u32		write_ptr;
	u32		wrap_ctr;
	u16		trigger_reason;
	int		state;
};
432
/* struct btintel_pcie_data - per-device driver state
 * @pdev: pci device
 * @hdev: hdev device
 * @flags: driver state
 * @irq_lock: spinlock for MSI-X
 * @hci_rx_lock: spinlock for HCI RX flow
 * @base_addr: pci base address (from BAR)
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if MSI-X is enabled
 * @alloc_vecs: number of interrupt vectors allocated
 * @def_irq: default irq for all causes
 * @fh_init_mask: initial unmasked rxq causes
 * @hw_init_mask: initial unmasked hw causes
 * @boot_stage_cache: cached value of boot stage register
 * @img_resp_cache: cached value of image response register
 * @cnvi: CNVi register value
 * @cnvr: CNVr register value
 * @gp0_received: condition for gp0 interrupt
 * @gp0_wait_q: wait_q for gp0 interrupt
 * @tx_wait_done: condition for tx interrupt
 * @tx_wait_q: wait_q for tx interrupt
 * @workqueue: workqueue for RX work
 * @rx_skb_q: SKB queue for RX packet
 * @rx_work: RX work struct to process the RX packet in @rx_skb_q
 * @dma_pool: DMA pool for descriptors, index array and ci
 * @dma_p_addr: DMA address for pool
 * @dma_v_addr: address of pool
 * @ci_p_addr: DMA address for CI struct
 * @ci: CI struct
 * @ia: Index Array struct
 * @txq: TX Queue struct
 * @rxq: RX Queue struct
 * @alive_intr_ctxt: Alive interrupt context
 * @dbgc: DBGC debug buffer bookkeeping
 * @dmp_hdr: cached metadata for the coredump header
 * @pm_sx_event: PM event on which system got suspended
 */
struct btintel_pcie_data {
	struct pci_dev	*pdev;
	struct hci_dev	*hdev;

	unsigned long	flags;
	/* lock used in MSI-X interrupt */
	spinlock_t	irq_lock;
	/* lock to serialize rx events */
	spinlock_t	hci_rx_lock;

	void __iomem	*base_addr;

	struct msix_entry	msix_entries[BTINTEL_PCIE_MSIX_VEC_MAX];
	bool	msix_enabled;
	u32	alloc_vecs;
	u32	def_irq;

	u32	fh_init_mask;
	u32	hw_init_mask;

	u32	boot_stage_cache;
	u32	img_resp_cache;

	u32	cnvi;
	u32	cnvr;

	bool	gp0_received;
	wait_queue_head_t	gp0_wait_q;

	bool	tx_wait_done;
	wait_queue_head_t	tx_wait_q;

	struct workqueue_struct	*workqueue;
	struct sk_buff_head	rx_skb_q;
	struct work_struct	rx_work;

	struct dma_pool	*dma_pool;
	dma_addr_t	dma_p_addr;
	void		*dma_v_addr;

	dma_addr_t	ci_p_addr;
	struct ctx_info	*ci;
	struct ia	ia;
	struct txq	txq;
	struct rxq	rxq;
	u32	alive_intr_ctxt;
	struct btintel_pcie_dbgc	dbgc;
	struct btintel_pcie_dump_header	dmp_hdr;
	u8	pm_sx_event;
};
518
/* Read a 32-bit CSR at @offset within the device's mapped BAR */
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
					u32 offset)
{
	void __iomem *addr = data->base_addr + offset;

	return ioread32(addr);
}
524
/* Write the 8-bit value @val to the CSR at @offset within the mapped BAR */
static inline void btintel_pcie_wr_reg8(struct btintel_pcie_data *data,
					u32 offset, u8 val)
{
	void __iomem *addr = data->base_addr + offset;

	iowrite8(val, addr);
}
530
/* Write the 32-bit value @val to the CSR at @offset within the mapped BAR */
static inline void btintel_pcie_wr_reg32(struct btintel_pcie_data *data,
					 u32 offset, u32 val)
{
	void __iomem *addr = data->base_addr + offset;

	iowrite32(val, addr);
}
536
/* Read-modify-write: OR @bits into the CSR at @offset.
 * Not atomic; NOTE(review): presumably callers serialize concurrent
 * access to the same register — confirm at call sites.
 */
static inline void btintel_pcie_set_reg_bits(struct btintel_pcie_data *data,
					     u32 offset, u32 bits)
{
	void __iomem *addr = data->base_addr + offset;

	iowrite32(ioread32(addr) | bits, addr);
}
546
/* Read-modify-write: clear @bits in the CSR at @offset.
 * Not atomic; NOTE(review): presumably callers serialize concurrent
 * access to the same register — confirm at call sites.
 */
static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data,
					     u32 offset, u32 bits)
{
	void __iomem *addr = data->base_addr + offset;

	iowrite32(ioread32(addr) & ~bits, addr);
}
556
/* Read a 32-bit word of device (peripheral) memory at device address @addr.
 * The address is first latched into the PRPH address register; the device
 * then exposes the value through the PRPH read-data register, so the two
 * accesses must stay in this order. No locking is done here —
 * NOTE(review): presumably callers serialize concurrent PRPH reads; confirm.
 */
static inline u32 btintel_pcie_rd_dev_mem(struct btintel_pcie_data *data,
					  u32 addr)
{
	btintel_pcie_wr_reg32(data, BTINTEL_PCIE_PRPH_DEV_ADDR_REG, addr);
	return btintel_pcie_rd_reg32(data, BTINTEL_PCIE_PRPH_DEV_RD_REG);
}
563
564