// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "type.h"
#include "i40iw_hw.h"
#include "protos.h"

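/*
 * i40iw register offsets, indexed by the generic IRDMA_* register enum and
 * copied into dev->hw_regs by i40iw_init_hw() below.
 */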
static u32 i40iw_regs[IRDMA_MAX_REGS] = {
	I40E_PFPE_CQPTAIL,
	I40E_PFPE_CQPDB,
	I40E_PFPE_CCQPSTATUS,
	I40E_PFPE_CCQPHIGH,
	I40E_PFPE_CCQPLOW,
	I40E_PFPE_CQARM,
	I40E_PFPE_CQACK,
	I40E_PFPE_AEQALLOC,
	I40E_PFPE_CQPERRCODES,
	I40E_PFPE_WQEALLOC,
	I40E_PFINT_DYN_CTLN(0),
	I40IW_DB_ADDR_OFFSET,

	I40E_GLPCI_LBARCTRL,
	I40E_GLPE_CPUSTATUS0,
	I40E_GLPE_CPUSTATUS1,
	I40E_GLPE_CPUSTATUS2,
	I40E_PFINT_AEQCTL,
	I40E_PFINT_CEQCTL(0),
	I40E_VSIQF_CTL(0),
	I40E_PFHMC_PDINV,
	I40E_GLHMC_VFPDINV(0),
	I40E_GLPE_CRITERR,
	0xffffffff      /* PFINT_RATEN not used in FPK */
};

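/*
 * Offsets of the 32-bit and 64-bit (low half) PF statistics registers,
 * in the order expected by the generic irdma stats code.
 */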
static u32 i40iw_stat_offsets_32[IRDMA_HW_STAT_INDEX_MAX_32] = {
	I40E_GLPES_PFIP4RXDISCARD(0),
	I40E_GLPES_PFIP4RXTRUNC(0),
	I40E_GLPES_PFIP4TXNOROUTE(0),
	I40E_GLPES_PFIP6RXDISCARD(0),
	I40E_GLPES_PFIP6RXTRUNC(0),
	I40E_GLPES_PFIP6TXNOROUTE(0),
	I40E_GLPES_PFTCPRTXSEG(0),
	I40E_GLPES_PFTCPRXOPTERR(0),
	I40E_GLPES_PFTCPRXPROTOERR(0),
	I40E_GLPES_PFRXVLANERR(0)
};

static u32 i40iw_stat_offsets_64[IRDMA_HW_STAT_INDEX_MAX_64] = {
	I40E_GLPES_PFIP4RXOCTSLO(0),
	I40E_GLPES_PFIP4RXPKTSLO(0),
	I40E_GLPES_PFIP4RXFRAGSLO(0),
	I40E_GLPES_PFIP4RXMCPKTSLO(0),
	I40E_GLPES_PFIP4TXOCTSLO(0),
	I40E_GLPES_PFIP4TXPKTSLO(0),
	I40E_GLPES_PFIP4TXFRAGSLO(0),
	I40E_GLPES_PFIP4TXMCPKTSLO(0),
	I40E_GLPES_PFIP6RXOCTSLO(0),
	I40E_GLPES_PFIP6RXPKTSLO(0),
	I40E_GLPES_PFIP6RXFRAGSLO(0),
	I40E_GLPES_PFIP6RXMCPKTSLO(0),
	I40E_GLPES_PFIP6TXOCTSLO(0),
	I40E_GLPES_PFIP6TXPKTSLO(0),
	I40E_GLPES_PFIP6TXFRAGSLO(0),
	I40E_GLPES_PFIP6TXMCPKTSLO(0),
	I40E_GLPES_PFTCPRXSEGSLO(0),
	I40E_GLPES_PFTCPTXSEGLO(0),
	I40E_GLPES_PFRDMARXRDSLO(0),
	I40E_GLPES_PFRDMARXSNDSLO(0),
	I40E_GLPES_PFRDMARXWRSLO(0),
	I40E_GLPES_PFRDMATXRDSLO(0),
	I40E_GLPES_PFRDMATXSNDSLO(0),
	I40E_GLPES_PFRDMATXWRSLO(0),
	I40E_GLPES_PFRDMAVBNDLO(0),
	I40E_GLPES_PFRDMAVINVLO(0),
	I40E_GLPES_PFIP4RXMCOCTSLO(0),
	I40E_GLPES_PFIP4TXMCOCTSLO(0),
	I40E_GLPES_PFIP6RXMCOCTSLO(0),
	I40E_GLPES_PFIP6TXMCOCTSLO(0),
	I40E_GLPES_PFUDPRXPKTSLO(0),
	I40E_GLPES_PFUDPTXPKTSLO(0)
};

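/* i40iw specific field masks and their shift amounts, in matching order */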
static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
	I40E_PFPE_CCQPSTATUS_CCQP_DONE,
	I40E_PFPE_CCQPSTATUS_CCQP_ERR,
	I40E_CQPSQ_STAG_PDID,
	I40E_CQPSQ_CQ_CEQID,
	I40E_CQPSQ_CQ_CQID,
	I40E_COMMIT_FPM_CQCNT,
};

static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
	I40E_PFPE_CCQPSTATUS_CCQP_DONE_S,
	I40E_PFPE_CCQPSTATUS_CCQP_ERR_S,
	I40E_CQPSQ_STAG_PDID_S,
	I40E_CQPSQ_CQ_CEQID_S,
	I40E_CQPSQ_CQ_CQID_S,
	I40E_COMMIT_FPM_CQCNT_S,
};

/**
 * i40iw_config_ceq - Configure CEQ interrupt
 * @dev: pointer to the device structure
 * @ceq_id: Completion Event Queue ID
 * @idx: vector index
 * @enable: Enable CEQ interrupt when true
 */
static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			     bool enable)
{
	u32 reg_val;

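	/* Attach the CEQ as the first queue on the interrupt linked list of
	 * MSI-X vector idx (the LNKLSTN register array starts at vector 1,
	 * hence idx - 1).
	 */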
	reg_val = FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_INDX, ceq_id) |
		  FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_TYPE, QUEUE_TYPE_CEQ);
	wr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), reg_val);

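	/* Enable the vector; ITR index 3 selects no interrupt throttling */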
	reg_val = FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX, 0x3) |
		  FIELD_PREP(I40E_PFINT_DYN_CTLN_INTENA, 0x1);
	wr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), reg_val);

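	/* Program the per-CEQ control register: interrupt cause enable/disable,
	 * MSI-X vector binding, and a null next-queue index to end the list.
	 */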
	reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
		  FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
		  FIELD_PREP(I40E_PFINT_CEQCTL_NEXTQ_INDX, NULL_QUEUE_INDEX) |
		  FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 0x3);

	wr32(dev->hw, i40iw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, reg_val);
}

/**
 * i40iw_ena_irq - Enable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx)
{
	u32 val;

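	/* Re-enable the vector and clear its pending-bit-array entry; the
	 * DYN_CTLN registers likewise start at vector 1, hence idx - 1.
	 */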
	val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 0x1) |
	      FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 0x1) |
	      FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0x3);
	wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), val);
}

/**
 * i40iw_disable_irq - Disable interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 */
static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx)
{
	wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), 0);
}

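/* Gen-1 interrupt ops; AEQ configuration comes from the common irdma_cfg_aeq */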
static const struct irdma_irq_ops i40iw_irq_ops = {
	.irdma_cfg_aeq = irdma_cfg_aeq,
	.irdma_cfg_ceq = i40iw_config_ceq,
	.irdma_dis_irq = i40iw_disable_irq,
	.irdma_en_irq = i40iw_ena_irq,
};

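/**
 * i40iw_init_hw - set up gen-1 register map, masks, shifts and HW attributes
 * @dev: pointer to the device structure
 */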
void i40iw_init_hw(struct irdma_sc_dev *dev)
{
	int i;
	u8 __iomem *hw_addr;

	for (i = 0; i < IRDMA_MAX_REGS; ++i) {
		hw_addr = dev->hw->hw_addr;

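		/* The doorbell entry holds a plain offset rather than an MMIO
		 * address, so do not add the BAR base for that slot.
		 */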
		if (i == IRDMA_DB_ADDR_OFFSET)
			hw_addr = NULL;

		dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr);
	}

	for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_32; ++i)
		dev->hw_stats_regs_32[i] = i40iw_stat_offsets_32[i];

	for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_64; ++i)
		dev->hw_stats_regs_64[i] = i40iw_stat_offsets_64[i];

	dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;
	dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;

	for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
		dev->hw_shifts[i] = i40iw_shifts[i];

	for (i = 0; i < IRDMA_MAX_MASKS; ++i)
		dev->hw_masks[i] = i40iw_masks[i];

	dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
	dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
	dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
	dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
	dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
	dev->ceq_itr_mask_db = NULL;
	dev->aeq_itr_mask_db = NULL;
	dev->irq_ops = &i40iw_irq_ops;

	/* Set up the hardware limits; HMC may limit them further */
	dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;
	dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
	dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
	dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
	dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
	dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
	dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA;
	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA;
	dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;
	dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;
	dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;
	dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
	dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
	dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
}