
/*****************************************************************************
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 *****************************************************************************/

#if defined(_VBD_)
#include <ntddk.h>
#elif defined(_VBD_CMD_)
#include "vc_os_emul.h"
#endif

#include "../../Windows/b10bdrv/um_lock.h"

// portable integer type of the pointer size for current platform (64/32)
typedef ULONG_PTR mm_int_ptr_t;
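
/*
 * Illustrative only (not part of the original header): mm_int_ptr_t is meant
 * for pointer <-> integer round trips that must work on both 32- and 64-bit
 * builds, e.g.
 *
 *     void        *cookie = some_ptr;             // hypothetical pointer
 *     mm_int_ptr_t val    = (mm_int_ptr_t)cookie; // wide enough on 32/64 bit
 *     void        *back   = (void *)val;          // round-trips safely
 */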

typedef spin_lock_t mm_spin_lock_t;

#if defined(_IA64_) || defined(_VBD_CMD_)
#define mm_read_barrier_imp()  KeMemoryBarrier()
#else
#define mm_read_barrier_imp()  KeMemoryBarrierWithoutFence()
#endif

/* Sections that are different between VBD_CMD and VBD (shouldn't be a lot); a usage sketch follows this block. */
#if defined(_VBD_)
lm_status_t mm_get_bar_offset_imp(struct _lm_device_t *pdev,
                                  u8_t bar_num,
                                  lm_address_t *bar_addr);

lm_status_t mm_get_bar_size_imp(struct _lm_device_t *pdev,
                                 u8_t bar_num,
                                 u32_t *bar_sz);

#else

#define mm_get_bar_offset_imp(pdev, bar_num, bar_addr) \
    lm_get_bar_offset_direct(pdev, bar_num, bar_addr)

#define mm_get_bar_size_imp(pdev, bar_num, val_p) \
    lm_get_bar_size_direct(pdev, bar_num, val_p)


#endif
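
/*
 * Usage sketch (not part of the original header): regardless of which build
 * is active, portable code is expected to go through the mm_ wrappers rather
 * than calling lm_get_bar_*_direct() itself.  Something along these lines,
 * assuming "pdev" is an initialized lm device and BAR 0 is the register BAR:
 *
 *     lm_address_t bar_addr;
 *     u32_t        bar_size = 0;
 *
 *     if (mm_get_bar_offset_imp(pdev, 0, &bar_addr) == LM_STATUS_SUCCESS &&
 *         mm_get_bar_size_imp(pdev, 0, &bar_size)   == LM_STATUS_SUCCESS)
 *     {
 *         // bar_addr/bar_size now describe BAR 0 for the mapping code
 *     }
 */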

#define mm_write_barrier_imp() KeMemoryBarrier()
#define mm_barrier_imp()       KeMemoryBarrier()
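
/*
 * Illustrative sketch (not from the original source): the classic pattern the
 * read/write barriers support is publishing data before an index that the
 * consumer polls.  The names below (prod_data, prod_idx) are hypothetical:
 *
 *     // producer
 *     ring->prod_data[i] = val;
 *     mm_write_barrier_imp();      // make the data visible before the index
 *     ring->prod_idx = i;
 *
 *     // consumer
 *     idx = ring->prod_idx;
 *     mm_read_barrier_imp();       // don't read data older than the index
 *     val = ring->prod_data[idx];
 */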

#define mm_atomic_set_imp(_p, _v) InterlockedExchange((long*)(_p), (long)(_v))

#define mm_atomic_dec_imp(_p) InterlockedDecrement((long*)(_p))
#define mm_atomic_inc_imp(_p) InterlockedIncrement((long*)(_p))

#define mm_atomic_add_imp(_p, _v) \
    InterlockedExchangeAdd((long*)(_p), (long)(_v))
#define mm_atomic_sub_imp(_p, _v) \
    InterlockedExchangeAdd((long*)(_p), -1*(long)(_v))

#define mm_atomic_and_imp(_p, _v)      InterlockedAnd((long*)(_p), (long)(_v))
#define mm_atomic_long_and_imp(_p, _v) mm_atomic_and_imp((_p), (_v))

#define mm_atomic_or_imp(_p, _v)      InterlockedOr((long*)(_p), (long)(_v))
#define mm_atomic_long_or_imp(_p, _v) mm_atomic_or_imp((_p), (_v))

#define mm_atomic_read_imp(_p) \
    InterlockedExchangeAdd((long*)(_p), (long)(0))
#define mm_atomic_long_read_imp(_p) mm_atomic_read_imp((_p))

#define mm_atomic_cmpxchg_imp(_p, _old_val, _new_val) \
    InterlockedCompareExchange((long*)(_p), (long)(_new_val), (long)(_old_val))
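
/*
 * Usage sketch (illustrative, not part of the original header): portable lm_*
 * code uses these wrappers instead of calling the Win32 Interlocked* routines
 * directly.  Note the argument order: mm_atomic_cmpxchg_imp(ptr, old, new)
 * forwards to InterlockedCompareExchange(ptr, new, old) and returns the value
 * previously stored at *ptr; mm_atomic_read_imp() is an add-of-zero, so it
 * returns the current value with a full interlocked fence.  The counter below
 * is hypothetical:
 *
 *     long refcnt = 0;
 *
 *     mm_atomic_set_imp(&refcnt, 1);
 *     mm_atomic_inc_imp(&refcnt);
 *
 *     // swap in 3 only if the counter is still 2
 *     if (mm_atomic_cmpxchg_imp(&refcnt, 2, 3) == 2)
 *     {
 *         // we observed 2 and replaced it with 3
 *     }
 *
 *     long cur = mm_atomic_read_imp(&refcnt);
 */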


/*
 * Ring the doorbell for connection CID: the PF path adds the DPM trigger
 * offset, the VF path writes to its mapped CID doorbell BAR directly.
 */
#define MM_WRITE_DOORBELL_IMP(PDEV, BAR, CID, VAL) \
    do \
    { \
        if (IS_PFDEV(PDEV)) \
        { \
            LM_BAR_WR32_ADDRESS((PDEV), ((u8_t *)PFDEV(PDEV)->context_info->array[VF_TO_PF_CID((PDEV), (CID))].cid_resc.mapped_cid_bar_addr + (DPM_TRIGER_TYPE)), (VAL)); \
        } \
        else \
        { \
            LM_BAR_WR32_ADDRESS((PDEV), ((u8_t *)(PDEV)->context_info->array[VF_TO_PF_CID((PDEV), (CID))].cid_resc.mapped_cid_bar_addr), (VAL)); \
        } \
    } while (0)

#define MM_REGISTER_LPME_IMP(_pdev, _func, _b_fw_access, _b_queue_for_fw) \
    mm_register_lpme((_pdev), (_func), (_b_fw_access), (_b_queue_for_fw))


void MM_ACQUIRE_SPQ_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_SPQ_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_ACQUIRE_SPQ_LOCK_DPC_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_SPQ_LOCK_DPC_IMP(struct _lm_device_t *_pdev);

void MM_ACQUIRE_CID_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_CID_LOCK_IMP(struct _lm_device_t *_pdev);

void MM_ACQUIRE_REQUEST_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_REQUEST_LOCK_IMP(struct _lm_device_t *_pdev);

#define MM_ACQUIRE_REQUEST_LOCK_DPC_IMP(pdev)
#define MM_RELEASE_REQUEST_LOCK_DPC_IMP(pdev)

void MM_ACQUIRE_PHY_LOCK_IMP(struct _lm_device_t * pDev);
void MM_RELEASE_PHY_LOCK_IMP(struct _lm_device_t * pDev);
void MM_ACQUIRE_PHY_LOCK_DPC_IMP(struct _lm_device_t * pDev);
void MM_RELEASE_PHY_LOCK_DPC_IMP(struct _lm_device_t * pDev);

void MM_ACQUIRE_ISLES_CONTROL_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_ISLES_CONTROL_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_ACQUIRE_ISLES_CONTROL_LOCK_DPC_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_ISLES_CONTROL_LOCK_DPC_IMP(struct _lm_device_t *_pdev);

#define MM_ACQUIRE_RAMROD_COMP_LOCK_IMP(_pdev)
#define MM_RELEASE_RAMROD_COMP_LOCK_IMP(_pdev)

void MM_ACQUIRE_MCP_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_MCP_LOCK_IMP(struct _lm_device_t *_pdev);

void MM_ACQUIRE_IND_REG_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_IND_REG_LOCK_IMP(struct _lm_device_t *_pdev);

void MM_ACQUIRE_LOADER_LOCK_IMP(void);
void MM_RELEASE_LOADER_LOCK_IMP(void);

void MM_ACQUIRE_SP_REQ_MGR_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_SP_REQ_MGR_LOCK_IMP(struct _lm_device_t *_pdev);

void MM_ACQUIRE_SB_LOCK_IMP(struct _lm_device_t *_pdev, u8_t _sb_idx);
void MM_RELEASE_SB_LOCK_IMP(struct _lm_device_t *_pdev, u8_t _sb_idx);

void MM_ACQUIRE_ETH_CON_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_ETH_CON_LOCK_IMP(struct _lm_device_t *_pdev);

#ifdef VF_INVOLVED

void MM_ACQUIRE_PF_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_PF_LOCK_IMP(struct _lm_device_t *_pdev);

void MM_ACQUIRE_VFS_STATS_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_VFS_STATS_LOCK_IMP(struct _lm_device_t *_pdev);
void MM_ACQUIRE_VFS_STATS_LOCK_DPC_IMP(struct _lm_device_t *_pdev);
void MM_RELEASE_VFS_STATS_LOCK_DPC_IMP(struct _lm_device_t *_pdev);

void
mm_sriov_invalidate_vf_block(
    struct _lm_device_t *pdev,
    u16_t       vf_id,
    u64_t       invalidate_block);

#endif /* VF_INVOLVED */


lm_status_t mm_er_initiate_recovery_imp(struct _lm_device_t * pdev);

typedef void lm_generic_dpc_func(struct _lm_device_t *pdev);
lm_status_t mm_register_dpc_imp(struct _lm_device_t *_pdev,
                                lm_generic_dpc_func *func);

void mm_empty_ramrod_received_imp(struct _lm_device_t *_pdev,
                                  const u32_t empty_data);

void mm_dbus_start_if_enabled_imp(struct _lm_device_t *_pdev);
void mm_dbus_stop_if_started_imp(struct _lm_device_t *_pdev);


#ifdef BIG_ENDIAN
// LE
#define mm_le16_to_cpu_imp(val) SWAP_BYTES16(val)
#define mm_cpu_to_le16_imp(val) SWAP_BYTES16(val)
#define mm_le32_to_cpu_imp(val) SWAP_BYTES32(val)
#define mm_cpu_to_le32_imp(val) SWAP_BYTES32(val)
// BE
#define mm_be32_to_cpu_imp(val) (val)
#define mm_cpu_to_be32_imp(val) (val)
#define mm_be16_to_cpu_imp(val) (val)
#define mm_cpu_to_be16_imp(val) (val)
#else /* LITTLE_ENDIAN */
// LE
#define mm_le16_to_cpu_imp(val) (val)
#define mm_cpu_to_le16_imp(val) (val)
#define mm_le32_to_cpu_imp(val) (val)
#define mm_cpu_to_le32_imp(val) (val)
// BE
#define mm_be32_to_cpu_imp(val) SWAP_BYTES32(val)
#define mm_cpu_to_be32_imp(val) SWAP_BYTES32(val)
#define mm_be16_to_cpu_imp(val) SWAP_BYTES16(val)
#define mm_cpu_to_be16_imp(val) SWAP_BYTES16(val)
#endif /* ifdef BIG_ENDIAN */
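
/*
 * Usage sketch (illustrative only): descriptor fields that the chip keeps in
 * little-endian order are converted through these wrappers instead of
 * assuming host byte order.  The structure/field names below are
 * hypothetical:
 *
 *     u16_t prod = mm_le16_to_cpu_imp(rx_cqe->prod_le);   // HW -> CPU
 *     tx_bd->len_le = mm_cpu_to_le16_imp(pkt_len);        // CPU -> HW
 */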

u32_t mm_get_cpu_count(void);


#define RESOURCE_TRACE_FLAG_COUNTERS 0x01
#define RESOURCE_TRACE_FLAG_DESC     0x02
#define RESOURCE_TRACE_FLAG_MDL      0x04 // Currently not working well.

#define MEM_TRACE_FLAG_HIGH          (RESOURCE_TRACE_FLAG_COUNTERS | RESOURCE_TRACE_FLAG_DESC)
#define MEM_TRACE_FLAG_DEFAULT       RESOURCE_TRACE_FLAG_COUNTERS

#define RESOURCE_TRACE_INC(_pdev, _cli_idx, _type, _field)             \
{                                                                      \
    DbgBreakIf((_cli_idx) >= MAX_DO_TYPE_CNT);                         \
    DbgBreakIf((_type) >= RESOURCE_TYPE_MAX);                          \
    InterlockedIncrement((long*)&_pdev->resource_list.                 \
                                   type_counters_arr[_cli_idx][_type]. \
                                     _field);                          \
}

#define RESOURCE_TRACE_DEC(_pdev, _cli_idx, _type, _field)             \
{                                                                      \
    DbgBreakIf((_cli_idx) >= MAX_DO_TYPE_CNT);                         \
    DbgBreakIf((_type) >= RESOURCE_TYPE_MAX);                          \
    InterlockedDecrement((long*)&_pdev->resource_list.                 \
                                   type_counters_arr[_cli_idx][_type]. \
                                     _field);                          \
}

#define RESOURCE_TRACE_ADD(_pdev, _cli_idx, _type, _field, _size)         \
{                                                                         \
    DbgBreakIf((_cli_idx) >= MAX_DO_TYPE_CNT);                            \
    DbgBreakIf((_type) >= RESOURCE_TYPE_MAX);                             \
    InterlockedExchangeAdd((long*)&(_pdev->resource_list.                 \
                                      type_counters_arr[_cli_idx][_type]. \
                                        _field), (long)(_size));          \
}

#define RESOURCE_TRACE_SUB(_pdev, _cli_idx, _type, _field, _size) \
    RESOURCE_TRACE_ADD( _pdev, _cli_idx, _type, _field, 0L-(long)_size)

#define RESOURCE_TRACE_UPDATE_PEAK(_pdev, _cli_idx, _type)                  \
{                                                                           \
    DbgBreakIf((_cli_idx) >= MAX_DO_TYPE_CNT);                              \
    DbgBreakIf((_type) >= RESOURCE_TYPE_MAX);                               \
    if (_pdev->resource_list.type_counters_arr[_cli_idx][_type].size >      \
        _pdev->resource_list.type_counters_arr[_cli_idx][_type].size_peak)  \
    {                                                                       \
        _pdev->resource_list.type_counters_arr[_cli_idx][_type].size_peak = \
            _pdev->resource_list.type_counters_arr[_cli_idx][_type].size;   \
    }                                                                       \
    if (_pdev->resource_list.type_counters_arr[_cli_idx][_type].cnt >       \
        _pdev->resource_list.type_counters_arr[_cli_idx][_type].cnt_peak)   \
    {                                                                       \
        _pdev->resource_list.type_counters_arr[_cli_idx][_type].cnt_peak =  \
            _pdev->resource_list.type_counters_arr[_cli_idx][_type].cnt;    \
    }                                                                       \
}
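
/*
 * Usage sketch (not part of the original header): an allocation path is
 * expected to bump the counters and then refresh the peaks, and the matching
 * free path to undo both.  RESOURCE_TYPE_MEM is used here only as an example
 * of a resource type; cnt and size are the per-slot fields referenced by
 * RESOURCE_TRACE_UPDATE_PEAK above:
 *
 *     // allocate "len" bytes on behalf of client cli_idx
 *     RESOURCE_TRACE_INC(pdev, cli_idx, RESOURCE_TYPE_MEM, cnt);
 *     RESOURCE_TRACE_ADD(pdev, cli_idx, RESOURCE_TYPE_MEM, size, len);
 *     RESOURCE_TRACE_UPDATE_PEAK(pdev, cli_idx, RESOURCE_TYPE_MEM);
 *
 *     // free path
 *     RESOURCE_TRACE_DEC(pdev, cli_idx, RESOURCE_TYPE_MEM, cnt);
 *     RESOURCE_TRACE_SUB(pdev, cli_idx, RESOURCE_TYPE_MEM, size, len);
 */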


/* this is _NTDDK_ only... */
u32_t mm_get_wol_flags(struct _lm_device_t* pdev);

/* this is _NTDDK_ only... */
u32_t mm_get_vmq_cnt(struct _lm_device_t* pdev);

/* this is _NTDDK_ only... */
u32_t mm_get_feature_flags(struct _lm_device_t* pdev);

u32_t mm_get_cap_offset(struct _lm_device_t *pdev, u32_t cap_id);


void mm_dcb_indicate_event(
    IN struct _lm_device_t  *pdev,
    IN lm_event_code_t      event,
    IN u8_t                 *event_buf,
    IN u32_t                event_buf_size
    );
#define MM_DCB_INDICATE_EVENT(_pdev, _event, _event_buf, _event_buf_size) \
    mm_dcb_indicate_event(_pdev, _event, _event_buf, _event_buf_size)

u32_t
mm_dcb_mp_l2_is_enable(struct _lm_device_t *pdev);
#define MM_DCB_MP_L2_IS_ENABLE(_pdev)  (mm_dcb_mp_l2_is_enable(_pdev))