/*****************************************************************************
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 *****************************************************************************/

#ifndef _MM_H
#define _MM_H

#include <sys/va_list.h>

/*
 * FORCEINLINE is primarily meaningful for MS compilers.  This block keeps the
 * definition compatible with older MS compilers as well as with non-MS
 * compilers.
 */
#ifndef FORCEINLINE
#if defined(_MSC_VER) && (_MSC_VER >= 1200) /* Windows */
#define FORCEINLINE __forceinline
#else
#define FORCEINLINE __inline
#endif /* _MSC_VER */
#endif /* !FORCEINLINE */
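
/*
 * Usage sketch (illustrative only, not part of the original header):
 * FORCEINLINE is intended for small hot-path helpers, e.g.
 *
 *   static FORCEINLINE u32_t _sample_min_u32(u32_t a, u32_t b)  // hypothetical helper
 *   {
 *       return (a < b) ? a : b;
 *   }
 */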

/* common lpme callback used by multiple platforms */
typedef void lm_generic_workitem_function(struct _lm_device_t *pdev);
lm_status_t mm_register_lpme(struct _lm_device_t *_pdev,
                             lm_generic_workitem_function *func,
                             const u8_t b_fw_access,
                             const u8_t b_queue_for_fw);
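
/*
 * Usage sketch (illustrative only): registering a deferred work item.  The
 * callback name below is hypothetical and not part of this header.
 *
 *   static void _sample_lpme_cb(struct _lm_device_t *pdev)
 *   {
 *       // runs later from the platform's work-item context
 *   }
 *
 *   lm_status_t rc = mm_register_lpme(pdev, _sample_lpme_cb,
 *                                     1,   // b_fw_access
 *                                     0);  // b_queue_for_fw
 */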

/* mm_i2c for special elink query */
lm_status_t mm_i2c_update(struct _lm_device_t *pdev);

/* query system time - for time stamps */
u64_t       mm_query_system_time(void);

#if defined(UEFI)
#include "mm_uefi.h"
#elif defined(DOS)
#include "mm_dos.h"
#elif defined(__LINUX) || defined (USER_LINUX)
#include "mm_linux.h"
#elif defined(__SunOS)
#include "mm_solaris.h"
#elif defined(__USER_MODE_DEBUG)
#include "mm_user_mode_debug.h"
#elif defined(_VBD_) || defined(_VBD_CMD_)
#include "mm_vbd.h"
#elif defined (NDISMONO) // VBD
#include "mm_ndismono.h"
#endif

unsigned int mm_crc32(unsigned char *address, unsigned int size, unsigned int crc);
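
/*
 * Usage sketch (illustrative only): accumulating a CRC over a buffer.  The
 * seed value shown is an assumption; use whatever seed the caller's protocol
 * requires.  'buf' and 'buf_len' are hypothetical.
 *
 *   unsigned int crc = 0xffffffff;       // assumed seed
 *   crc = mm_crc32(buf, buf_len, crc);
 */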

#define mm_read_barrier()  mm_read_barrier_imp()
#define mm_write_barrier() mm_write_barrier_imp()
#define mm_barrier()       mm_barrier_imp()

#define mm_atomic_set(/* u32_t* */_p, /* u32_t */_v) mm_atomic_set_imp(_p, _v)

#define mm_atomic_dec(/* u32_t* */_p) mm_atomic_dec_imp(_p)
#define mm_atomic_inc(/* u32_t* */_p) mm_atomic_inc_imp(_p)

#define mm_atomic_add(/* u32_t* */_p, /* u32_t */_v) mm_atomic_add_imp(_p, _v)

#define mm_atomic_sub(/* u32_t* */_p, /* u32_t */_v) mm_atomic_sub_imp(_p, _v)

#define mm_atomic_and(/* u32_t* */_p, /* u32_t */_v) mm_atomic_and_imp(_p, _v)
#define mm_atomic_long_and(/* unsigned long* */_p, /* unsigned long */_v) \
    mm_atomic_long_and_imp(_p, _v)

#define mm_atomic_or(/* u32_t* */_p, /* u32_t */_v)  mm_atomic_or_imp(_p, _v)
#define mm_atomic_long_or(/* unsigned long* */_p, /* unsigned long */_v) \
    mm_atomic_long_or_imp(_p, _v)

#define mm_atomic_read(/* u32_t* */_p) mm_atomic_read_imp(_p)
#define mm_atomic_long_read(/* unsigned long* */_p)  \
    mm_atomic_long_read_imp(_p)

#define mm_atomic_cmpxchg(/* u32_t* */_p, /* u32_t */_old_val, /* u32_t */_new_val) \
    mm_atomic_cmpxchg_imp(_p, _old_val, _new_val)
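
/*
 * Usage sketch (illustrative only): claiming a flag with the compare-exchange
 * wrapper, assuming it returns the previous value as compare-and-swap
 * conventionally does.  'flag' is a hypothetical u32_t.
 *
 *   mm_atomic_set(&flag, 0);
 *   ...
 *   if (mm_atomic_cmpxchg(&flag, 0, 1) == 0)
 *   {
 *       // this caller won the race and now owns the flag
 *   }
 */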

#define MM_WRITE_DOORBELL(/* struct _lm_device_t* */PDEV, /* u32_t */BAR, /* u32_t */CID, /* u32_t */VAL) \
    MM_WRITE_DOORBELL_IMP(PDEV, BAR, CID, VAL)

#define MM_REGISTER_LPME(/* struct _lm_device_t* */_pdev, /* lm_generic_workitem_function */_func, /* u8_t */_b_fw_access, /* u8_t */_b_queue_for_fw) \
    MM_REGISTER_LPME_IMP(_pdev, _func, _b_fw_access, _b_queue_for_fw)

#define MM_ACQUIRE_SPQ_LOCK(/* struct _lm_device_t* */pdev)     MM_ACQUIRE_SPQ_LOCK_IMP(pdev)
#define MM_RELEASE_SPQ_LOCK(/* struct _lm_device_t* */pdev)     MM_RELEASE_SPQ_LOCK_IMP(pdev)
#define MM_ACQUIRE_SPQ_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_ACQUIRE_SPQ_LOCK_DPC_IMP(pdev)
#define MM_RELEASE_SPQ_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_RELEASE_SPQ_LOCK_DPC_IMP(pdev)

#define MM_ACQUIRE_CID_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_CID_LOCK_IMP(pdev)
#define MM_RELEASE_CID_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_CID_LOCK_IMP(pdev)

#define MM_ACQUIRE_REQUEST_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_REQUEST_LOCK_IMP(pdev)
#define MM_RELEASE_REQUEST_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_REQUEST_LOCK_IMP(pdev)

#define MM_ACQUIRE_REQUEST_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_ACQUIRE_REQUEST_LOCK_DPC_IMP(pdev)
#define MM_RELEASE_REQUEST_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_RELEASE_REQUEST_LOCK_DPC_IMP(pdev)

#define MM_ACQUIRE_PHY_LOCK(/* struct _lm_device_t* */pdev)     MM_ACQUIRE_PHY_LOCK_IMP(pdev)
#define MM_RELEASE_PHY_LOCK(/* struct _lm_device_t* */pdev)     MM_RELEASE_PHY_LOCK_IMP(pdev)
#define MM_ACQUIRE_PHY_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_ACQUIRE_PHY_LOCK_DPC_IMP(pdev)
#define MM_RELEASE_PHY_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_RELEASE_PHY_LOCK_DPC_IMP(pdev)

#define MM_ACQUIRE_MCP_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_MCP_LOCK_IMP(pdev)
#define MM_RELEASE_MCP_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_MCP_LOCK_IMP(pdev)

#define MM_ACQUIRE_ISLES_CONTROL_LOCK(/* struct _lm_device_t* */pdev)     MM_ACQUIRE_ISLES_CONTROL_LOCK_IMP(pdev)
#define MM_RELEASE_ISLES_CONTROL_LOCK(/* struct _lm_device_t* */pdev)     MM_RELEASE_ISLES_CONTROL_LOCK_IMP(pdev)
#define MM_ACQUIRE_ISLES_CONTROL_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_ACQUIRE_ISLES_CONTROL_LOCK_DPC_IMP(pdev)
#define MM_RELEASE_ISLES_CONTROL_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_RELEASE_ISLES_CONTROL_LOCK_DPC_IMP(pdev)

#define MM_ACQUIRE_RAMROD_COMP_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_RAMROD_COMP_LOCK_IMP(pdev)
#define MM_RELEASE_RAMROD_COMP_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_RAMROD_COMP_LOCK_IMP(pdev)

#define MM_ACQUIRE_IND_REG_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_IND_REG_LOCK_IMP(pdev)
#define MM_RELEASE_IND_REG_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_IND_REG_LOCK_IMP(pdev)

#define MM_ACQUIRE_LOADER_LOCK() MM_ACQUIRE_LOADER_LOCK_IMP()
#define MM_RELEASE_LOADER_LOCK() MM_RELEASE_LOADER_LOCK_IMP()

#define MM_ACQUIRE_SP_REQ_MGR_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_SP_REQ_MGR_LOCK_IMP(pdev)
#define MM_RELEASE_SP_REQ_MGR_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_SP_REQ_MGR_LOCK_IMP(pdev)

#define MM_ACQUIRE_SB_LOCK(/* struct _lm_device_t* */pdev, /* u8_t */sb_idx) MM_ACQUIRE_SB_LOCK_IMP(pdev, sb_idx)
#define MM_RELEASE_SB_LOCK(/* struct _lm_device_t* */pdev, /* u8_t */sb_idx) MM_RELEASE_SB_LOCK_IMP(pdev, sb_idx)

void mm_init_lock(struct _lm_device_t *_pdev, mm_spin_lock_t *spinlock);

#ifdef _VBD_
#if defined(NTDDI_WIN8)
__drv_maxIRQL(DISPATCH_LEVEL)
__drv_at(lock->irql, __drv_savesIRQL)
__drv_setsIRQL(DISPATCH_LEVEL)
#endif
#endif
lm_status_t mm_acquire_lock(mm_spin_lock_t *spinlock);

#ifdef _VBD_
#if defined(NTDDI_WIN8)
_IRQL_requires_(DISPATCH_LEVEL)
__drv_at(lock->irql, __drv_restoresIRQL)
#endif
#endif
lm_status_t mm_release_lock(mm_spin_lock_t *spinlock);
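
/*
 * Usage sketch (illustrative only): typical pairing of the generic spinlock
 * wrappers.  'my_lock' is a hypothetical mm_spin_lock_t.
 *
 *   mm_spin_lock_t my_lock;
 *
 *   mm_init_lock(pdev, &my_lock);
 *   ...
 *   if (mm_acquire_lock(&my_lock) == LM_STATUS_SUCCESS)
 *   {
 *       // critical section
 *       mm_release_lock(&my_lock);
 *   }
 */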

#define MM_ACQUIRE_ETH_CON_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_ETH_CON_LOCK_IMP(pdev)
#define MM_RELEASE_ETH_CON_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_ETH_CON_LOCK_IMP(pdev)

#ifdef VF_INVOLVED

#define MM_ACQUIRE_PF_LOCK(/* struct _lm_device_t* */pdev) MM_ACQUIRE_PF_LOCK_IMP(pdev)
#define MM_RELEASE_PF_LOCK(/* struct _lm_device_t* */pdev) MM_RELEASE_PF_LOCK_IMP(pdev)

#define MM_ACQUIRE_VFS_STATS_LOCK(/* struct _lm_device_t* */pdev)     MM_ACQUIRE_VFS_STATS_LOCK_IMP(pdev)
#define MM_RELEASE_VFS_STATS_LOCK(/* struct _lm_device_t* */pdev)     MM_RELEASE_VFS_STATS_LOCK_IMP(pdev)
#define MM_ACQUIRE_VFS_STATS_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_ACQUIRE_VFS_STATS_LOCK_DPC_IMP(pdev)
#define MM_RELEASE_VFS_STATS_LOCK_DPC(/* struct _lm_device_t* */pdev) MM_RELEASE_VFS_STATS_LOCK_DPC_IMP(pdev)

#endif /* VF_INVOLVED */


#define mm_er_initiate_recovery(/* struct _lm_device_t* */pdev) \
    mm_er_initiate_recovery_imp(pdev)

#define MM_REGISTER_DPC(/* struct _lm_device_t* */_pdev, /* lm_generic_dpc_func */_func) \
    mm_register_dpc_imp(_pdev, _func)

#define MM_EMPTY_RAMROD_RECEIVED(/* struct _lm_device_t* */pdev, /* lm_cli_idx_t */lm_cli_idx) \
    mm_empty_ramrod_received_imp(pdev, lm_cli_idx)

#define mm_dbus_start_if_enable(/* struct _lm_device_t* */pdev) \
    mm_dbus_start_if_enabled_imp(pdev)
#define mm_dbus_stop_if_started(/* struct _lm_device_t* */pdev) \
    mm_dbus_stop_if_started_imp(pdev)


/* Busy-wait for the specified number of microseconds. */
void mm_wait(struct _lm_device_t *pdev,
             u32_t delay_us);

/* Read a PCI configuration register (must be 32-bit aligned) */
lm_status_t mm_read_pci(struct _lm_device_t *pdev,
                        u32_t pci_reg,
                        u32_t *reg_value);

/* Write a PCI configuration register (must be 32-bit aligned) */
lm_status_t mm_write_pci(struct _lm_device_t *pdev,
                         u32_t pci_reg,
                         u32_t reg_value);
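
/*
 * Usage sketch (illustrative only): read-modify-write of a 32-bit PCI config
 * register.  The offset and bit below are hypothetical.
 *
 *   u32_t val;
 *
 *   if (mm_read_pci(pdev, 0x4, &val) == LM_STATUS_SUCCESS)
 *   {
 *       val |= 0x1;
 *       mm_write_pci(pdev, 0x4, val);
 *   }
 */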

/*
 * Map the base address of the device registers to system address space so
 * that registers are accessible. The base address will be unmapped when the
 * driver unloads.
 */
void * mm_map_io_base(struct _lm_device_t *pdev,
                      lm_address_t base_addr,
                      u32_t size,
                      u8_t bar);

/* Read driver configuration.  It is called from lm_get_dev_info. */
lm_status_t mm_get_user_config(struct _lm_device_t *pdev);

/* Get the size of a packet descriptor. */
u32_t mm_desc_size(struct _lm_device_t *pdev,
                   u32_t desc_type);
#define DESC_TYPE_L2TX_PACKET 0
#define DESC_TYPE_L2RX_PACKET 1


/* XXX
mm_map_io_space(struct _lm_device_t * pLM,
                lm_address_t  physAddr,
                u8_t          bar,
                u32_t         offset,
                u32_t         size,
                void *        pHandle);
*/
#ifdef __SunOS
void *
mm_map_io_space_solaris(struct _lm_device_t * pLM,
                        lm_address_t          physAddr,
                        u8_t                  bar,
                        u32_t                 offset,
                        u32_t                 size,
                        ddi_acc_handle_t *    pRegAccHandle);
#else
void *
mm_map_io_space(struct _lm_device_t *pdev,
                lm_address_t phys_addr,
                u32_t size);
#endif

void mm_unmap_io_space(struct _lm_device_t *pdev,
                       void *virt_addr,
                       u32_t size);
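
/*
 * Usage sketch (illustrative only): map a register window, use it, then unmap
 * it on teardown (non-Solaris variant shown; the Solaris variant additionally
 * returns a register access handle).  'bar_phys' and 'bar_size' are
 * hypothetical.
 *
 *   void *regs = mm_map_io_space(pdev, bar_phys, bar_size);
 *   if (regs != NULL)
 *   {
 *       // ... register accesses ...
 *       mm_unmap_io_space(pdev, regs, bar_size);
 *   }
 */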


void * mm_alloc_mem_imp(struct _lm_device_t *pdev,
                        u32_t mem_size,
                        const char* sz_file,
                        const unsigned long line,
                        u8_t cli_idx);
#define mm_alloc_mem(_pdev, _mem_size, cli_idx) \
    mm_alloc_mem_imp((_pdev), (_mem_size), __FILE_STRIPPED__, __LINE__, (cli_idx));


void * mm_alloc_phys_mem_imp(struct _lm_device_t* pdev,
                             u32_t mem_size,
                             lm_address_t* phys_mem,
                             u8_t mem_type,
                             const char* sz_file,
                             const unsigned long line,
                             u8_t cli_idx);
#define mm_alloc_phys_mem(_pdev, _mem_size, _phys_mem, _mem_type, cli_idx) \
    mm_alloc_phys_mem_imp((_pdev), (_mem_size), (_phys_mem), (_mem_type), __FILE_STRIPPED__, __LINE__, (cli_idx));


void * mm_rt_alloc_mem_imp(struct _lm_device_t* pdev,
                           u32_t mem_size,
                           const char* sz_file,
                           const unsigned long line,
                           u8_t cli_idx);
#define mm_rt_alloc_mem(_pdev, _mem_size, cli_idx) \
    mm_rt_alloc_mem_imp((_pdev), (_mem_size), __FILE_STRIPPED__, __LINE__, (cli_idx));


void * mm_alloc_phys_mem_align_imp(struct _lm_device_t* pdev,
                                   u32_t mem_size,
                                   lm_address_t* phys_mem,
                                   u32_t alignment,
                                   u8_t mem_type,
                                   const char* sz_file,
                                   const unsigned long line,
                                   u8_t cli_idx);

#define mm_alloc_phys_mem_align(_pdev, _mem_size, _phys_mem, _alignment, _mem_type, cli_idx) \
    mm_alloc_phys_mem_align_imp((_pdev), (_mem_size), (_phys_mem), (_alignment), (_mem_type), __FILE_STRIPPED__, __LINE__, (cli_idx));


void * mm_rt_alloc_phys_mem_imp(struct _lm_device_t* pdev,
                                u32_t mem_size,
                                lm_address_t* phys_mem,
                                u8_t mem_type,
                                const char* sz_file,
                                const unsigned long line,
                                u8_t cli_idx);

#define mm_rt_alloc_phys_mem(_pdev, _mem_size, _phys_mem, _flush_type, cli_idx) \
    mm_rt_alloc_phys_mem_imp((_pdev), (_mem_size), (_phys_mem), (_flush_type), __FILE_STRIPPED__, __LINE__, (cli_idx));


#define PHYS_MEM_TYPE_UNSPECIFIED 0
#define PHYS_MEM_TYPE_NONCACHED   1
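
/*
 * Usage sketch (illustrative only): the wrappers above record the caller's
 * __FILE_STRIPPED__/__LINE__ for allocation tracking.  A typical DMA-able
 * allocation might look like this; 'size' and 'cli_idx' are hypothetical.
 *
 *   lm_address_t phys;
 *   void *virt;
 *
 *   virt = mm_alloc_phys_mem(pdev, size, &phys, PHYS_MEM_TYPE_NONCACHED, cli_idx);
 *   if (virt == NULL)
 *   {
 *       // handle allocation failure
 *   }
 *   mm_mem_zero(virt, size);
 */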


void mm_rt_free_mem(struct _lm_device_t *pdev,
                    void *mem_virt,
                    u32_t mem_size,
                    u8_t cli_idx);

void mm_rt_free_phys_mem(struct _lm_device_t *pdev,
                         u32_t mem_size,
                         void *virt_mem,
                         lm_address_t phys_mem,
                         u8_t cli_idx);


void mm_memset(void *buf, u8_t val, u32_t mem_size);
#define mm_mem_zero(buf, mem_size) mm_memset((buf), 0, (mem_size))

void mm_memcpy(void *destination, const void *source, u32_t mem_size);

u8_t mm_memcmp(void *buf1, void *buf2, u32_t count);
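
/*
 * Usage sketch (illustrative only): zero-initialize a local structure before
 * filling it in.  The structure name is hypothetical.  Note that mm_memcmp()
 * returns a u8_t rather than the three-way int of the C library memcmp().
 *
 *   struct _sample_cfg_t cfg;
 *
 *   mm_mem_zero(&cfg, sizeof(cfg));
 */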


/* Returns the current high-resolution time. */
u64_t mm_get_current_time(struct _lm_device_t *pdev);


/*
 * This routine is called to indicate completion of transmit requests.
 * If 'packet_list' is not NULL, only the packets in 'packet_list' are
 * indicated.  Otherwise, all the packets in the completion queue are
 * indicated.
 */
void mm_indicate_tx(struct _lm_device_t *pdev,
                    u32_t chain_idx,
                    s_list_t *packet_list);


/**
 * @brief
 *      Enables the LM to indicate rx packets directly.  In the
 *      regular rx indication flow the call originates in the UM:
 *      the UM requests the rx packets and then indicates them.
 *      At the time of writing this function was used only for
 *      aborting packets, but it may be used for any kind of
 *      indication.
 *
 * @param pdev
 * @param chain_idx
 * @param packet_list
 * @param ind_status   - SUCCESS / ABORTED
 */
void mm_indicate_rx(struct _lm_device_t *pdev,
                    u32_t                chain_idx,
                    s_list_t            *packet_list,
                    lm_status_t          ind_status);

/* Indicate the current phy link status. */
void mm_indicate_link(struct _lm_device_t *pdev,
                      lm_status_t link,
                      lm_medium_t medium);

/* Indicate a critical HW error that requires all access to the device
   to be completely stopped */
void mm_indicate_hw_failure(struct _lm_device_t *pdev);

/* Call the lm_task_cb_t callback function after the specified delay. */
typedef void(*lm_task_cb_t)(struct _lm_device_t *pdev, void *param);
lm_status_t mm_schedule_task(struct _lm_device_t *pdev,
                             u32_t delay_ms,
                             lm_task_cb_t task,
                             void *param);
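
/*
 * Usage sketch (illustrative only): defer work by roughly 100 ms.  The
 * callback name is hypothetical and not part of this header.
 *
 *   static void _sample_task_cb(struct _lm_device_t *pdev, void *param)
 *   {
 *       // runs later, outside the current context
 *   }
 *
 *   mm_schedule_task(pdev, 100, _sample_task_cb, NULL);
 */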


/* XXX needs description... */
void mm_set_done(struct _lm_device_t *pdev,
                 u32_t cid,
                 void *cookie);


struct sq_pending_command;

void mm_return_sq_pending_command(struct _lm_device_t * pdev,
                                  struct sq_pending_command * pending);

struct sq_pending_command * mm_get_sq_pending_command(struct _lm_device_t * pdev);


u32_t mm_copy_packet_buf(struct _lm_device_t *pdev,
                         struct _lm_packet_t *lmpkt, /* packet to copy from */
                         u8_t *mem_buf,              /* buffer to copy to */
                         u32_t size);                /* number of bytes to copy */
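
/*
 * Usage sketch (illustrative only): copy the first bytes of a packet into a
 * local staging buffer, e.g. for header inspection.  'hdr' is hypothetical.
 *
 *   u8_t  hdr[64];
 *   u32_t copied;
 *
 *   copied = mm_copy_packet_buf(pdev, lmpkt, hdr, sizeof(hdr));
 */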


lm_status_t mm_event_log_generic_arg_fwd(struct _lm_device_t* pdev,
                                         const lm_log_id_t lm_log_id,
                                         va_list ap);

lm_status_t mm_event_log_generic(struct _lm_device_t* pdev,
                                 const lm_log_id_t lm_log_id,
                                 ...);

void mm_print_bdf(int, void*);


/* common alloc and zero memory routine used for all platforms */
static __inline void * mm_rt_zalloc_mem(struct _lm_device_t * pdev, u32_t size)
{
    void * ptr;

    ptr = mm_rt_alloc_mem(pdev, size, 0);

    if (ptr)
    {
        mm_mem_zero(ptr, size);
    }

    return ptr;
}
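
/*
 * Usage sketch (illustrative only): allocate and zero a runtime structure.
 * The structure name is hypothetical.
 *
 *   struct _sample_ctx_t *ctx = mm_rt_zalloc_mem(pdev, sizeof(*ctx));
 *
 *   if (ctx == NULL)
 *   {
 *       // handle allocation failure
 *   }
 */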


u32_t mm_build_ver_string(struct _lm_device_t * pdev);


#ifdef VF_INVOLVED

#ifndef VF_TO_PF_STANDARD_BLOCK_ID
#define VF_TO_PF_STANDARD_BLOCK_ID 0x100
#endif

struct _lm_vf_pf_message_t;
struct _lm_vf_info_t;
struct _lm_sriov_info_t;

void mm_vf_pf_arm_trigger(struct _lm_device_t *pdev,
                          struct _lm_vf_pf_message_t *mess);

lm_status_t mm_vf_pf_write_block_to_sw_channel(struct _lm_device_t *pdev,
                                               u32_t block_id,
                                               void *buffer,
                                               u32_t length);

lm_status_t mm_vf_pf_read_block_from_sw_channel(struct _lm_device_t *pdev,
                                                u32_t block_id,
                                                void *buffer,
                                                u32_t *length);

lm_status_t mm_vf_pf_sw_ch_process_standard_request(struct _lm_device_t *pdev,
                                                    u16_t relative_vf_id,
                                                    void *virt_buffer,
                                                    u32_t length);

lm_status_t mm_vf_pf_sw_ch_retrieve_standard_response(struct _lm_device_t *pdev,
                                                      u16_t relative_vf_id,
                                                      void *virt_buffer,
                                                      u32_t length);

lm_status_t mm_vf_pf_hw_ch_process_standard_request(struct _lm_device_t *pdev,
                                                    u8_t vf_id,
                                                    lm_address_t *vf_pf_message);

lm_status_t mm_vf_pf_upload_standard_request(struct _lm_device_t *pdev,
                                             u8_t vf_id,
                                             lm_address_t *vf_pf_message);

lm_status_t mm_vf_en(struct _lm_device_t* pdev,
                     u16_t vfs_num);

void mm_vf_dis(struct _lm_device_t* pdev);

u16_t mm_get_extended_caps(struct _lm_device_t *pdev,
                           u16_t capabilityID);

lm_status_t mm_get_sriov_info(struct _lm_device_t *pdev,
                              struct _lm_sriov_info_t *info);

lm_status_t mm_pf_get_queues_number(struct _lm_device_t *pdev,
                                    struct _lm_vf_info_t *vf_info,
                                    u8_t *num_rxqs,
                                    u8_t *num_txqs);

lm_status_t mm_pf_get_filters_number(struct _lm_device_t *pdev,
                                     struct _lm_vf_info_t *vf_info,
                                     u8_t *num_mac_filters,
                                     u8_t *num_vlan_filters,
                                     u8_t *num_mc_filters);

lm_status_t mm_pf_get_macs(struct _lm_device_t *pdev,
                           struct _lm_vf_info_t *vf_info,
                           u8_t *permanent_mac_addr,
                           u8_t *current_mac_addr);

void mm_report_malicious_vf(struct _lm_device_t *pdev, struct _lm_vf_info_t *vf_info);

#endif /* ifdef VF_INVOLVED */


#ifdef BIG_ENDIAN
// LE
#define mm_le16_to_cpu(val) mm_le16_to_cpu_imp(val)
#define mm_cpu_to_le16(val) mm_cpu_to_le16_imp(val)
#define mm_le32_to_cpu(val) mm_le32_to_cpu_imp(val)
#define mm_cpu_to_le32(val) mm_cpu_to_le32_imp(val)
// BE
#define mm_be32_to_cpu(val) mm_be32_to_cpu_imp(val)
#define mm_cpu_to_be32(val) mm_cpu_to_be32_imp(val)
#define mm_be16_to_cpu(val) mm_be16_to_cpu_imp(val)
#define mm_cpu_to_be16(val) mm_cpu_to_be16_imp(val)
#else /* LITTLE_ENDIAN */
// LE
#define mm_le16_to_cpu(val) mm_le16_to_cpu_imp(val)
#define mm_cpu_to_le16(val) mm_cpu_to_le16_imp(val)
#define mm_le32_to_cpu(val) mm_le32_to_cpu_imp(val)
#define mm_cpu_to_le32(val) mm_cpu_to_le32_imp(val)
// BE
#define mm_be32_to_cpu(val) mm_be32_to_cpu_imp(val)
#define mm_cpu_to_be32(val) mm_cpu_to_be32_imp(val)
#define mm_be16_to_cpu(val) mm_be16_to_cpu_imp(val)
#define mm_cpu_to_be16(val) mm_cpu_to_be16_imp(val)
#endif /* ifdef BIG_ENDIAN */
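
/*
 * Usage sketch (illustrative only): fields shared with the chip/firmware are
 * kept little-endian in host memory, so loads and stores go through the LE
 * helpers regardless of host endianness.  'hw_prod' is hypothetical.
 *
 *   u16_t prod_idx = mm_le16_to_cpu(hw_prod);
 *   ...
 *   hw_prod = mm_cpu_to_le16(prod_idx);
 */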


#define mm_get_bar_offset(/* struct _lm_device_t* */pdev, /* u8_t */bar_num, /* lm_address_t* */bar_addr) \
    mm_get_bar_offset_imp(pdev, bar_num, bar_addr)

#define mm_get_bar_size(/* struct _lm_device_t* */pdev, /* u8_t */bar_num, /* u32_t* */val_p) \
    mm_get_bar_size_imp(pdev, bar_num, val_p)


#endif /* _MM_H */