/*******************************************************************************
* mm_l4if.h - L4 mm interface
******************************************************************************/
#ifndef _MM_L4IF_H
#define _MM_L4IF_H


/* per-OS methods */
#if defined(DOS)
#include "sync.h"
#define MM_INIT_TCP_LOCK_HANDLE()

#define mm_acquire_tcp_lock(_pdev, con)  LOCK()
#define mm_release_tcp_lock(_pdev, con)  UNLOCK()

#define MM_ACQUIRE_TOE_LOCK(_pdev)      LOCK()
#define MM_RELEASE_TOE_LOCK(_pdev)      UNLOCK()

#define MM_ACQUIRE_TOE_GRQ_LOCK(_pdev, idx)  LOCK()
#define MM_RELEASE_TOE_GRQ_LOCK(_pdev, idx)  UNLOCK()

#define MM_ACQUIRE_TOE_GRQ_LOCK_DPC(_pdev, idx) LOCK()
#define MM_RELEASE_TOE_GRQ_LOCK_DPC(_pdev, idx) UNLOCK()

#elif defined(__LINUX) || defined(__SunOS)
void
mm_acquire_tcp_lock(
    struct _lm_device_t *pdev,
    lm_tcp_con_t *tcp_con);

void
mm_release_tcp_lock(
    struct _lm_device_t *pdev,
    lm_tcp_con_t *tcp_con);

#define MM_INIT_TCP_LOCK_HANDLE()

void MM_ACQUIRE_TOE_LOCK(struct _lm_device_t *_pdev);
void MM_RELEASE_TOE_LOCK(struct _lm_device_t *_pdev);
void MM_ACQUIRE_TOE_GRQ_LOCK(struct _lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK(struct _lm_device_t *_pdev, u8_t idx);
void MM_ACQUIRE_TOE_GRQ_LOCK_DPC(struct _lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK_DPC(struct _lm_device_t *_pdev, u8_t idx);

#elif defined(_VBD_) || defined(_VBD_CMD_)

#if USE_QUEUED_SLOCK

void
mm_acquire_tcp_q_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con,
    void *ql_hdl);
void
mm_release_tcp_q_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con,
    void *ql_hdl);

/* MM_INIT_TCP_LOCK_HANDLE:
 * a macro declaring a KLOCK_QUEUE_HANDLE on the stack; it must be placed
 * on the stack of every lm/um caller of mm_acquire_tcp_q_lock.
 * Since KLOCK_QUEUE_HANDLE is a WDM structure that can't be compiled
 * in lm, we define a size SIZEOF_QL_HDL that must be larger than or equal
 * to sizeof(KLOCK_QUEUE_HANDLE). */
#define SIZEOF_QL_HDL 24 // 24 is the size of the KLOCK_QUEUE_HANDLE structure on 64-bit Windows, so it should be large enough for both 32- and 64-bit
#define MM_INIT_TCP_LOCK_HANDLE()   u8_t __ql_hdl[SIZEOF_QL_HDL] = {0}
#define mm_acquire_tcp_lock(pdev,tcp_con)   mm_acquire_tcp_q_lock((pdev),(tcp_con),__ql_hdl)
#define mm_release_tcp_lock(pdev,tcp_con)   mm_release_tcp_q_lock((pdev),(tcp_con),__ql_hdl)

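/* Illustrative usage (a sketch, not part of this interface; example_caller is
 * hypothetical): every lm/um caller declares the handle on its own stack via
 * MM_INIT_TCP_LOCK_HANDLE() and then uses the generic lock macros, which
 * expand to the queued-lock variants defined above.
 *
 *   void example_caller(lm_device_t *pdev, lm_tcp_con_t *con)
 *   {
 *       MM_INIT_TCP_LOCK_HANDLE();       // declares u8_t __ql_hdl[SIZEOF_QL_HDL]
 *       mm_acquire_tcp_lock(pdev, con);  // -> mm_acquire_tcp_q_lock(pdev, con, __ql_hdl)
 *       // ... access the connection state under the lock ...
 *       mm_release_tcp_lock(pdev, con);  // -> mm_release_tcp_q_lock(pdev, con, __ql_hdl)
 *   }
 */
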
#else /* USE_QUEUED_SLOCK */

#define MM_INIT_TCP_LOCK_HANDLE()

void
mm_acquire_tcp_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con);

void
mm_release_tcp_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con);

#endif /* USE_QUEUED_SLOCK */

void MM_ACQUIRE_TOE_LOCK(lm_device_t *_pdev);
void MM_RELEASE_TOE_LOCK(lm_device_t *_pdev);
void MM_ACQUIRE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_ACQUIRE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);

#elif defined(__USER_MODE_DEBUG)

#define MM_INIT_TCP_LOCK_HANDLE()

__inline static void mm_acquire_tcp_lock(
    struct _lm_device_t *pdev,
    lm_tcp_con_t *tcp_con)
{
    DbgMessage(pdev, INFORMl4, "Acquiring tcp lock for con %p\n", tcp_con);
}

__inline static void mm_release_tcp_lock(
    struct _lm_device_t *pdev,
    lm_tcp_con_t *tcp_con)
{
    DbgMessage(pdev, INFORMl4, "Releasing tcp lock for con %p\n", tcp_con);
}

#define MM_ACQUIRE_TOE_LOCK(_pdev)          DbgMessage(_pdev, INFORMl4, "Acquiring global toe lock\n")
#define MM_RELEASE_TOE_LOCK(_pdev)          DbgMessage(_pdev, INFORMl4, "Releasing global toe lock\n")
#define MM_ACQUIRE_TOE_GRQ_LOCK(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Acquiring global toe grq lock\n")
#define MM_RELEASE_TOE_GRQ_LOCK(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Releasing global toe grq lock\n")
#define MM_ACQUIRE_TOE_GRQ_LOCK_DPC(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Acquiring global toe grq lock\n")
#define MM_RELEASE_TOE_GRQ_LOCK_DPC(_pdev, idx) DbgMessage(_pdev, INFORMl4, "Releasing global toe grq lock\n")

#elif defined (NDISMONO)
/*
 * stubs for NDIS
 */
#define MM_INIT_TCP_LOCK_HANDLE()

void
mm_acquire_tcp_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con);

void
mm_release_tcp_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con);

void MM_ACQUIRE_TOE_LOCK(lm_device_t *_pdev);
void MM_RELEASE_TOE_LOCK(lm_device_t *_pdev);
void MM_ACQUIRE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK(lm_device_t *_pdev, u8_t idx);
void MM_ACQUIRE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);
void MM_RELEASE_TOE_GRQ_LOCK_DPC(lm_device_t *_pdev, u8_t idx);

#endif /* NDISMONO */

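/** Description (summary inferred from the function name and parameter list)
 *  Copies nbytes of received data from a generic ("peninsula") buffer,
 *  starting at gen_buf_offset, into a posted TCP buffer at tcp_buf_offset,
 *  using the DMAE engine.
 * Returns:
 *  - The actual number of bytes copied
 */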
u32_t mm_tcp_rx_peninsula_to_rq_copy_dmae(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    lm_address_t          gen_buf_phys,    /* Memory buffer to copy from */
    u32_t                 gen_buf_offset,
    lm_tcp_buffer_t     * tcp_buf,         /* TCP buffer to copy to      */
    u32_t                 tcp_buf_offset,
    u32_t                 nbytes
    );

void mm_tcp_comp_slow_path_request(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp,
    lm_tcp_slow_path_request_t *sp_request);

/** Description:
 *  - complete Tx and Rx application buffers towards the client
 *    (with any kind of completion status)
 *  - handle various pending "downstream" tasks: post more application buffers,
 *    post a graceful disconnect request (Tx only)
 * Assumptions:
 *  - in each given lm buffer with the flag BUFFER_END, the field "app_buf_xferred"
 *    was correctly set by the caller */
void mm_tcp_complete_bufs(
    struct _lm_device_t *pdev,
    lm_tcp_state_t      *tcp,
    lm_tcp_con_t        *tcp_con,   /* Rx OR Tx connection */
    s_list_t            *buf_list,  /* list of lm_tcp_buffer_t */
    lm_status_t         lm_status   /* completion status for all given TBs */
    );
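
/* Illustrative sketch (hypothetical caller; assumes the s_list helpers and the
 * lm_tcp_buffer_t 'link'/'size' fields used elsewhere in this driver):
 * completing one fully transferred Tx buffer with success.
 *
 *   s_list_t done_list;
 *   s_list_init(&done_list, NULL, NULL, 0);
 *   tcp_buf->app_buf_xferred = tcp_buf->size;  // required for BUFFER_END buffers
 *   s_list_push_tail(&done_list, &tcp_buf->link);
 *   mm_tcp_complete_bufs(pdev, tcp, tcp->tx_con, &done_list, LM_STATUS_SUCCESS);
 */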

/**
 * Description:
 *    Returns TRUE if generic data OR a preposted buffer is being indicated to
 *    the client for the given connection, and FALSE otherwise.
 */
u8_t mm_tcp_indicating_bufs(
    lm_tcp_con_t * con
    );

/** Description:
 *  - Aborts the given connection's buffers, completing them towards the client
 *    with the given status.
 * Assumptions:
 *  - Assumptions described in client.disconnect_tcp_done() (see design doc)
 *  - The connection's lock is already taken by the caller
 */
void mm_tcp_abort_bufs (
    IN    struct _lm_device_t     * pdev,  /* device handle */
    IN    lm_tcp_state_t          * tcp,   /* L4 state handle */
    IN    lm_tcp_con_t            * con,   /* connection handle */
    IN    lm_status_t               status /* status to abort buffers with */
    );

/**
 * Description:
 *    Indicates toward the client reception of the remote FIN.
 */
void mm_tcp_indicate_fin_received(
    IN   struct _lm_device_t     * pdev,   /* device handle */
    IN   lm_tcp_state_t          * tcp
    );

/**
 * Description:
 *    Indicates toward the client reception of the remote RST.
 */
void mm_tcp_indicate_rst_received(
    IN   struct _lm_device_t     * pdev,          /* device handle */
    IN   lm_tcp_state_t          * tcp
    );

/**
 * Description:
 *    Indicates toward the client the completion of the FIN request.
 */
void mm_tcp_graceful_disconnect_done(
    IN   struct _lm_device_t     * pdev,    /* device handle */
    IN   lm_tcp_state_t          * tcp,     /* L4 state handle */
    IN   lm_status_t               status   /* May be SUCCESS, ABORTED or UPLOAD IN PROGRESS */
    );

/** Description
 *  This function is called by lm when there are generic buffers that need to be indicated:
 *  - indicates received data held in generic buffers to the client (client.indicate_tcp_rx_buf)
 *  - retrieves the buffered data by calling lm_get_buffered_data, and notifies the lm of the
 *    status by calling lm_buffer_data_indicated after returning from client.indicate_tcp_rx_buf
 */
void mm_tcp_rx_indicate_gen (
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp
    );

/** Description
 *  Removes generic buffers from the generic buffer pool and passes them to the LM.
 *
 *  Returns:
 *  - The actual number of buffers returned (may be less than nbufs in case there are not
 *    enough buffers in the pool)
 */
u32_t mm_tcp_get_gen_bufs(
    struct _lm_device_t * pdev,
    d_list_t            * gb_list,
    u32_t                 nbufs,
    u8_t                  sb_idx
    );
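
/* Illustrative call (hypothetical caller): the return value must be checked,
 * since the pool may hold fewer buffers than requested.
 *
 *   d_list_t gb_list;
 *   d_list_init(&gb_list, NULL, NULL, 0);
 *   if (mm_tcp_get_gen_bufs(pdev, &gb_list, nbufs, sb_idx) < nbufs) {
 *       // short allocation: post what was returned and compensate later
 *   }
 */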

/** Description
 *  Returns a list of generic buffers to the generic buffer pool.
 * Assumption:
 *  gen_buf is a list of generic buffers that ALL need to be returned to the pool
 */
#define MM_TCP_RGB_COMPENSATE_GRQS      0x01
#define MM_TCP_RGB_COLLECT_GEN_BUFS     0x02

#define MM_TCP_RGB_USE_ALL_GEN_BUFS     0x80

#define NON_EXISTENT_SB_IDX             0xFF

void mm_tcp_return_gen_bufs(
    struct _lm_device_t * pdev,
    lm_tcp_gen_buf_t    * gen_buf,
    u32_t                 flags,
    u8_t                  grq_idx
    );

void mm_tcp_return_list_of_gen_bufs(
    struct _lm_device_t * pdev,
    d_list_t            * gen_buf_list,
    u32_t                 flags,
    u8_t                  grq_idx
    );
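
/* Illustrative call (hypothetical; flag semantics follow the flag names):
 * return a whole list to the pool while compensating the GRQs, when the
 * buffers are not tied to a specific status-block index.
 *
 *   mm_tcp_return_list_of_gen_bufs(pdev, &gen_buf_list,
 *                                  MM_TCP_RGB_COMPENSATE_GRQS,
 *                                  NON_EXISTENT_SB_IDX);
 */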

/** Description
 *  Copies data from a memory buffer to the TCP buffer using client_if.copy_l4buffer
 * Assumptions:
 * - the size of mem_buf is larger than nbytes
 * Returns:
 * - The actual number of bytes copied
 */
u32_t mm_tcp_copy_to_tcp_buf(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp_state,
    lm_tcp_buffer_t     * tcp_buf,         /* TCP buffer to copy to      */
    u8_t                * mem_buf,         /* Memory buffer to copy from */
    u32_t                 tcp_buf_offset,
    u32_t                 nbytes
    );
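
/* Illustrative sketch (hypothetical values): the returned count may be smaller
 * than nbytes, e.g. when the TCP buffer cannot hold nbytes from the given
 * offset, so callers should advance by the returned value.
 *
 *   u32_t copied = mm_tcp_copy_to_tcp_buf(pdev, tcp_state, tcp_buf,
 *                                         mem_buf,
 *                                         0,        // tcp_buf_offset
 *                                         nbytes);
 *   mem_buf += copied;  // continue from where the copy stopped
 */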

void
mm_tcp_indicate_retrieve_indication(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp_state,
    l4_upload_reason_t upload_reason);

/** Description
 *  This function updates the required number of generic buffers in the pools,
 *  given the old and new MSS and initial receive window. It is called as a
 *  result of an update to either of these parameters.
 */
void mm_tcp_update_required_gen_bufs(
    struct _lm_device_t * pdev,
    u32_t  new_mss,
    u32_t  old_mss,
    u32_t  new_initial_rcv_wnd,
    u32_t  old_initial_rcv_wnd);

/** Description
 *  Completes a path upload request. It completes the request to the client
 *  only if complete_to_client is true...
 */
void mm_tcp_complete_path_upload_request(
    struct _lm_device_t * pdev,
    lm_path_state_t     * path);

/** Description
 *  Called when a neigh upload request completes. This occurs when the last
 *  path state dependent on a neigh state in the upload_pending state has
 *  completed its upload.
 * Assumptions
 *  - caller holds the TOE LOCK
 */
void mm_tcp_complete_neigh_upload_request(
    struct _lm_device_t * pdev,
    lm_neigh_state_t    * neigh
    );

/* Post an empty ramrod initiated by TOE. */
lm_status_t mm_tcp_post_empty_slow_path_request(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    u32_t                 request_type);

/* Delete the tcp state (initiated from lm) */
void mm_tcp_del_tcp_state(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp);

#endif /* _MM_L4IF_H */