1 /*******************************************************************************
2 * lm_l4if.h - L4 lm interface
3 ******************************************************************************/
4 #ifndef _LM_L4IF_H
5 #define _LM_L4IF_H
6
7 lm_status_t lm_tcp_init_chip_common(
8 struct _lm_device_t *pdev);
9
10 lm_status_t lm_tcp_init(
11 struct _lm_device_t *pdev);
12
13 lm_status_t lm_tcp_init_resc(struct _lm_device_t *pdev, u8_t b_is_init );
14 lm_status_t lm_tcp_init_chip(struct _lm_device_t *pdev);
15 lm_status_t lm_tcp_start_chip(struct _lm_device_t *pdev);
16
17 lm_status_t
18 lm_tcp_set_ofld_params(
19 struct _lm_device_t *pdev,
20 lm_state_block_t *state_blk,
21 l4_ofld_params_t *params);
22
23 lm_status_t lm_tcp_init_neigh_state(
24 struct _lm_device_t *pdev,
25 lm_state_block_t *state_blk,
26 lm_neigh_state_t *neigh,
27 l4_neigh_const_state_t *neigh_const,
28 l4_neigh_cached_state_t *neigh_cached,
29 l4_neigh_delegated_state_t *neigh_delegated);
30
31 lm_status_t lm_tcp_init_path_state(
32 struct _lm_device_t *pdev,
33 lm_state_block_t *state_blk,
34 lm_path_state_t *path,
35 lm_neigh_state_t *neigh,
36 l4_path_const_state_t *path_const,
37 l4_path_cached_state_t *path_cached,
38 l4_path_delegated_state_t *path_delegated);
39
40 lm_status_t lm_tcp_init_tcp_state(
41 struct _lm_device_t *pdev,
42 lm_state_block_t *state_blk,
43 lm_tcp_state_t *tcp,
44 lm_path_state_t *path,
45 l4_tcp_const_state_t *tcp_const,
46 l4_tcp_cached_state_t *tcp_cached,
47 l4_tcp_delegated_state_t *tcp_delegated,
48 u32_t tcp_cid_addr);
49
50 /** Description:
51 * Initialize the tx/rx connection fields and resources
52 * Parameters
53 * - mblk: memory block for the virtual memory
54 * - phy_mblk: memory block for the physical memory
55 */
56 lm_status_t lm_tcp_init_tcp_resc(
57 struct _lm_device_t *pdev,
58 lm_tcp_state_t *tcp,
59 lm_tcp_mem_block_t * mblk,
60 lm_tcp_phy_mem_block_t * phy_mblk);
61
62 /** Description
63 * Post buffered data
64 */
65 lm_status_t lm_tcp_post_buffered_data(
66 struct _lm_device_t *pdev,
67 lm_tcp_state_t *tcp,
68 d_list_t *buffered_data);
69
70 /** Description
71 * Init sp_data phys and virt memory for a given tcp state to
72 * the sp_req_mgr sp_data memory
73 */
74 void lm_tcp_init_tcp_sp_data_mem(
75 struct _lm_device_t *pdev,
76 lm_tcp_state_t *tcp
77 );
78
79 /** Description:
80 * Initialize the common fields, or fields specific for rx/tx that use the
81 * same space in the memory block (such as doorbell-data)
82 * Parameters
83 * - mblk: memory block for the virtual memory
84 * - phy_mblk: memory block for the physical memory
85 */
86 lm_status_t lm_tcp_init_tcp_common(
87 struct _lm_device_t *pdev,
88 lm_tcp_state_t *tcp);
89
90 /* Get the required size for a connection's virtual memory
91 * Parameters:
92 * - tcp_state: A specific tcp state that the size is requested for. If NULL, then
93 * the default size is returned
94 */
95 u32_t lm_tcp_get_virt_size(
96 struct _lm_device_t * pdev,
97 lm_tcp_state_t * tcp_state);
98
99 /* Get the required size for a connection's physical memory
100 * Assumptions: Physical memory size is the same for all connections
101 */
102 u32_t lm_tcp_get_phys_size(
103 struct _lm_device_t * pdev);
104
105 lm_status_t lm_tcp_post_upload_path_request (
106 struct _lm_device_t * pdev,
107 lm_path_state_t * path_state,
108 l4_path_delegated_state_t * ret_delegated);
109
110 lm_status_t lm_tcp_post_upload_neigh_request(
111 struct _lm_device_t * pdev,
112 lm_neigh_state_t * neigh_state);
113
114 /* Description:
115 * delete tcp state from lm _except_ from actual freeing of memory.
116 * the task of freeing of memory is done in lm_tcp_free_tcp_state()
117 * Assumptions:
118 * global toe lock is taken by the caller
119 */
120 void lm_tcp_del_tcp_state(
121 struct _lm_device_t *pdev,
122 lm_tcp_state_t *tcp);
123
124 /* Description:
125 * delete path state from lm
126 * Assumptions:
127 * global toe lock is taken by the caller
128 */
129 void lm_tcp_del_path_state(
130 struct _lm_device_t *pdev,
131 lm_path_state_t *path);
132
133 /* Description:
134 * delete neigh state from lm
135 * Assumptions:
136 * global toe lock is taken by the caller
137 */
138 void lm_tcp_del_neigh_state(
139 struct _lm_device_t *pdev,
140 lm_neigh_state_t *neigh);
141
142 void lm_tcp_free_tcp_resc(
143 struct _lm_device_t *pdev,
144 lm_tcp_state_t *tcp);
145
146 lm_status_t lm_tcp_post_slow_path_request(
147 struct _lm_device_t *pdev,
148 lm_tcp_state_t *tcp,
149 lm_tcp_slow_path_request_t *request);
150
151 /* initiate offload request completion */
152 void lm_tcp_comp_initiate_offload_request(
153 struct _lm_device_t *pdev,
154 lm_tcp_state_t *tcp,
155 u32_t comp_status);
156
157 lm_status_t lm_tcp_tx_post_buf(
158 struct _lm_device_t *pdev,
159 lm_tcp_state_t *tcp,
160 lm_tcp_buffer_t *tcp_buf,
161 lm_frag_list_t *frag_list);
162
163
164
165 lm_status_t lm_tcp_rx_post_buf(
166 struct _lm_device_t *pdev,
167 lm_tcp_state_t *tcp,
168 lm_tcp_buffer_t *tcp_buf,
169 lm_frag_list_t *frag_list
170 );
171
172 /** Description
173 * Returns data that is buffered in the generic buffers to the mm.
174 * after this function completes, and the data is indicated to the client
175 * the next function (lm_tcp_rx_buffered_data_indicated) should be called.
176 * Assumptions:
177 * - function is called as a result of a call to mm_tcp_rx_indicate_gen
178 * - return_buf_ctx will be sent to lm_tcp_rx_buffered_data_indicated and to l4_buffer_return
179 * Returns:
180  * - LM_STATUS_SUCCESS - buffered data successfully passed to mm
181 * - LM_STATUS_FAILURE - no more buffered data
182 */
183 lm_status_t lm_tcp_rx_get_buffered_data(
184 IN struct _lm_device_t * pdev,
185 IN lm_tcp_state_t * tcp,
186 OUT lm_frag_list_t ** frag_list,
187 OUT lm_tcp_gen_buf_t ** gen_buf /* head of indications generic buffer */
188 );
189
190 /** Description
191 * Called from the flow of terminate. Returns data that is buffered in the generic buffers
192 * with no conditions
193 * Assumptions:
194 * - function is called as a result of a terminate
195 * - return_buf_ctx will be sent to l4_buffer_return
196 */
197 lm_status_t lm_tcp_rx_get_buffered_data_from_terminate (
198 IN struct _lm_device_t * pdev,
199 IN lm_tcp_state_t * tcp,
200 OUT lm_frag_list_t ** frag_list,
201 OUT lm_tcp_gen_buf_t ** gen_buf /* head of indications generic buffer */
202 );
203
204 /** Description
205 * Called by the mm to notify the result of the indication
206 * accepted_bytes contains the number of bytes that were accepted by the client. This value can
207  * be less than the indicated number of bytes, in which case the indication was a partially successful
208 * indication
209 * Assumption:
210 * - This function is called as a result of a call to mm_tcp_rx_indicate_gen call
211 * and only after lm_tcp_rx_get_buffered_data was called.
212 * - return_buf_ctx is the buffer returned to lm_tcp_rx_get_buffered_data
213 * - accepted_bytes <= indicated number of bytes
214 */
215 void lm_tcp_rx_buffered_data_indicated(
216 struct _lm_device_t * pdev,
217 lm_tcp_state_t * tcp,
218 u32_t accepted_bytes,
219 lm_tcp_gen_buf_t * gen_buf /* head of indications generic buffer */
220 );
221
222 /** Description
223 * If connection is still open updates the sws, updates the pending return indications
224 */
225 void lm_tcp_rx_indication_returned(
226 struct _lm_device_t * pdev,
227 lm_tcp_state_t * tcp,
228 lm_tcp_gen_buf_t * gen_buf/* head of indications generic buffer */
229 );
230
231 /** Description
232 * Called:
233 * 1. when a buffer is returned from a client and the connection is already closed
234 * 2. when upload_completion returns from the client
235 * Checks if the connection is dead and can be deleted (depending on state,
236 * and pending return indications)
237 * If the call is due to (2), changes the state to UPLOAD_DONE
238  * 3. when offload completion is processed and we service deferred cqes,
239 * its possible that the connection was uploaded while waiting to the offload completion
240 * Assumptions:
241 * SP and Rx locks are taken by the caller
242 * Return:
243 * TRUE - if connection can be deleted i.e. state = UPLOAD_DONE,
244 * and all pending indications returned
245 * FALSE - o/w
246 */
247 u8_t lm_tcp_is_tcp_dead(
248 struct _lm_device_t * pdev,
249 lm_tcp_state_t * tcp,
250 u8_t op
251 );
252 #define TCP_IS_DEAD_OP_RTRN_BUFS (0)
253 #define TCP_IS_DEAD_OP_UPLD_COMP (1)
254 #define TCP_IS_DEAD_OP_OFLD_COMP_DFRD (2)
255
256 /** Description
257 * checks the state of the connection (POST_BLOCKED or NOT)
258 * Returns
259 * SUCCESS - if connection is open
260 * CONNECTION_CLOSED - if connection is blocked
261 */
262 lm_status_t lm_tcp_con_status(
263 struct _lm_device_t * pdev,
264 lm_tcp_con_t * rx_con);
265
266 /** Description
267 * calculates the size of a generic buffer based on min_gen_buf_size and mtu
268 * this function should be called at init, it does not initialize the lm
269 * toe_info parameter
270 * Assumptions:
271 * mtu and min_gen_buf_size are initialized
272 * Returns:
273 * size of generic buffer
274 */
275 u32_t lm_tcp_calc_gen_buf_size(struct _lm_device_t * pdev);
276
277 /** Description
278 * extracts the size of a generic buffer from the lmdev
279 */
280 #define LM_TCP_GEN_BUF_SIZE(lmdev) ((lmdev)->toe_info.gen_buf_size)
281
282 u8_t lm_toe_is_tx_completion(struct _lm_device_t *pdev, u8_t drv_toe_rss_id);
283 u8_t lm_toe_is_rx_completion(struct _lm_device_t *pdev, u8_t drv_toe_rss_id);
284 u8_t lm_toe_is_rcq_suspended(struct _lm_device_t *pdev, u8_t drv_toe_rss_id);
285 void lm_toe_service_tx_intr(struct _lm_device_t *pdev, u8_t drv_toe_rss_id);
286 void lm_toe_service_rx_intr(struct _lm_device_t *pdev, u8_t drv_toe_rss_id);
287 void lm_tcp_clear_grqs(struct _lm_device_t * lmdev);
288
289 /*********************** TOE RSS ******************************/
290 /**
291 * @Description: Update TOE RSS. The origin of this call is when getting
292 * an OS RSS update. It's actually by L2 interface and not
293 * L4. However, the ramrods are separate for L4 + L2 due to the
294 * assumptions by the different protocols of what the data is
295 * in the indirection table.
296 *
297 * @Assumptions: Called BEFORE calling L2
298 * enable-rss!!
299 *
300 * @param pdev
301 * @param chain_indirection_table - table of TOE RCQ chain values
302 * @param table_size - size of table above
303 * @param enable - is this enable/disable rss if it's disable, the
304 * table will all point to the same entry
305 *
306  * @return lm_status_t - PENDING if completion will arrive asynchronously
307 * - SUCCESS if no ramrod is sent (for example table didn't change)
308 * - FAILURE o/w
309 */
310 lm_status_t lm_tcp_update_rss(struct _lm_device_t * pdev, u8_t * chain_indirection_table,
311 u32_t table_size, u8_t enable);
312
313
314 /* This functions sets the update window mode. We work in two modes:
315 * SHORT_LOOP and LONG_LOOP.
316 * SHORT_LOOP: if generic indication succeeded, the window is update immediately by the accepted bytes
317 * LONG_LOOP: if generic indication succeeded, the window is updated only when the buffer is returned via l4_return_buffer
318 */
319 #define LM_TCP_SET_UPDATE_WINDOW_MODE(lmdev, mode) (lmdev)->toe_info.update_window_mode = mode
320
321 #define LM_TCP_GET_UPDATE_WINDOW_MODE(lmdev) ((lmdev)->toe_info.update_window_mode)
322
323
324
325 /**
326 * Description:
327 * - Post a fin request BD in the bd chain
328 * Returns:
329 * - SUCCESS - fin request was posted on the BD chain
330 * - CONNECTION CLOSED- as described in lm_tcp_tx_post_buf()
331 */
332 lm_status_t lm_tcp_graceful_disconnect(
333 IN struct _lm_device_t * pdev, /* device handle */
334 IN lm_tcp_state_t * tcp_state /* L4 state */
335 );
336
337 /** Description
338 * check if there is a pending remote disconnect on the rx connection.
339 * This function is called from the um, after buffers have been posted. If there is a
340 * remote disconnect pending, it will be processed.
341 */
lm_tcp_rx_is_remote_disconnect_pending(lm_tcp_state_t * tcp_state)342 __inline static u8_t lm_tcp_rx_is_remote_disconnect_pending(lm_tcp_state_t * tcp_state)
343 {
344 lm_tcp_con_t * rx_con = tcp_state->rx_con;
345 lm_tcp_con_rx_gen_info_t * gen_info = &rx_con->u.rx.gen_info;
346
347 return (u8_t)(!(rx_con->flags & TCP_RX_POST_BLOCKED) &&
348 (gen_info->peninsula_nbytes == 0) &&
349 (rx_con->u.rx.flags & (TCP_CON_FIN_IND_PENDING | TCP_CON_RST_IND_PENDING)));
350
351 }
352
353 /** Description
354 * checks whether it is OK to update the tcp state. We only update if the connection
355 * is not being offload/uploaded/invalidated i.e. normal or aborted.
356 */
lm_tcp_ok_to_update(lm_tcp_state_t * tcp)357 __inline static u8_t lm_tcp_ok_to_update(lm_tcp_state_t * tcp)
358 {
359 /* a state status is changed to invalidate only after the invalidate is completed, therefore
360 * to make sure a state isn't in the process of being invalidated we check it's flags to see
361 * whether an invalidate request has already been posted. */
362 return (u8_t)(((tcp->hdr.status == STATE_STATUS_NORMAL) ||
363 (tcp->hdr.status == STATE_STATUS_ABORTED)) &&
364 !(tcp->rx_con->flags & TCP_INV_REQ_POSTED));
365 }
366
367 /**
368 * Description:
369 * initializes the lm data in a slow path request given the request parameters
370 */
371 void lm_init_sp_req_type (
372 struct _lm_device_t * pdev,
373 lm_tcp_state_t * tcp,
374 lm_tcp_slow_path_request_t * lm_req,
375 void * req_input_data);
376
377 /**
378 * Description (for following two functions)
379 * finds the next tcp states dependent of the path/neigh
380 * given the previous tcp state. If tcp_state is NULL, it
381 * returns the first such tcp_state
382 * Returns
383 * tcp_state: if such exists
384 * NULL: if there are no more tcp states dependent of the
385 * given path/neigh
386 */
387 lm_tcp_state_t * lm_tcp_get_next_path_dependent(
388 struct _lm_device_t *pdev,
389 void *path_state,
390 lm_tcp_state_t * tcp_state);
391
392 lm_tcp_state_t * lm_tcp_get_next_neigh_dependent(
393 struct _lm_device_t *pdev,
394 void * neigh_state,
395 lm_tcp_state_t * tcp_state);
396
397
398 /**
399 * Description
400  * finds the next neigh state given the previous
401  * neigh_state. If neigh_state is NULL, it returns
402  * the first neigh_state in the list of neigh states
403 * Returns
404 * neigh_state: if exists
405 * NULL: if neigh list is empty or no more neigh states in
406 * the list
407 */
408 lm_neigh_state_t * lm_tcp_get_next_neigh(
409 struct _lm_device_t *pdev,
410 lm_neigh_state_t * neigh_state);
411
412 /**
413 * Description
414  * finds the next path state that matches a non-NULL neigh.
415 * If neigh_state is NULL, it returns the next path state in
416 * list of path states
417 * Returns
418 * path_state: if such exists
419 * NULL: if there are no more path states dependent of the
420  * given neigh (if not NULL)
421 */
422 lm_path_state_t * lm_tcp_get_next_path(
423 struct _lm_device_t *pdev,
424 lm_neigh_state_t * neigh_state,
425 lm_path_state_t * path_state);
426
427 /**
428 * Description
429 * finds the next tcp states in list of tcp
430 *
431 * Returns
432 * tcp_state: if such exists
433 * NULL: if there are no more tcp states in the list
434 */
435
436 lm_tcp_state_t * lm_tcp_get_next_tcp(
437 struct _lm_device_t *pdev,
438 lm_tcp_state_t * tcp_state);
439
440 /* GilR 8/22/2006 - TBD - temp implementation, for debugging. to be removed?/wrapped with "#if DBG"? */
441 void lm_tcp_internal_query(
442 IN struct _lm_device_t * pdev);
443
444 /**
445 * Returns the number of entries needed in frag list
446 * taking into an account the CWnd and MSS
447 */
448 u32_t lm_tcp_calc_frag_cnt(
449 struct _lm_device_t * pdev,
450 lm_tcp_state_t * tcp
451 );
452
453 /** Description
454 * function is called whenever the UM allocates more generic buffers
455 */
456 void lm_tcp_rx_gen_bufs_alloc_cb(
457 struct _lm_device_t * pdev);
458
459 /** Description
460  * Callback function for cids being recycled
461 */
462 void lm_tcp_recycle_cid_cb(
463 struct _lm_device_t *pdev,
464 void *cookie,
465 s32_t cid);
466
467 void lm_tcp_init_num_of_blocks_per_connection(
468 struct _lm_device_t *pdev,
469 u8_t num);
470
471 u8_t lm_tcp_get_num_of_blocks_per_connection(
472 struct _lm_device_t *pdev);
473
474 lm_status_t lm_tcp_erase_connection(
475 IN struct _lm_device_t * pdev,
476 IN lm_tcp_state_t * tcp);
477
478 u8_t lm_tcp_get_src_ip_cam_byte(
479 IN struct _lm_device_t * pdev,
480 IN lm_path_state_t * path);
481
482 lm_tcp_state_t* lm_tcp_find_offloaded_tcp_tuple(struct _lm_device_t * pdev, u8_t src_ip_byte, u8_t src_tcp_b, u8_t dst_tcp_b, lm_tcp_state_t * prev_tcp);
483
484
485 void lm_tcp_rx_clear_isles(struct _lm_device_t * pdev, lm_tcp_state_t * tcp_state, d_list_t * isles_list);
486
487 u8_t * lm_tcp_get_pattern(struct _lm_device_t *,
488 lm_tcp_state_t * tcp,
489 u8_t pattern_idx,
490 u32_t offset,
491 u32_t * pattern_size);
492
493 void lm_tcp_set_pattern_offset(struct _lm_device_t * pdev,
494 lm_tcp_state_t * tcp,
495 u8_t pattern_idx,
496 u32_t offset);
497
498 u32_t lm_tcp_find_pattern_offset(struct _lm_device_t * pdev, u8_t * sub_buf, u32_t sub_buf_size);
499
500 #endif /* _LM_L4IF_H */
501