1 /*******************************************************************************
2  * lm_l4st.h - L4 lm data structures
3  ******************************************************************************/
4 #ifndef _LM_L4ST_H
5 #define _LM_L4ST_H
6 
7 #include "l4states.h"
8 #include "bd_chain_st.h"
9 #include "lm_sp_req_mgr.h"
10 #include "toe_constants.h"
11 
12 #define MAX_L4_RX_CHAIN                16
13 #define MAX_L4_TX_CHAIN                16
14 
15 #define PATTERN_COUNTER_IDX_COMPLETION   0
16 #define PATTERN_COUNTER_IDX_CQE         1
17 #define MAX_PATTERN_IDX                 2
18 
19 typedef struct _lm_tcp_integrity_info_t
20 {
21     u32_t current_offset_in_pattern_buf[MAX_PATTERN_IDX];
22     u32_t skip_bytes_in_incoming_buf[MAX_PATTERN_IDX];
23     u32_t  is_offsets_initialized;
24 } lm_tcp_integrity_info_t;
25 
26 
27 typedef struct _lm_toe_integrity_info_t
28 {
29     u8_t  *  pattern_buf;
30     u32_t   pattern_buf_size;
31     u32_t   pattern_size;
32 } lm_toe_integrity_info_t;
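
/* Illustrative sketch (not part of the driver API): one way the per-connection
 * offsets kept in lm_tcp_integrity_info_t could be used to verify received data
 * against the repeating pattern described by lm_toe_integrity_info_t.  The helper
 * name and the byte-by-byte comparison are assumptions for illustration only;
 * the real integrity checks live in the lm L4 .c files. */
#if 0   /* example only - not compiled */
static int example_check_rx_pattern(lm_toe_integrity_info_t *toe_info,
                                    lm_tcp_integrity_info_t *con_info,
                                    u8_t *data, u32_t len)
{
    /* current position inside the repeating pattern for CQE-side checks
     * (assumes pattern_size != 0 and that the offsets were initialized) */
    u32_t off = con_info->current_offset_in_pattern_buf[PATTERN_COUNTER_IDX_CQE];
    u32_t i;

    for (i = 0; i < len; i++) {
        if (data[i] != toe_info->pattern_buf[off]) {
            return 0;                               /* mismatch */
        }
        off = (off + 1) % toe_info->pattern_size;   /* pattern repeats */
    }
    con_info->current_offset_in_pattern_buf[PATTERN_COUNTER_IDX_CQE] = off;
    return 1;                                       /* data matches the pattern */
}
#endif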
33 
34 
35 /*******************************************************************************
36  * 'Posting' TCP buffer.
37  ******************************************************************************/
38 typedef struct _lm_tcp_buffer_t
39 {
40     /* Must be the first entry in this structure. */
41     s_list_entry_t      link;
42 
43     /* Corresponds to l4buffer_t buffer_size.
44      * The number of bytes in this buffer may not correspond to the
45      * number of bytes of the application buffer.  An application buffer
46      * could span multiple tcp_bufs.  The flags field is used to mark the
47      * start and the end of an application buffer. */
48     u32_t               size;
49 
50     /* Number of bytes that were not completed yet */
51     u32_t               more_to_comp;
52 
53     u32_t                flags;      /* Flags for indicating the start and end of an io buffer. */
54     #define TCP_BUF_FLAG_NONE                   0x00
55     #define TCP_BUF_FLAG_L4_POST_START          0x01
56     #define TCP_BUF_FLAG_L4_POST_END            0x02
57     #define TCP_BUF_FLAG_L4_RESERVED1           0x04  /* used in Teton for dummy buffer. */
58     #define TCP_BUF_FLAG_L4_SPLIT               0x04  /* Used in Everest for a split buffer; Everest cleans it before completing to the miniport */
59     #define TCP_BUF_FLAG_L4_RESERVED2           0x08  /* used only in Miniport as 'last post' */
60     #define TCP_BUF_FLAG_L4_RX_NO_PUSH          0x10
61     #define TCP_BUF_FLAG_L4_PARTIAL_FILLED      0x20
62     /* NOTE: lm_tcp_buffer flags values must correspond to flags definition in l4buffer_t */
63 
64     u16_t               bd_used;    /* Number of BDs consumed in the bd chain for this tcp buffer */
65     u16_t               _pad;
66 
67     /* These fields are valid when TCP_BUF_FLAG_L4_POST_END flag is set. */
68     u32_t app_buf_size;     /* Number of bytes of all buffers from BUFFER_START till BUFFER_END */
69     u32_t app_buf_xferred;  /* Number of bytes xferred on all buffers from BUFFER_START till BUFFER_END */
70 } lm_tcp_buffer_t;
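
/* Illustrative sketch (not part of the driver API): how the POST_START/POST_END
 * flags delimit an application buffer that spans several lm_tcp_buffer_t entries,
 * and how a running byte count (playing the role of app_buf_bytes_acc_post in
 * lm_tcp_con_t, defined later in this file) ends up in app_buf_size of the last
 * tcp buffer.  The helper name is hypothetical. */
#if 0   /* example only - not compiled */
static void example_account_posted_buf(lm_tcp_buffer_t *tcp_buf, u32_t *acc)
{
    if (tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START) {
        *acc = 0;                          /* first piece of a new application buffer */
    }
    *acc += tcp_buf->size;

    if (tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
        tcp_buf->app_buf_size    = *acc;   /* total bytes of the application buffer  */
        tcp_buf->app_buf_xferred = 0;      /* updated as completions arrive          */
    }
}
#endif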
71 
72 /*******************************************************************************
73  * state header.
74  * Each state must start with this entry, which is used for chaining
75  * states together and for identifying a particular state.
76  ******************************************************************************/
77 typedef struct _lm_state_header_t
78 {
79     d_list_entry_t link;
80     struct _lm_state_block_t *state_blk;
81 
82     u32_t state_id;
83     #define STATE_ID_UNKNOWN                    0
84     #define STATE_ID_TCP                        1
85     #define STATE_ID_PATH                       2
86     #define STATE_ID_NEIGH                      3
87 
88     u32_t status;
89     #define STATE_STATUS_UNKNOWN                0
90     #define STATE_STATUS_INIT                   1
91     #define STATE_STATUS_INIT_CONTEXT           2
92     #define STATE_STATUS_OFFLOAD_PENDING        3
93     #define STATE_STATUS_NORMAL                 4
94     #define STATE_STATUS_ABORTED                5
95     #define STATE_STATUS_INVALIDATED            6
96     #define STATE_STATUS_UPLOAD_PENDING         7
97     #define STATE_STATUS_UPLOAD_DONE            8
98     #define STATE_STATUS_INIT_OFFLOAD_ERR       9
99     #define STATE_STATUS_ERR                    10
100 } lm_state_header_t;
101 
102 /*******************************************************************************
103  * neighbor state
104  ******************************************************************************/
105 typedef struct _lm_neigh_state_t
106 {
107     lm_state_header_t           hdr;
108 
109     l4_neigh_const_state_t      neigh_const;
110     l4_neigh_cached_state_t     neigh_cached;
111     l4_neigh_delegated_state_t  neigh_delegated;
112 
113     /* network reachability */
114     u32_t                       host_reachability_time;
115     u32_t                       nic_reachability_time;
116     u8_t                        stale;
117     u8_t                        _pad[3];
118 
119     /* debug */
120     u32_t                       num_dependents; /* number of dependent path states */
121 } lm_neigh_state_t;
122 
123 /*******************************************************************************
124  * path state
125  ******************************************************************************/
126 typedef struct _lm_path_state_t
127 {
128     lm_state_header_t           hdr;
129 
130     lm_neigh_state_t            *neigh;         /* parent neighbor state */
131 
132     l4_path_const_state_t       path_const;
133     l4_path_cached_state_t      path_cached;
134     l4_path_delegated_state_t   path_delegated;
135 
136     /* debug */
137     u32_t                       num_dependents; /* number of dependent tcp states */
138 } lm_path_state_t;
139 
140 /*******************************************************************************
141  * queue element buffer - for buffering queue elements (of any type)
142  ******************************************************************************/
143 typedef struct _lm_tcp_qe_buffer_t
144 {
145     char *first;
146     char *tail;
147     char *head;
148     char *last;
149 
150     u32_t qe_size; /* queue element size */
151     u32_t left;
152 } lm_tcp_qe_buffer_t;
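
/* Illustrative sketch (not part of the driver API): the qe buffer is used as a
 * cyclic buffer of fixed-size queue elements (e.g. the history_cqes field of
 * lm_tcp_con_t below).  'first'/'last' bound the storage, 'head' is where the
 * next element is written and 'left' counts free slots.  The wrap condition
 * below assumes 'last' points at the last element slot; the real produce/consume
 * helpers live in the lm L4 .c files. */
#if 0   /* example only - not compiled */
static char * example_qe_buffer_produce(lm_tcp_qe_buffer_t *qes)
{
    char *qe = qes->head;

    /* advance 'head' by one element, wrapping from 'last' back to 'first' */
    qes->head = (qes->head == qes->last) ? qes->first : qes->head + qes->qe_size;
    if (qes->left) {
        qes->left--;
    }
    return qe;    /* caller copies the new queue element into this slot */
}
#endif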
153 
154 
155 /*******************************************************************************
156  * Memory Blocks
157  ******************************************************************************/
158 typedef struct _lm_tcp_mem_block_t
159 {
160     s_list_entry_t   link;  /* Must be the first entry... */
161 
162     u8_t           * start; /* Start of the memory block */
163     u8_t           * free;  /* Pointer to the start of the remaining free space of the block */
164     u32_t            total; /* Size of the entire block */
165     u32_t            left;  /* free bytes left in the block */
166     u8_t             flags;  /* virt-memblock-pool member or not */
167     #define MBLK_RETURN_TO_POOL 0x1
168 } lm_tcp_mem_block_t;
169 
170 typedef struct _lm_tcp_phy_mem_block_t
171 {
172     s_list_entry_t   link;
173 
174     u8_t           * start; /* Start of the memory block */
175     u8_t           * free;  /* Pointer to the start of the remaining free space of the block */
176     u32_t            total; /* Size of the entire block */
177     u32_t            left;  /* free bytes left in the block */
178 
179     lm_address_t     start_phy;
180     lm_address_t     free_phy;
181 } lm_tcp_phy_mem_block_t;
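
/* Illustrative sketch (not part of the driver API): both memory-block variants
 * behave like simple bump allocators - 'free' points at the next unused byte and
 * 'left' counts what remains.  A hypothetical carve helper for the virtual
 * variant is shown; block allocation and the MBLK_RETURN_TO_POOL handling happen
 * elsewhere. */
#if 0   /* example only - not compiled */
static u8_t * example_mblk_carve(lm_tcp_mem_block_t *mblk, u32_t nbytes)
{
    u8_t *mem;

    if (mblk->left < nbytes) {
        return NULL;            /* not enough room left in this block */
    }
    mem         = mblk->free;   /* hand out the next free chunk       */
    mblk->free += nbytes;
    mblk->left -= nbytes;
    return mem;
}
#endif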
182 
183 #define DEBUG_OOO_CQE
184 typedef struct _lm_isle_t
185 {
186     d_list_entry_t   isle_link;
187     d_list_t         isle_gen_bufs_list_head;
188     u32_t            isle_nbytes;
189 #ifdef DEBUG_OOO_CQE
190     u32_t            dedicated_cid;
191     u32_t            recent_ooo_combined_cqe;
192 #endif
193 } lm_isle_t;
194 
195 #ifdef DEBUG_OOO_CQE
196 #define SET_DEBUG_OOO_INFO(_isle, _cmd, _data) \
197              (_isle)->recent_ooo_combined_cqe = ((((_cmd) << TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) & TOE_RX_CQE_COMPLETION_OPCODE) \
198                                                 | (((_data) << TOE_RX_CQE_OOO_PARAMS_NBYTES_SHIFT) & TOE_RX_CQE_OOO_PARAMS_NBYTES))
199 #define GET_RECENT_OOO_CMD(_isle) \
200              (((_isle)->recent_ooo_combined_cqe &  TOE_RX_CQE_COMPLETION_OPCODE) >>  TOE_RX_CQE_COMPLETION_OPCODE_SHIFT)
201 #define GET_RECENT_OOO_DATA(_isle) \
202              (((_isle)->recent_ooo_combined_cqe &  TOE_RX_CQE_OOO_PARAMS_NBYTES) >>  TOE_RX_CQE_OOO_PARAMS_NBYTES_SHIFT)
203 #endif
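
/* Illustrative usage sketch (not part of the driver API), assuming DEBUG_OOO_CQE
 * is defined: the macros above pack the opcode and nbytes of the most recent
 * out-of-order CQE that touched an isle into a single u32_t, using the
 * TOE_RX_CQE_* masks/shifts from the TOE HSI.  The helper name and cqe argument
 * names are hypothetical. */
#if 0   /* example only - not compiled */
static void example_record_ooo_cqe(lm_isle_t *isle, u8_t cqe_cmd, u32_t cqe_nbytes)
{
    u32_t last_cmd;
    u32_t last_nbytes;

    /* remember the last OOO cqe that was combined into this isle */
    SET_DEBUG_OOO_INFO(isle, cqe_cmd, cqe_nbytes);

    /* ...and unpack it again, e.g. from a debugger or a state-dump routine */
    last_cmd    = GET_RECENT_OOO_CMD(isle);
    last_nbytes = GET_RECENT_OOO_DATA(isle);
}
#endif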
204 
205 /*******************************************************************************
206  * Rx connection's generic buffers info.
207  ******************************************************************************/
208 typedef struct _lm_tcp_con_rx_gen_info_t
209 {
210     d_list_t         peninsula_list;     /* accessed only via lock */
211     d_list_t         dpc_peninsula_list; /* accessed lock-free only in dpc */
212 
213     d_list_t         isles_list;
214     lm_isle_t        first_isle;
215     lm_isle_t      * current_isle;
216     u8_t             current_isle_number;
217     u8_t             max_number_of_isles;
218     u8_t             _isle_pad[2];
219 
220     lm_frag_list_t * frag_list;         /* allocated in initialization of connection      */
221     u32_t            max_frag_count;    /* the number of frags statically allocated       */
222 
223     u32_t            peninsula_nbytes;
224     u32_t            dpc_peninsula_nbytes;
225     u32_t            isle_nbytes;
226     u16_t            first_buf_offset;
227 
228     /* How many buffers (head of indications) were indicated for this connection and haven't
229      * returned yet from NDIS. We need to know that to make sure we don't delete the connection
230      * before all buffers pointing to it have returned.
231      */
232     u16_t            pending_return_indications;
233     /* Bytes indicated whose buffers have not yet been returned; this is a value that will increase
234      * the window. If we're uploaded and we still have pending_indicated_bytes we need to increase the
235      * window by them immediately and not wait... */
236     u32_t            pending_indicated_bytes;
237 
238     /* Each indication may result in us updating the window - this depends on the #of bytes accepted AND the update_window_mode
239      * we're in. We aggregate this over all indications (mm_tcp_rx_indicate_gen may be called several times if more generic data
240      * was received during indicate). This field is updated ONLY by the function lm_tcp_rx_buffered_data_indicated, and is accessed
241      * once the mm_tcp_rx_indicate_gen function completes. The main reason for this aggregation, unfortunately, is for passing
242      * SpartaTest - receive_indications, which expects a specific number of indications.  */
243     u32_t            add_sws_bytes;
244 
245     u8_t             wait_for_isle_left;
246     u8_t            _padding;
247 
248     /* The update window mode is taken from the toe information before an indication.
249      * We can't use the main copy because it may change between the time we indicate
250      * (after we've marked the buffer) and the time we get an answer (and need to determine
251      * whether to update the window or not). */
252     u8_t             update_window_mode;
253 
254     /*  debug/statistics */
255     /* DEFINITION: A generic buffer can be 'done' with as a result of a successful indicate or as a result of a copy
256      * operation to an application buffer (regardless of its state before: partially indicated/partially copied).
257      * We count the number of times generic buffers were 'done' with. */
258     u8_t             peninsula_blocked;     /* peninsula is blocked as a result of terminate (get_buffered_data) */
259     u32_t            num_buffers_indicated; /* 'done' with as a result of an indicate */
260     u32_t            num_buffers_copied_grq;/* # grq buffers copied */
261     u32_t            num_buffers_copied_rq; /* # rq buffers copied TBD how to count*/
262     u32_t            num_bytes_indicated;   /* all bytes indicated in either full/partial indications */
263     u32_t            copy_gen_buf_fail_cnt; /* counts the number of times a client.copy operation failed */
264     u32_t            copy_gen_buf_dmae_cnt; /* counts the number of times dmae copy operation was used */
265     u32_t            num_success_indicates; /* number of times indicate succeeded */
266     u32_t            num_failed_indicates;  /* number of times indicate failed */
267     u32_t            bufs_indicated_rejected; /* number of rejected bufs */
268     u64_t            bytes_copied_cnt_in_process;
269     u64_t            bytes_copied_cnt_in_post;
270     u64_t            bytes_copied_cnt_in_comp;
271     u64_t            bytes_indicated_accepted;
272     u64_t            bytes_indicated_rejected;
273     u32_t            dont_send_to_system_more_then_rwin;
274     u32_t            num_non_full_indications;
275 
276 } lm_tcp_con_rx_gen_info_t;
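
/* Illustrative sketch (not part of the driver API), inferred from the comments
 * above: the bookkeeping a successful generic indication implies.  The helper
 * name and argument names are hypothetical; the authoritative logic lives in
 * lm_tcp_rx_buffered_data_indicated() and the mm indication path.  The window
 * accounting (add_sws_bytes vs. pending_indicated_bytes) is shown separately in
 * the lm_toe_info_t sketch at the end of this file. */
#if 0   /* example only - not compiled */
static void example_account_accepted_indication(lm_tcp_con_rx_gen_info_t *gen_info,
                                                u32_t accepted_bytes,
                                                u16_t indicated_bufs)
{
    /* the head-of-indication buffer is owned by the client until it returns,
     * so the connection must not be deleted while this is non-zero */
    gen_info->pending_return_indications++;

    /* statistics */
    gen_info->num_success_indicates++;
    gen_info->num_buffers_indicated += indicated_bufs;
    gen_info->num_bytes_indicated   += accepted_bytes;
}
#endif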
277 
278 /*******************************************************************************
279  * Rx connection's receive window information for silly window syndrome avoidance
280  ******************************************************************************/
281 #define MAX_INITIAL_RCV_WND 0x80000000 /* 2GB (due to cyclic counters and the window-update algorithm) */
282 
283 /* DWA: Delayed Window Update Algorithm: the twin of DCA, delays the window updates according to the delayed completions. */
284 
285 #define MAX_DW_THRESH_ENTRY_CNT 16 /* a new entry is created each time we see an NDC completion (non-delayed-complete). We
286                                     * can limit these to 16 'active completions' i.e. completions that haven't received a
287                                     * window-update yet. FW-DCA works with quad-buffer, therefore 16 is more than enough. */
288 
289 typedef struct _lm_tcp_rx_dwa_info {
290     u32_t dw_thresh[MAX_DW_THRESH_ENTRY_CNT]; /* delayed window update thresholds. */
291     u8_t  head;                               /* head of the cyclic buffer dw_thresh (next empty entry) */
292     u8_t  tail;                               /* tail of the cyclic buffer dw_thresh */
293     u16_t _pad;
294 } lm_tcp_rx_dwa_info;
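
/* Illustrative sketch (not part of the driver API): filling the dw_thresh ring
 * implied by the head/tail fields when a non-delayed completion is seen.  The
 * consume side (popping entries as window updates are written to the fw) is
 * omitted; the helper name is hypothetical. */
#if 0   /* example only - not compiled */
static void example_dwa_push_threshold(lm_tcp_rx_dwa_info *dwa, u32_t thresh)
{
    /* remember the window-update threshold associated with this NDC completion */
    dwa->dw_thresh[dwa->head] = thresh;
    dwa->head = (u8_t)((dwa->head + 1) % MAX_DW_THRESH_ENTRY_CNT);
}
#endif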
295 
296 typedef struct _lm_tcp_con_rx_sws_info_t
297 {
298     u32_t  drv_rcv_win_right_edge; /* The driver's window right edge (shadow of fw, and may be
299                                     * larger if the difference is smaller than mss) */
300     u32_t  mss;                     /* min(tcp_const.remote_mss,
301                                       parent_path->path_cached.path_mtu - HEADERS size) */
302     u32_t extra_bytes;
303 
304     u8_t   timer_on;
305 } lm_tcp_con_rx_sws_info_t;
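
/* Illustrative sketch (not part of the driver API): the classic silly-window-
 * syndrome rule these fields suggest - the right edge advertised to the fw is
 * advanced only when it would move by at least one mss (the timer covers the
 * "too small for too long" case).  The helper name is hypothetical; the
 * authoritative implementation lives in the lm L4 rx code. */
#if 0   /* example only - not compiled */
static void example_sws_maybe_advance(lm_tcp_con_rx_sws_info_t *sws,
                                      u32_t new_right_edge,
                                      u8_t *update_fw)
{
    /* advance in steps of at least one mss to avoid silly window syndrome */
    if (new_right_edge - sws->drv_rcv_win_right_edge >= sws->mss) {
        sws->drv_rcv_win_right_edge = new_right_edge;
        *update_fw = 1;     /* worth telling the fw about the new edge          */
    } else {
        *update_fw = 0;     /* change too small - wait (possibly arm the timer) */
    }
}
#endif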
306 
307 /*******************************************************************************
308  * Rx connection's special information
309  ******************************************************************************/
310 typedef struct _lm_tcp_con_rx_t
311 {
312     lm_tcp_con_rx_gen_info_t    gen_info;
313     lm_tcp_con_rx_sws_info_t    sws_info;
314 
315     /* Last bd written to: required in the special case of very large application buffers
316      * not fitting into the bd-chain. */
317     struct toe_rx_bd * last_rx_bd;
318 
319     /* Remember a remote disconnect event until all received data is
320      * completed/indicated successfully to the client */
321     u8_t                        flags;
322     #define TCP_CON_RST_IND_PENDING             0x1
323     #define TCP_CON_FIN_IND_PENDING             0x2
324     u8_t                        zero_byte_posted_during_ind;
325     u8_t                        check_data_integrity_on_complete;
326     u8_t                        check_data_integrity_on_receive;
327     u32_t                       compared_bytes;
328 
329     u32_t skp_bytes_copied; /* counter of bytes that were already copied to the buffer at post time and for
330                              * which we will receive a 'skip' completion that we'll need to ignore... This counter
331                              * must be protected by a lock */
332     /* GilR 4/3/2006 - TBA - add lm tcp con rx debug/stats fields? */
333     u32_t rx_zero_byte_recv_reqs; /* #Zero byte receive requests */
334 } lm_tcp_con_rx_t;
335 
336 /*******************************************************************************
337  * Tx connection's special information
338  ******************************************************************************/
339 typedef struct _lm_tcp_con_tx_t
340 {
341     u16_t   bds_without_comp_flag; /* counter of consecutive BDs without CompFlag */
342     u8_t   flags;
343     #define TCP_CON_FIN_REQ_LM_INTERNAL     0x1 /* FIN request completion should
344                                          * not be indicated to mm */
345     #define TCP_CON_RST_IND_NOT_SAFE 0x2
346 
347 
348     u8_t   _pad;
349     u32_t mss;
350 } lm_tcp_con_tx_t;
351 
352 
353 /*******************************************************************************
354  * TCP connection - rx OR tx
355  ******************************************************************************/
356 /* This structure is used to collect information during a DPC without taking the
357  * fp-lock. All fields in this structure must be accessed ONLY from within the
358  * DPC.
359  */
360 typedef struct _lm_tcp_dpc_info_t
361 {
362     s_list_entry_t    link; /* must be the first entry here */
363     s_list_entry_t  * dpc_completed_tail; /* points to the tail of the sub-list of active_tb_list that needs to
364                                            * be completed. */
365     u32_t             dpc_bufs_completed; /* number of buffers completed in the dpc (aggregated during process
366                                            * stage for fast splitting of the active_tb_list at completion stage)*/
367     u32_t             dpc_rq_placed_bytes; /* how many bytes were placed on rq as a result of rq-cmp / copying from grq->rq */
368     u32_t             dpc_actual_bytes_completed; /* number of bytes completed to client - aggregated during process stage */
369     u16_t             dpc_bd_used;        /* number of bds used - aggregated during process stage */
370     u16_t             dpc_flags;          /* flags marked during cqe processing - only accessed during processing and
371                                            * snapshot-ed under a lock */
372     #define LM_TCP_DPC_RESET_RECV 0x1
373     #define LM_TCP_DPC_FIN_RECV   0x2
374     #define LM_TCP_DPC_FIN_CMP    0x4
375     #define LM_TCP_DPC_KA_TO      0x8
376     #define LM_TCP_DPC_RT_TO      0x10
377     #define LM_TCP_DPC_URG        0x20
378     #define LM_TCP_DPC_RAMROD_CMP 0x40
379 //    #define LM_TCP_DPC_NDC        0x80
380     #define LM_TCP_DPC_DBT_RE     0x100
381     #define LM_TCP_DPC_OPT_ERR    0x200
382     #define LM_TCP_DPC_UPLD_CLOSE 0x400
383     #define LM_TCP_DPC_FIN_RECV_UPL 0x800
384     #define LM_TCP_DPC_TOO_BIG_ISLE     0x1000
385     #define LM_TCP_DPC_TOO_MANY_ISLES   0x2000
386 
387 /*
388     #define LM_TCP_COMPLETE_FP (LM_TCP_DPC_RESET_RECV | LM_TCP_DPC_FIN_RECV | LM_TCP_DPC_FIN_RECV_UPL | LM_TCP_DPC_FIN_CMP | \
389                                 LM_TCP_DPC_KA_TO | LM_TCP_DPC_RT_TO | LM_TCP_DPC_URG | LM_TCP_DPC_RAMROD_CMP | LM_TCP_DPC_NDC | \
390                                 LM_TCP_DPC_DBT_RE | LM_TCP_DPC_OPT_ERR | LM_TCP_DPC_UPLD_CLOSE | \
391                                 LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES)
392 */
393     /* dpc snapshot parameters: taken before an operation that can release a lock is done
394      * in lm_tcp_xx_complete_fp */
395     u16_t              snapshot_flags; /* only accessed under lock */
396 
397     /* We have special cases where lm blocks um from posting until a specific buffer gets completed; we have a flag for this.
398      * The flag is accessed in the post flow, so it should be protected by a lock; therefore we remember that we have to unset it
399      * in the completion stage (under a lock). */
400     u8_t               dpc_unblock_post;
401     /* debug / stats */
402     u8_t              dpc_comp_blocked;
403 
404     /* the window size returned by the fw after a window-size decrease request completes; written back to the fw */
405     u32_t             dpc_fw_wnd_after_dec;
406 
407 } lm_tcp_dpc_info_t;
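
/* Illustrative sketch (not part of the driver API) of the two-phase pattern the
 * comments above describe: dpc_flags is filled lock-free while CQEs are
 * processed, then folded into snapshot_flags under the connection lock before
 * the completion phase runs.  The EXAMPLE_* lock calls are placeholders for
 * whatever synchronization the mm/um layer provides. */
#if 0   /* example only - not compiled */
static void example_snapshot_dpc_flags(lm_tcp_dpc_info_t *dpc_info)
{
    /* processing phase (no fp-lock): flags were already aggregated into dpc_flags */

    EXAMPLE_ACQUIRE_CON_LOCK();                       /* placeholder lock primitive */
    dpc_info->snapshot_flags |= dpc_info->dpc_flags;
    dpc_info->dpc_flags       = 0;
    EXAMPLE_RELEASE_CON_LOCK();                       /* placeholder lock primitive */

    /* the completion phase may now release/re-acquire the lock and still see a
     * consistent view of the events in snapshot_flags */
}
#endif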
408 
409 typedef struct _lm_tcp_con_t
410 {
411     lm_tcp_dpc_info_t dpc_info; /* must be the first field */
412 
413     struct _lm_tcp_state_t * tcp_state; /* The tcp state associated with this connection */
414 
415     union {
416         volatile struct toe_rx_db_data  *rx;
417         volatile struct toe_tx_db_data  *tx;
418     } db_data;
419     lm_address_t        phys_db_data;
420 
421     /* rx/tx tcp connection info. */
422     union
423     {
424         lm_tcp_con_rx_t rx;
425         lm_tcp_con_tx_t tx;
426     } u;
427 
428     lm_bd_chain_t bd_chain;
429 
430     /* List of posted buffers (i.e. attached to the bd chain) */
431     s_list_t        active_tb_list;
432     u32_t           rq_nbytes; /* how many bytes are in the active-tb-list */
433 
434     /* buffer of cqes that represent the last X cqes received */
435     lm_tcp_qe_buffer_t history_cqes;
436 
437     u32_t           type;
438     #define TCP_CON_TYPE_RX                     1
439     #define TCP_CON_TYPE_TX                     2
440 
441     /* accumulator of currently posted application buffer bytes.
442      * accumulated in order to set lm_tcp_buffer.app_buf_size of
443      * the last tcp buffer of the application buffer */
444     u32_t           app_buf_bytes_acc_post;
445 
446     /* accumulator of currently completed application buffer bytes.
447      * accumulated in order to set lm_tcp_buffer.app_buf_xferred of
448      * the last tcp buffer of the application buffer */
449     u32_t           app_buf_bytes_acc_comp;
450 
451     u32_t           db_more_bytes;  /* number of bytes to be produced in next doorbell */
452     u16_t           db_more_bufs;   /* number of tcp buffers to be produced in next doorbell */
453     u16_t           db_more_bds;    /* number of bds to be produced in next doorbell */
454 
455     /* flags are used for managing the connection's posting/completing/indicating state machines */
456     u32_t           flags;
457     #define TCP_FIN_REQ_POSTED                              0x0001
458     #define TCP_RST_REQ_POSTED                              0x0002
459     #define TCP_INV_REQ_POSTED                              0x0004
460     #define TCP_TRM_REQ_POSTED                              0x0008
461     #define TCP_FIN_REQ_COMPLETED                           0x0010
462     #define TCP_RST_REQ_COMPLETED                           0x0020
463     #define TCP_INV_REQ_COMPLETED                           0x0040
464     #define TCP_TRM_REQ_COMPLETED                           0x0080
465     #define TCP_REMOTE_FIN_RECEIVED                         0x0100
466     #define TCP_REMOTE_RST_RECEIVED                         0x0200
467     #define TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED        0x0400
468     #define TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED        0x0800
469     #define TCP_INDICATE_REJECTED                           0x1000
470     #define TCP_POST_BLOCKED                                0x2000
471     #define TCP_COMP_BLOCKED                                0x4000
472     #define TCP_COMP_DEFERRED                               0x8000
473     #define TCP_BUFFERS_ABORTED                            0x10000
474     #define TCP_DEFERRED_PROCESSING                        0x20000
475     #define TCP_POST_DELAYED                               0x40000 /* lm sets this when posting buffers is delayed for some reason */
476     #define TCP_POST_COMPLETE_SPLIT                        0x80000 /* lm sets this when every split buffer that'll be posted will be completed immediately */
477     #define TCP_POST_NO_SKP                               0x100000 /* lm sets this when there will be no more skp completions from fw (comp blocked...) */
478     #define TCP_UPLOAD_REQUESTED                          0x200000 /* lm sets this when FW requests an upload for any reason - after this is set, no more uploads will be requested*/
479     #define TCP_DB_BLOCKED                                0x400000
480     #define TCP_RX_DB_BLOCKED       (TCP_REMOTE_FIN_RECEIVED | TCP_REMOTE_RST_RECEIVED | TCP_DB_BLOCKED)
481     #define TCP_TX_DB_BLOCKED       (TCP_REMOTE_RST_RECEIVED | TCP_DB_BLOCKED)
482     #define TCP_TX_POST_BLOCKED     (TCP_FIN_REQ_POSTED | TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED | \
483                                      TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED | TCP_POST_BLOCKED \
484                                      )
485                                     /* GilR 4/4/2006 - TBD - open issue with Hav, for Tx POST BLOCKED we might not wait for 'rx indicated' after RST received */
486     #define TCP_RX_POST_BLOCKED     (TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED | \
487                                      TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED | TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED | \
488                                      TCP_POST_BLOCKED)
489     #define TCP_TX_COMP_BLOCKED     (TCP_RST_REQ_COMPLETED | TCP_FIN_REQ_COMPLETED | TCP_REMOTE_RST_RECEIVED | \
490                                      TCP_INV_REQ_COMPLETED | TCP_TRM_REQ_COMPLETED | TCP_COMP_BLOCKED \
491                                      )
492     #define TCP_RX_COMP_BLOCKED     (TCP_RST_REQ_COMPLETED | TCP_REMOTE_FIN_RECEIVED | TCP_REMOTE_RST_RECEIVED | \
493                                      TCP_INV_REQ_COMPLETED | TCP_TRM_REQ_COMPLETED | TCP_COMP_BLOCKED \
494                                      )
495     #define TCP_TX_COMP_DEFERRED     TCP_COMP_DEFERRED
496     #define TCP_RX_COMP_DEFERRED     TCP_COMP_DEFERRED
497     #define TCP_RX_IND_BLOCKED      (TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED | TCP_INDICATE_REJECTED)
498 
499     /* GilR 4/3/2006 - TBA - add lm con debug/statistics */
500     u64_t bytes_post_cnt;       /* cyclic counter of posted application buffer bytes */
501     u64_t bytes_comp_cnt;       /* cyclic counter of completed application buffer bytes (including skipped bytes due to push) */
502     u64_t bytes_push_skip_cnt;
503     u64_t bytes_skip_post_cnt;  /* skipped post because of generic data */
504     u32_t buffer_skip_post_cnt; /* skipped post because of generic data */
505     u32_t buffer_post_cnt;
506     u32_t buffer_completed_cnt;
507     u32_t rq_completion_calls;
508     u32_t partially_completed_buf_cnt; /* included in 'buffer_completed_cnt' above */
509     u32_t buffer_aborted_cnt;
510     u64_t bytes_aborted_cnt;       /* cyclic counter of aborted application buffer bytes */
511     u32_t bytes_trm_aborted_cnt;   /* cyclic counter of bytes received with rst ramrod completion */
512     u32_t fp_db_cnt;   /* Fast path doorbell counter - doesn't count Adv. Wnd. doorbells */
513     u32_t indicate_once_more_cnt;
514     u32_t droped_non_empty_isles;
515     u32_t droped_empty_isles;
516     u32_t rx_post_blocked;
517     u32_t zb_rx_post_blocked;
518     u32_t partially_filled_buf_sent;
519     u32_t abortion_under_flr;
520 } lm_tcp_con_t;
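
/* Illustrative sketch (not part of the driver API) tying together the posting-
 * side fields above: the post-blocked check, the application-buffer byte
 * accumulator, and the 'more to doorbell' counters that are flushed when the
 * doorbell is actually rung.  The helper name is hypothetical and error handling
 * is reduced to a single failure status; the real post path lives in the lm L4
 * tx/rx code. */
#if 0   /* example only - not compiled */
static lm_status_t example_post_tx_buf(lm_tcp_con_t *tx_con, lm_tcp_buffer_t *tcp_buf)
{
    /* new posts are refused once any tx post-blocking condition is set */
    if (tx_con->flags & TCP_TX_POST_BLOCKED) {
        return LM_STATUS_FAILURE;
    }

    /* accumulate application-buffer bytes so the POST_END buffer carries the total */
    tx_con->app_buf_bytes_acc_post += tcp_buf->size;
    if (tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
        tcp_buf->app_buf_size          = tx_con->app_buf_bytes_acc_post;
        tx_con->app_buf_bytes_acc_post = 0;
    }

    /* remember how much will be announced to the fw in the next doorbell */
    tx_con->db_more_bytes += tcp_buf->size;
    tx_con->db_more_bufs++;
    tx_con->db_more_bds   += tcp_buf->bd_used;

    return LM_STATUS_SUCCESS;
}
#endif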
521 
522 
523 /*******************************************************************************
524  * Slow path request information
525  ******************************************************************************/
526 /* structure used for storing the data returned by a completion of a slow-path request */
527 typedef union _lm_tcp_slow_path_ret_data_t
528 {
529     struct {
530         lm_frag_list_t            * frag_list;
531         struct _lm_tcp_gen_buf_t  * ret_buf_ctx;
532     } tcp_upload_data;
533 } lm_tcp_slow_path_ret_data_t;
534 
535 /* structure used for storing the data required for a slow-path request */
536 typedef struct _lm_tcp_path_relink_cached_t
537 {
538     l4_path_cached_state_t  path_cached;
539     l4_neigh_cached_state_t neigh_cached;
540 } lm_tcp_path_relink_cached_t;
541 
542 typedef union _lm_tcp_slow_path_sent_data_t {
543     struct {
544         void * data;
545     } tcp_update_data;
546 } lm_tcp_slow_path_sent_data_t ;
547 
548 typedef union _lm_tcp_slow_path_phys_data_t
549 {
550     struct toe_context toe_ctx; /* used by query slow path request */
551     struct toe_update_ramrod_cached_params update_ctx; /* used by update slow path request */
552 
553 } lm_tcp_slow_path_phys_data_t;
554 
555 typedef struct _lm_tcp_slow_path_data_t {
556     lm_tcp_slow_path_phys_data_t  * virt_addr;
557     lm_address_t                    phys_addr;
558 }lm_tcp_slow_path_data_t ;
559 
560 typedef struct _lm_tcp_slow_path_request_t
561 {
562     lm_sp_req_common_t sp_req_common;
563     lm_tcp_slow_path_ret_data_t  ret_data;    /* SP req. output data */
564     lm_tcp_slow_path_sent_data_t sent_data;   /* SP req. input data  */
565 
566     u32_t    type;
567     #define SP_REQUEST_NONE                         0
568     #define SP_REQUEST_INITIATE_OFFLOAD             1
569     #define SP_REQUEST_TERMINATE_OFFLOAD            2
570     #define SP_REQUEST_QUERY                        3
571     #define SP_REQUEST_UPDATE_TCP                   4
572     #define SP_REQUEST_UPDATE_PATH                  5
573     #define SP_REQUEST_UPDATE_NEIGH                 6
574     #define SP_REQUEST_INVALIDATE                   7
575     #define SP_REQUEST_ABORTIVE_DISCONNECT          8
576     #define SP_REQUEST_TERMINATE1_OFFLOAD           9
577     #define SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT 10 /* used only for LOCAL graceful disconnect */
578     #define SP_REQUEST_PENDING_REMOTE_DISCONNECT    11 /* used for both abortive and graceful disconnect */
579     #define SP_REQUEST_PENDING_TX_RST               12 /* used for TX Reset received while buffers in the active-tb-list */
580     #define SP_REQUEST_BLOCKED                      13 /* when there is no pending connection and we just want to block sp-commands,
581                                                         * for example, to delay offload */
582     #define SP_REQUEST_UPDATE_PATH_RELINK           14
583     lm_status_t status; /* request completion status */
584 } lm_tcp_slow_path_request_t;
585 
586 /*******************************************************************************
587  * information required for calculating the TCP state on 'query'
588  * and 'terminate' completions
589  ******************************************************************************/
590 typedef struct _lm_tcp_state_calculation_t
591 {
592     u64_t fin_request_time;     /* written by Tx path, when a fin request is posted to the chip */
593     u64_t fin_completed_time;   /* written by Tx path, when a fin request is completed by the chip */
594     u64_t fin_reception_time;   /* written by Rx path, when a remote fin is received */
595     u8_t  con_rst_flag;         /* set whenever chip reports RST reception or RST sent completion */
596     u8_t  con_upld_close_flag;  /* set whenever chip reports request to upload a connection after SYN was received or FIN_WAIT2 timer expired */
597     u8_t  _pad[2];
598 } lm_tcp_state_calculation_t;
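
/* Illustrative sketch (not part of the driver API): a simplified version of the
 * state derivation these timestamps/flags feed when a 'query' or 'terminate'
 * completes.  Descriptive strings are returned instead of the real l4 state
 * constants from l4states.h, and corner cases are ignored; the authoritative
 * logic lives in the lm slow-path code. */
#if 0   /* example only - not compiled */
static const char * example_derive_tcp_state(lm_tcp_state_calculation_t const *calc)
{
    if (calc->con_rst_flag) {
        return "closed (reset seen)";
    }
    if (calc->fin_reception_time == 0) {
        /* no remote FIN yet */
        if (calc->fin_request_time == 0) {
            return "established";
        }
        return calc->fin_completed_time ? "fin-wait-2" : "fin-wait-1";
    }
    /* a remote FIN was received */
    if (calc->fin_request_time == 0) {
        return "close-wait";
    }
    if (calc->fin_completed_time == 0) {
        return "closing / last-ack (local FIN still pending)";
    }
    return (calc->fin_reception_time < calc->fin_request_time) ?
           "closed (passive close finished)" : "time-wait / closed";
}
#endif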
599 
600 /*******************************************************************************
601  * tcp state
602  ******************************************************************************/
603 typedef struct _lm_tcp_state_t
604 {
605     lm_state_header_t           hdr;
606     lm_path_state_t             *path;
607     lm_tcp_con_t                *rx_con;
608     lm_tcp_con_t                *tx_con;
609     lm_tcp_slow_path_request_t  *sp_request;
610     lm_tcp_slow_path_data_t     sp_req_data;
611     lm_tcp_state_calculation_t  tcp_state_calc;
612     void                        *ctx_virt; /* Can point to different structures depending on the ulp_type */
613     lm_address_t                ctx_phys;
614     l4_tcp_delegated_state_t    tcp_delegated;
615     l4_tcp_const_state_t        tcp_const;
616     l4_tcp_cached_state_t       tcp_cached;
617 
618     u32_t                       cid;
619     #define TCP_CID_MASK 0xffffff
620 
621     /* synchronization between Tx and Rx completions of slow path events */
622     u16_t                       sp_flags;
623     #define SP_REQUEST_COMPLETED_RX     0x001
624     #define SP_REQUEST_COMPLETED_TX     0x002
625     #define REMOTE_RST_INDICATED_RX     0x004
626     #define REMOTE_RST_INDICATED_TX     0x008
627     /* mainly for debugging purposes... slow-path indications when there is no fp... */
628     #define SP_TCP_OFLD_REQ_POSTED      0x010
629     #define SP_TCP_SRC_REQ_POSTED       0x020
630     #define SP_TCP_TRM_REQ_POSTED       0x040
631     #define SP_TCP_QRY_REQ_POSTED       0x080
632     #define SP_TCP_OFLD_REQ_COMP        0x100
633     #define SP_TCP_SRC_REQ_COMP         0x200
634     #define SP_TCP_TRM_REQ_COMP         0x400
635     #define SP_TCP_QRY_REQ_COMP         0x800
636 
637     u8_t                       in_searcher;   /* was the tcp state added to searcher hash */
638     u8_t                       ulp_type;
639     void *                     aux_memory;
640     u32_t                      aux_mem_size;
641     u8_t                       type_of_aux_memory;
642     #define TCP_CON_AUX_RT_MEM          0x1
643     u8_t                       aux_mem_flag;
644     #define TCP_CON_AUX_RT_MEM_SUCCSESS_ALLOCATION          0x1
645     #define TCP_CON_AUX_RT_MEM_FAILED_ALLOCATION            0x2
646 
647     u8_t                       sp_request_pending_completion;
648     u8_t                       pending_abortive_disconnect;
649 
650     lm_tcp_integrity_info_t    integrity_info;
651     /* GilR 4/3/2006 - TBA - add lm tcp state debug/statistics */
652 } lm_tcp_state_t;
653 
654 /*******************************************************************************
655  * Generic TCP buffer.
656  ******************************************************************************/
657 typedef struct _lm_tcp_gen_buf_t
658 {
659     d_list_entry_t   link;  /* MUST be the first field in this structure */
660     /* Generic buffers are chained into a list. The next element is in fact a d_list_entry;
661      * however, the generic buffer list is not always accessed as a d_list - it is sometimes traversed as
662      * a list ending with NULL. */
663     #define NEXT_GEN_BUF(_gen_buf) (struct _lm_tcp_gen_buf_t *)d_list_next_entry(&((_gen_buf)->link))
664     #define PREV_GEN_BUF(_gen_buf) (struct _lm_tcp_gen_buf_t *)d_list_prev_entry(&((_gen_buf)->link))
665 
666     lm_address_t     buf_phys;
667     lm_tcp_state_t * tcp;    /* mainly for updating pending_return_indications */
668     u8_t           * buf_virt;
669 
670     /* Following 4 fields are used for supporting SWS accessed when buffer is returned */
671     u32_t            ind_bytes; /* set only in buffer that is head of indication - how many bytes were indicated */
672     u32_t            ind_nbufs; /** how many buffers were included in the indication. Needed for:
673                                  *  - returning buffers to generic pool
674                                  *  - efficiently restore the peninsula list */
675     /** refcnt required only if we support RcvIndicationSize > 0 */
676     u16_t            refcnt; /* reference count for number of times the buffer was successfully indicated to um */
677     u16_t            placed_bytes;
678 
679     /* The FREE_WHEN_DONE flag indicates that this generic buffer
680      * contains the buffered data received when doing tcp_offload; when it is completed, this
681      * generic buffer is freed back into system memory instead of the generic buffer pool. */
682     u8_t flags;
683     #define GEN_FLAG_FREE_WHEN_DONE 0x01
684     #define GEN_FLAG_SWS_UPDATE     0x02 /* In certain cases a successful indication updates the window immediately; however,
685                                           * when we enter a 'safe-mode' we wait for the generic buffers to return before we
686                                           * update the window. This flag indicates whether or not we have to update. */
687 
688     u16_t phys_offset;                   /* When allocating gen bufs for buffered data, save the offset
689                                             from the original phys addr, and use it when we free the gen buf */
690 
691 } lm_tcp_gen_buf_t;
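
/* Illustrative sketch (not part of the driver API): the head of an indication
 * carries ind_nbufs/ind_bytes for the whole indication, and the remaining
 * buffers are reached through NEXT_GEN_BUF.  A hypothetical walk over a returned
 * indication, assuming the chain was left intact; what is done per buffer
 * (return to pool, free, SWS update) is decided elsewhere. */
#if 0   /* example only - not compiled */
static void example_walk_returned_indication(lm_tcp_gen_buf_t *ind_head)
{
    lm_tcp_gen_buf_t *gen_buf = ind_head;
    u32_t             i;

    for (i = 0; (i < ind_head->ind_nbufs) && (gen_buf != NULL); i++) {
        /* per-buffer processing goes here */
        gen_buf = NEXT_GEN_BUF(gen_buf);
    }
}
#endif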
692 
693 
694 /*******************************************************************************
695  * generic buffer queue
696  ******************************************************************************/
697 typedef struct _lm_tcp_grq_t
698 {
699     lm_bd_chain_t       bd_chain;
700 
701     /* List of posted generic buffers (i.e. attached to the bd chain) */
702     d_list_t            active_gen_list;
703 
704     /* List of returned generic buffers; may be used for immediate compensation of this grq */
705     d_list_t            aux_gen_list;
706 
707     lm_isle_t*          isles_pool;
708     /* Flag indicating that the grq needs to be compensated after generic buffers are allocated... */
709     u8_t                grq_compensate_on_alloc;
710     u8_t                grq_invloved_in_rss;
711 
712     u16_t               low_bds_threshold;
713     u16_t               high_bds_threshold;
714 
715     s16_t               number_of_isles_delta;
716     s32_t               gen_bufs_in_isles_delta;
717 
718     /* statistics */
719     u16_t               max_grqs_per_dpc;     /* maximum grqs compensated in dpc */
720     u16_t               num_grqs_last_dpc;
721     u16_t               num_deficient;        /* number of times compensation wasn't complete */
722     u16_t               avg_grqs_per_dpc;
723     u32_t               avg_dpc_cnt;
724     u32_t               sum_grqs_last_x_dpcs;
725     u32_t               gen_bufs_compensated_from_bypass_only;
726     u32_t               gen_bufs_compensated_till_low_threshold;
727     u32_t               gen_bufs_collected_to_later_compensation;
728 } lm_tcp_grq_t;
729 
730 /*******************************************************************************
731  * L4 receive completion queue
732  ******************************************************************************/
733 typedef struct _lm_tcp_rcq_t
734 {
735     lm_bd_chain_t   bd_chain;
736 
737     /* points directly to the TOE Rx index in the USTORM part
738      * of the non-default status block */
739     u16_t volatile      *hw_con_idx_ptr;
740 
741     /* for RSS indirection table update synchronization */
742     u8_t                rss_update_pending; /* unused */
743     u8_t                suspend_processing;
744     u32_t               update_cid;
745     u32_t               rss_update_stats_quiet;
746     u32_t               rss_update_stats_sleeping;
747     u32_t               rss_update_stats_delayed;
748     u32_t               rss_update_processing_delayed;
749     u32_t               rss_update_processing_continued;
750     u32_t               rss_update_processing_max_continued;
751 
752     /* statistics */
753     u16_t               max_cqes_per_dpc;
754     u16_t               num_cqes_last_dpc;
755     u16_t               avg_cqes_per_dpc;
756     u16_t               _pad16;
757     u32_t               avg_dpc_cnt;
758     u32_t               sum_cqes_last_x_dpcs;
759 
760     lm_hc_sb_info_t     hc_sb_info;
761 
762 } lm_tcp_rcq_t;
763 
764 /*******************************************************************************
765  * L4 send completion queue
766  ******************************************************************************/
767 typedef struct _lm_tcp_scq_t
768 {
769     lm_bd_chain_t   bd_chain;
770 
771     /* points directly to the TOE Tx index in the CSTORM part
772      * of the non-default status block */
773     u16_t volatile      *hw_con_idx_ptr;
774 
775     /* statistics */
776     u16_t               max_cqes_per_dpc;
777     u16_t               num_cqes_last_dpc;
778     u16_t               avg_cqes_per_dpc;
779     u16_t               _pad16;
780     u32_t               avg_dpc_cnt;
781     u32_t               sum_cqes_last_x_dpcs;
782 
783     lm_hc_sb_info_t     hc_sb_info;
784 
785 } lm_tcp_scq_t;
786 
787 /*******************************************************************************
788  * states block - includes all offloaded states and possibly other offload
789  * information of a specific client.
790  ******************************************************************************/
791 typedef struct _lm_state_block_t
792 {
793     d_list_t                tcp_list;
794     d_list_t                path_list;
795     d_list_t                neigh_list;
796 } lm_state_block_t;
797 
798 
799 typedef struct _lm_toe_statistics_t
800 {
801     u32_t  total_ofld; /* cyclic counter of number of offloaded tcp states */
802     u32_t  total_upld; /* cyclic counter of number of uploaded tcp states */
803     s32_t  total_indicated; /* cyclic counter of number of generic indications (sum of connections pending...) */
804     s32_t  total_indicated_returned; /* cyclic counter of number of generic indications that have returned */
805 
806     /* aggregative per-connections statistics */
807     u32_t rx_rq_complete_calls;     /* #RQ completion calls (total, copy + zero copy) */
808     u32_t rx_rq_bufs_completed;     /* #RQ completion buffers */
809     u64_t rx_bytes_completed_total; /* #RQ completion bytes */
810 
811     u32_t rx_accepted_indications;     /* #GRQ completion calls (indicate) */
812     u32_t rx_bufs_indicated_accepted;  /* #GRQ completion buffers */
813     u64_t rx_bytes_indicated_accepted; /* #GRQ completion bytes */
814 
815     u32_t rx_rejected_indications;     /* #failed or partially consumed indicate calls */
816     u32_t rx_bufs_indicated_rejected;  /* #GRQ completion buffers */
817     u64_t rx_bytes_indicated_rejected; /* #GRQ completion bytes */
818 
819     u32_t rx_zero_byte_recv_reqs;     /* #Zero byte receive requests */
820     u32_t rx_bufs_copied_grq;         /* #VBD copy bufs total */
821     u32_t rx_bufs_copied_rq;          /* #VBD copy bufs total */
822     u32_t _pad32_1;
823     u64_t rx_bytes_copied_in_post;    /* #VBD copy bytes in post phase*/
824     u64_t rx_bytes_copied_in_comp;    /* #VBD copy bytes in completion phase */
825     u64_t rx_bytes_copied_in_process; /* #VBD copy bytes in process phase */
826 
827     /* post */
828     u32_t rx_bufs_posted_total;
829     u32_t rx_bufs_skipped_post;
830     u64_t rx_bytes_skipped_post;
831     u64_t rx_bytes_posted_total;
832 
833     /* push related */
834     u64_t rx_bytes_skipped_push;
835     u32_t rx_partially_completed_buf_cnt;
836 
837     /* abort */
838     u32_t rx_buffer_aborted_cnt;
839 
840     u32_t tx_rq_complete_calls;
841     u32_t tx_rq_bufs_completed;
842     u64_t tx_bytes_posted_total;
843     u64_t tx_bytes_completed_total;
844 
845     u32_t total_dbg_upld_requested;
846     u32_t total_fin_upld_requested;
847     u32_t total_rst_upld_requested;
848     u32_t total_close_upld_requested;
849     u32_t total_dbt_upld_requested;
850     u32_t total_opt_upld_requested;
851     u32_t total_big_isle_upld_requesed;
852     u32_t total_many_isles_upld_requesed;
853     u32_t total_upld_requested[L4_UPLOAD_REASON_MAX];
854     u32_t con_state_on_upload[L4_TCP_CON_STATE_MAX];
855     u32_t total_bytes_lost_on_upload;
856     u32_t total_droped_non_empty_isles;
857     u32_t total_droped_empty_isles;
858     u32_t total_rx_post_blocked;
859     u32_t total_zb_rx_post_blocked;
860     u32_t total_cfc_delete_error;
861     u32_t total_num_non_full_indications;
862     u32_t total_aux_mem_success_allocations;
863     u32_t total_aux_mem_failed_allocations;
864     u32_t total_rx_abortion_under_flr;
865     u32_t total_tx_abortion_under_flr;
866     u32_t max_number_of_isles_in_single_con;
867     u32_t total_aborive_disconnect_during_completion;
868     u32_t total_pending_aborive_disconnect_completed;
869     u32_t total_aborive_disconnect_completed;
870 
871     u64_t total_buffered_data;
872 } lm_toe_statistics_t;
873 
874 typedef struct _lm_toe_isles_t
875 {
876     s32_t gen_bufs_in_isles;
877     s32_t max_gen_bufs_in_isles;
878     s16_t number_of_isles;
879     s16_t max_number_of_isles;
880     u8_t  l4_decrease_archipelago;
881     u8_t  __pad[3];
882 } lm_toe_isles_t;
883 
884 /*******************************************************************************
885  * toe info - all TOE (L4) information/data structures of the lm_device
886  ******************************************************************************/
887 typedef struct _lm_toe_info_t
888 {
889     struct _lm_device_t     *pdev;
890     lm_state_block_t        state_blk;
891 
892     lm_toe_statistics_t     stats;
893     lm_toe_isles_t          archipelago;
894 
895     lm_tcp_scq_t            scqs[MAX_L4_TX_CHAIN];
896     lm_tcp_rcq_t            rcqs[MAX_L4_RX_CHAIN];
897     lm_tcp_grq_t            grqs[MAX_L4_RX_CHAIN];
898 
899     u8_t                    indirection_table[TOE_INDIRECTION_TABLE_SIZE];
900     u32_t                   rss_update_cnt; /* GilR 4/4/2006 - TBD on RSS indirection table update implementation */
901     u32_t                   gen_buf_size;   /* The size of a generic buffer based on gen_buf_min_size and mtu */
902 
903     u8_t                    state;
904     #define                 LM_TOE_STATE_NONE       0
905     #define                 LM_TOE_STATE_INIT       1
906     #define                 LM_TOE_STATE_NORMAL     2
907 
908     /* Once a generic indication has succeeded and the buffers are given to the client, we have to choose whether we want
909      * to give a window-update immediately (short-loop) or wait for the buffer to return (long-loop). The mode is determined
910      * by a set of rules in the UM related to the generic buffer pool and its state. The UM sets this parameter for the lm,
911      * and at each indication the lm checks which mode it is in, marks the generic buffer and gives a window-update accordingly  */
912     u8_t                    update_window_mode;
913     #define                 LM_TOE_UPDATE_MODE_LONG_LOOP  0
914     #define                 LM_TOE_UPDATE_MODE_SHORT_LOOP 1
915 
916     /* This field is used to indicate that certain events have occurred in TOE. Should be updated under TOE-LOCK */
917     u8_t                    toe_events;
918     #define                 LM_TOE_EVENT_WINDOW_DECREASE 0x1
919     u8_t                    __pad[1];
920 
921     lm_toe_integrity_info_t integrity_info;
922 
923     /* Slow-path data for toe-rss (common and not per connection, therefore located here! ) */
924     struct toe_rss_update_ramrod_data * rss_update_data;
925     lm_address_t                        rss_update_data_phys;
926 } lm_toe_info_t;
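
/* Illustrative sketch (not part of the driver API) of the short-loop/long-loop
 * choice described above: in short-loop mode the window grows at indicate time,
 * in long-loop mode the head generic buffer is marked with GEN_FLAG_SWS_UPDATE
 * and the update is deferred until the client returns it.  The helper name and
 * argument names are hypothetical. */
#if 0   /* example only - not compiled */
static void example_post_indicate_window_update(lm_toe_info_t            *toe_info,
                                                lm_tcp_gen_buf_t         *ind_head,
                                                lm_tcp_con_rx_gen_info_t *gen_info,
                                                u32_t                     accepted_bytes)
{
    if (toe_info->update_window_mode == LM_TOE_UPDATE_MODE_SHORT_LOOP) {
        /* short loop: grow the advertised window right away */
        gen_info->add_sws_bytes += accepted_bytes;
    } else {
        /* long loop: defer the update until the client returns the buffers */
        ind_head->flags                   |= GEN_FLAG_SWS_UPDATE;
        gen_info->pending_indicated_bytes += accepted_bytes;
    }
}
#endif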
927 
928 
929 #endif /* _LM_L4ST_H */
930