1
2 #include "lm5710.h"
3 #include "lm.h"
4 #include "lm_l4sp.h"
5 #include "command.h"
6 #include "context.h"
7 #include "bd_chain.h"
8 #include "mm.h"
9 #include "mm_l4if.h"
10 #include "lm_l4fp.h"
11 #include "lm_l4sp.h"
12 #include "everest_l5cm_constants.h"
13 #include "l4debug.h"
14
15 /* Sizes of objects that need to be allocated in physical memory */
16 #define TOE_SP_PHYS_DATA_SIZE ((sizeof(lm_tcp_slow_path_phys_data_t) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
17 #define TOE_DB_RX_DATA_SIZE ((sizeof(struct toe_rx_db_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
18 #define TOE_DB_TX_DATA_SIZE ((sizeof(struct toe_tx_db_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
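/* For illustration (assuming 64-byte cache lines, i.e. CACHE_LINE_SIZE_MASK == 0x3f):
 * a 100-byte structure rounds up to (100 + 0x3f) & ~0x3f = 128 bytes, so each physically
 * allocated object starts and ends on a cache-line boundary. */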
19
20 #define TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT 2
21
22 l4_tcp_con_state_t lm_tcp_calc_state (
23 lm_device_t * pdev,
24 lm_tcp_state_t * tcp,
25 u8_t fin_was_sent
26 );
27
28 /** Description: Callback function for a spe that is completed
29  * internally in the vbd driver (not via FW)
30 */
31 void lm_tcp_comp_cb(
32 struct _lm_device_t *pdev,
33 struct sq_pending_command *pending);
34
35
36 /* GilR 11/13/2006 - TODO - ttl is temporarily overloaded for ethereal capture L4/L2 debugging */
37 #define TOE_DBG_TTL 200
38 #define ISCSI_DBG_TTL 222
39
40 #define TIMERS_TICKS_PER_SEC (u32_t)(1000)//(1 / TIMERS_TICK_SIZE_CHIP)
41 #define TSEMI_CLK1_TICKS_PER_SEC (u32_t)(1000)//(1 / TSEMI_CLK1_RESUL_CHIP)
42
43 u32_t lm_get_num_of_cashed_grq_bds(struct _lm_device_t *pdev)
44 {
45 return USTORM_TOE_GRQ_CACHE_NUM_BDS;
46 }
47
48 // this function is used only to verify that the defines above are correct (at compile time - saves runtime checking...)
49 static void _fake_func_verify_defines(void)
50 {
51 ASSERT_STATIC( TIMERS_TICKS_PER_SEC == (1 / TIMERS_TICK_SIZE_CHIP) ) ;
52 ASSERT_STATIC( TSEMI_CLK1_TICKS_PER_SEC == (1 / TSEMI_CLK1_RESUL_CHIP) ) ;
53 }
54
55 static __inline u32_t lm_time_resolution(
56 lm_device_t *pdev,
57 u32_t src_time,
58 u32_t src_ticks_per_sec,
59 u32_t trg_ticks_per_sec)
60 {
61 u64_t result;
62 u64_t tmp_result;
63 u32_t dev_factor;
64
65 DbgBreakIf(!(src_ticks_per_sec && trg_ticks_per_sec));
66
67 if (trg_ticks_per_sec > src_ticks_per_sec){
68 dev_factor = trg_ticks_per_sec / src_ticks_per_sec;
69 result = ((u64_t)src_time) * dev_factor;
70 } else {
71 tmp_result = ((u64_t)src_time) * trg_ticks_per_sec;
72
73 #if defined(_VBD_)
74 result = CEIL_DIV(tmp_result, src_ticks_per_sec);
75 #else
76 /* Here we try to avoid a 64-bit division operation */
77 if (tmp_result < 0xffffffff) {
78 result = (u32_t)tmp_result / src_ticks_per_sec;
79 } else {
80 /* src_ticks_per_sec and trg_ticks_per_sec parameters come
81 from NDIS and so far the values observed were 100 or 1000,
82 depending on Windows version. These parameters define
83 TCP timers resolution and are unlikely to change significantly
84 in the future.
85 So, here we assume that if (src_time * trg_ticks_per_sec) product
86 is out of the 32-bit range, it is because of the src_time value.
87 */
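/* Illustrative example (assumed values): src_time = 6,000,000 ms with
 * src_ticks_per_sec = trg_ticks_per_sec = 1000 gives tmp_result = 6e9, which is above
 * 0xffffffff, so we divide first: (6,000,000 / 1000) * 1000 = 6,000,000 - the same
 * result without a 64-bit division. */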
88 DbgBreakIf(src_time < src_ticks_per_sec);
89 result = ((u64_t)(src_time / src_ticks_per_sec)) * trg_ticks_per_sec;
90 }
91 #endif
92 }
93
94 if(src_time && !result) {
95 result = 1;
96 }
97 DbgMessage(pdev, VERBOSEl4sp,
98 "lm_time_resolution: src_time=%d, src_ticks_per_sec=%d, trg_ticks_per_sec=%d, result=%d\n",
99 src_time, src_ticks_per_sec, trg_ticks_per_sec, result);
100
101 DbgBreakIf(result > 0xffffffff);
102 return (u32_t)result;
103 }
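/* Typical use later in this file (see _lm_set_ofld_params_xstorm_common): converting a
 * delayed-ack value given in milliseconds (i.e. 1000 ticks per second) into the chip timer
 * domain:
 *     delayed_ack_ticks = lm_time_resolution(pdev, l4_params->delayed_ack_ticks, 1000, TIMERS_TICKS_PER_SEC);
 */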
104
105 lm_status_t lm_tcp_erase_connection(
106 IN struct _lm_device_t * pdev,
107 IN lm_tcp_state_t * tcp)
108 {
109 lm_status_t status = LM_STATUS_SUCCESS;
110 lm_tcp_con_t *rx_con;
111 lm_tcp_con_t *tx_con;
112 MM_INIT_TCP_LOCK_HANDLE();
113 if (!lm_fl_reset_is_inprogress(pdev)) {
114 return LM_STATUS_FAILURE;
115 }
116
117 DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x)\n",tcp->cid);
118 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
119 rx_con = tcp->rx_con;
120 tx_con = tcp->tx_con;
121 mm_acquire_tcp_lock(pdev, tx_con);
122 tx_con->flags |= TCP_POST_BLOCKED;
123 lm_tcp_abort_bufs(pdev, tcp, tx_con, LM_STATUS_CONNECTION_CLOSED);
124 if (tx_con->abortion_under_flr) {
125 DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): Tx aborted\n",tcp->cid);
126 }
127 mm_release_tcp_lock(pdev, tx_con);
128
129 /* Rx abortive part... */
130
131 mm_acquire_tcp_lock(pdev, rx_con);
132 /* Abort pending buffers */
133 rx_con->flags |= TCP_POST_BLOCKED;
134 if (mm_tcp_indicating_bufs(rx_con)) {
135 DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): under indication\n",tcp->cid);
136 DbgBreak();
137 mm_release_tcp_lock(pdev, rx_con);
138 return LM_STATUS_FAILURE;
139 }
140 lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_CONNECTION_CLOSED);
141 if (rx_con->abortion_under_flr) {
142 DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): Rx aborted\n",tcp->cid);
143 }
144
145 mm_release_tcp_lock(pdev, rx_con);
146 }
147 mm_tcp_del_tcp_state(pdev,tcp);
148 return status;
149 }
150
151 void lm_tcp_flush_db(
152 struct _lm_device_t * pdev,
153 lm_tcp_state_t *tcp)
154 {
155 struct toe_tx_doorbell dq_flush_msg;
156 lm_tcp_con_t *rx_con, *tx_con;
157 MM_INIT_TCP_LOCK_HANDLE();
158
159 DbgBreakIf(!(pdev && tcp));
160
161 if (tcp->ulp_type != TOE_CONNECTION_TYPE) {
162 DbgMessage(pdev, WARNl4sp, "##lm_tcp_flush_db is not sent for connection(0x%x) of type %d\n",tcp->cid, tcp->ulp_type);
163 return;
164 }
165
166 DbgMessage(pdev, INFORMl4sp, "##lm_tcp_flush_db (cid=0x%x)\n",tcp->cid);
167 rx_con = tcp->rx_con;
168 tx_con = tcp->tx_con;
169
170 dq_flush_msg.hdr.data = (TOE_CONNECTION_TYPE << DOORBELL_HDR_T_CONN_TYPE_SHIFT);
171 dq_flush_msg.params = TOE_TX_DOORBELL_FLUSH;
172 dq_flush_msg.nbytes = 0;
173
174
175 mm_acquire_tcp_lock(pdev, tx_con);
176 tx_con->flags |= TCP_DB_BLOCKED;
177 mm_release_tcp_lock(pdev, tx_con);
178
179 mm_acquire_tcp_lock(pdev, rx_con);
180 rx_con->flags |= TCP_DB_BLOCKED;
181 mm_release_tcp_lock(pdev, rx_con);
182
183 DOORBELL(pdev, tcp->cid, *((u32_t *)&dq_flush_msg));
184 }
185
186 /* Description:
187 * allocate l4 resources
188 * Assumptions:
189 * - lm_init_params was already called
190 * Returns:
191 * SUCCESS or any failure */
192 static lm_status_t lm_tcp_alloc_resc(lm_device_t *pdev)
193 {
194 lm_toe_info_t *toe_info;
195 lm_bd_chain_t *bd_chain;
196 u32_t mem_size;
197 long i;
198 u8_t mm_cli_idx = 0;
199
200 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_alloc_resc\n");
201
202 // NOP, call this function only to prevent compile warning.
203 _fake_func_verify_defines();
204
205 mm_cli_idx = LM_RESOURCE_NDIS;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_NDIS);
206
207 toe_info = &pdev->toe_info;
208 LM_TOE_FOREACH_TSS_IDX(pdev, i)
209 {
210 /* allocate SCQs */
211 bd_chain = &toe_info->scqs[i].bd_chain;
212 mem_size = pdev->params.l4_scq_page_cnt * LM_PAGE_SIZE;
213 bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
214 if (!bd_chain->bd_chain_virt) {
215 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
216 return LM_STATUS_RESOURCE;
217 }
218 mm_memset(bd_chain->bd_chain_virt, 0, mem_size);
219 }
220
221 LM_TOE_FOREACH_RSS_IDX(pdev, i)
222 {
223 /* allocate RCQs */
224 bd_chain = &toe_info->rcqs[i].bd_chain;
225 mem_size = pdev->params.l4_rcq_page_cnt * LM_PAGE_SIZE;
226 bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
227 if (!bd_chain->bd_chain_virt) {
228 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
229 return LM_STATUS_RESOURCE;
230 }
231 mm_memset(bd_chain->bd_chain_virt, 0, mem_size);
232
233 /* allocate GRQs */
234 bd_chain = &toe_info->grqs[i].bd_chain;
235 mem_size = pdev->params.l4_grq_page_cnt * LM_PAGE_SIZE;
236 bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
237 if (!bd_chain->bd_chain_virt) {
238 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
239 return LM_STATUS_RESOURCE;
240 }
241 mm_memset(bd_chain->bd_chain_virt, 0, mem_size);
242
243 DbgBreakIf(toe_info->grqs[i].isles_pool);
244 if (!pdev->params.l4_isles_pool_size) {
245 pdev->params.l4_isles_pool_size = 2 * T_TCP_ISLE_ARRAY_SIZE;
246 } else if (pdev->params.l4_isles_pool_size < T_TCP_ISLE_ARRAY_SIZE) {
247 pdev->params.l4_isles_pool_size = T_TCP_ISLE_ARRAY_SIZE;
248 }
249 mem_size = pdev->params.l4_isles_pool_size * sizeof(lm_isle_t);
250 toe_info->grqs[i].isles_pool = (lm_isle_t*)mm_alloc_mem(pdev, mem_size, mm_cli_idx);
251 if (!toe_info->grqs[i].isles_pool) {
252 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
253 return LM_STATUS_RESOURCE;
254 }
255 mm_memset(toe_info->grqs[i].isles_pool, 0, mem_size);
256 }
257 if (pdev->params.l4_data_integrity) {
258 u32_t pb_idx;
259 pdev->toe_info.integrity_info.pattern_size = 256;
260 pdev->toe_info.integrity_info.pattern_buf_size = 0x10000 + pdev->toe_info.integrity_info.pattern_size;
261 pdev->toe_info.integrity_info.pattern_buf = mm_alloc_mem(pdev, pdev->toe_info.integrity_info.pattern_buf_size, mm_cli_idx);
262 if (!pdev->toe_info.integrity_info.pattern_buf) {
263 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
264 return LM_STATUS_RESOURCE;
265 }
266 for (pb_idx = 0; pb_idx < pdev->toe_info.integrity_info.pattern_buf_size; pb_idx++) {
267 pdev->toe_info.integrity_info.pattern_buf[pb_idx] = pb_idx % pdev->toe_info.integrity_info.pattern_size;
268 }
269 }
270
271 /* Allocate rss-update physical data */
272 pdev->toe_info.rss_update_data = (struct toe_rss_update_ramrod_data *)
273 mm_alloc_phys_mem(pdev, sizeof(*pdev->toe_info.rss_update_data),
274 &pdev->toe_info.rss_update_data_phys,
275 0,0);
276
277 if (pdev->toe_info.rss_update_data == NULL)
278 {
279 DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
280 return LM_STATUS_RESOURCE;
281 }
282
283 return LM_STATUS_SUCCESS;
284 }
285
286 static void _lm_get_default_l4cli_params(lm_device_t *pdev, l4_ofld_params_t *l4_params)
287 {
288 lm_params_t *def_params = &pdev->params;
289
290 DbgBreakIf(def_params->l4cli_ack_frequency > 0xff);
291 l4_params->ack_frequency = def_params->l4cli_ack_frequency & 0xff;
292
293 DbgBreakIf(def_params->l4cli_delayed_ack_ticks > 0xff);
294 l4_params->delayed_ack_ticks = def_params->l4cli_delayed_ack_ticks & 0xff;
295
296 DbgBreakIf(def_params->l4cli_doubt_reachability_retx > 0xff);
297 l4_params->doubt_reachability_retx = def_params->l4cli_doubt_reachability_retx & 0xff;
298
299 l4_params->dup_ack_threshold = def_params->l4cli_dup_ack_threshold;
300
301 DbgBreakIf((def_params->l4cli_flags != 0) &&
302 (def_params->l4cli_flags != OFLD_PARAM_FLAG_SNAP_ENCAP));
303 l4_params->flags = def_params->l4cli_flags;
304
305 DbgBreakIf(def_params->l4cli_max_retx > 0xff);
306 l4_params->max_retx = def_params->l4cli_max_retx & 0xff;
307
308 l4_params->nce_stale_ticks = def_params->l4cli_nce_stale_ticks;
309 l4_params->push_ticks = def_params->l4cli_push_ticks;
310
311 DbgBreakIf(def_params->l4cli_starting_ip_id > 0xffff);
312 l4_params->starting_ip_id = def_params->l4cli_starting_ip_id & 0xffff;
313
314 l4_params->sws_prevention_ticks = def_params->l4cli_sws_prevention_ticks;
315 l4_params->ticks_per_second = def_params->l4cli_ticks_per_second;
316
317 }
318
319 /** Description
320 * requests generic buffers from the generic buffer pool and attaches the generic buffers
321 * to the grq-bd chain. It attaches however many buffers were received, even if fewer
322 * than requested. The function always tries to fill the bd-chain (i.e. requests bd_chain->bd_left)
323 * Assumptions:
324 * - called after the generic buffer pool is ready to deliver generic buffers
325 * - whoever calls this function handles checking whether a work item for allocating more
326 * buffers is needed.
327 * Returns:
328 * - TRUE: buffers were written
329 * - FALSE: o/w
330 */
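/* Illustrative example (assumed numbers, not taken from the driver): in FILL_GRQ_MIN_CASHED_BDS
 * mode, if the chain capacity is 512, bd_left is 500 (so 12 buffers already sit in the chain)
 * and USTORM_TOE_GRQ_CACHE_NUM_BDS is 16, only 16 - 12 = 4 buffers are requested - just enough
 * to cover the firmware's cached BDs. In FILL_GRQ_FULL mode the full bd_left (500) would be
 * requested, subject to the high threshold and the l4_grq_filling_threshold_divider handling
 * below. */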
331 u8_t lm_tcp_rx_fill_grq(struct _lm_device_t * pdev, u8_t sb_idx, d_list_t * bypass_gen_pool_list, u8_t filling_mode)
332 {
333 lm_toe_info_t * toe_info;
334 lm_tcp_grq_t * grq;
335 struct toe_rx_grq_bd * grq_bd;
336 lm_tcp_gen_buf_t * curr_gen_buf;
337 lm_bd_chain_t * bd_chain;
338 d_list_t tmp_gen_buf_list;
339 d_list_t free_gen_buf_list;
340 u16_t num_bufs; /* limited by bd_chain->bd_left */
341 u16_t num_bufs_threshold;
342 u32_t num_bypass_buffs;
343 u32_t avg_dpc_cnt;
344
345 toe_info = &pdev->toe_info;
346 grq = &toe_info->grqs[sb_idx];
347 bd_chain = &grq->bd_chain;
348 num_bufs = bd_chain->bd_left; /* required number of bufs from grq pool */
349
350 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_fill_grq bd_left (to be filled)= %d\n", bd_chain->bd_left);
351
352 if (!pdev->params.l4_grq_filling_threshold_divider) {
353 num_bufs_threshold = 1;
354 } else {
355 if (pdev->params.l4_grq_filling_threshold_divider < 2) {
356 pdev->params.l4_grq_filling_threshold_divider = 2;
357 }
358 num_bufs_threshold = bd_chain->capacity / pdev->params.l4_grq_filling_threshold_divider;
359 }
360
361 d_list_init(&tmp_gen_buf_list, NULL, NULL, 0);
362 d_list_init(&free_gen_buf_list, NULL, NULL, 0);
363 if (bypass_gen_pool_list != NULL) {
364 num_bypass_buffs = d_list_entry_cnt(bypass_gen_pool_list);
365 } else {
366 num_bypass_buffs = 0;
367 }
368
369 if (filling_mode == FILL_GRQ_MIN_CASHED_BDS) {
370 u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
371 if (bufs_in_chain >= USTORM_TOE_GRQ_CACHE_NUM_BDS) {
372 return 0;
373 } else {
374 num_bufs = USTORM_TOE_GRQ_CACHE_NUM_BDS - bufs_in_chain;
375 }
376 } else if (filling_mode == FILL_GRQ_LOW_THRESHOLD) {
377 u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
378 DbgBreakIf(grq->low_bds_threshold < USTORM_TOE_GRQ_CACHE_NUM_BDS);
379 if (grq->low_bds_threshold < USTORM_TOE_GRQ_CACHE_NUM_BDS) {
380 grq->low_bds_threshold = 3*GRQ_XOFF_TH;
381 }
382 if (bufs_in_chain >= grq->low_bds_threshold) {
383 return 0;
384 } else {
385 num_bufs = grq->low_bds_threshold - bufs_in_chain;
386 }
387 } else {
388 if (grq->high_bds_threshold) {
389 u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
390 if (bufs_in_chain >= grq->high_bds_threshold) {
391 return 0;
392 } else {
393 num_bufs = grq->high_bds_threshold - bufs_in_chain;
394 }
395 }
396 if (num_bufs < num_bufs_threshold) {
397 if (num_bufs > num_bypass_buffs) {
398 num_bufs = (u16_t)num_bypass_buffs; /* Partly fill grq from bypass only*/
399 grq->gen_bufs_compensated_from_bypass_only += num_bypass_buffs;
400 }
401 if (!num_bufs) {
402 return 0; /* nothing to fill, or better to fill later
403 to avoid abundant GEN_POOL_LOCK acquiring */
404 }
405 }
406 }
407
408 if (num_bypass_buffs < num_bufs) {
409 /* we can safely cast the returned value since we know we ask for max 2^16 */
410 u16_t num_required_buffs = num_bufs - num_bypass_buffs;
411 mm_tcp_get_gen_bufs(pdev, &tmp_gen_buf_list, num_required_buffs, sb_idx);
412 }
413 while ((d_list_entry_cnt(&tmp_gen_buf_list) < num_bufs) && num_bypass_buffs) {
414 lm_tcp_gen_buf_t * tmp_buf = NULL;
415 d_list_entry_t * curr_entry = d_list_pop_head(bypass_gen_pool_list);
416 tmp_buf = (lm_tcp_gen_buf_t *)curr_entry;
417 DbgBreakIf(!curr_entry);
418 if (tmp_buf->flags & GEN_FLAG_FREE_WHEN_DONE)
419 {
420 d_list_push_head(&free_gen_buf_list, curr_entry);
421 }
422 else
423 {
424 d_list_push_head(&tmp_gen_buf_list, curr_entry);
425 }
426 num_bypass_buffs--;
427 }
428 num_bufs = (u16_t)d_list_entry_cnt(&tmp_gen_buf_list);
429 if ((bypass_gen_pool_list != NULL) && d_list_entry_cnt(&free_gen_buf_list))
430 {
431 d_list_add_tail(bypass_gen_pool_list, &free_gen_buf_list);
432 }
433 /* stats... */
434 grq->num_grqs_last_dpc = num_bufs;
435 if (grq->num_grqs_last_dpc) { /* Exclude zeroed value from statistics*/
436 if (grq->num_grqs_last_dpc > grq->max_grqs_per_dpc) {
437 grq->max_grqs_per_dpc = grq->num_grqs_last_dpc;
438 }
439 /* we don't want to wrap around...*/
440 if ((grq->sum_grqs_last_x_dpcs + grq->num_grqs_last_dpc) < grq->sum_grqs_last_x_dpcs) {
441 grq->avg_dpc_cnt = 0;
442 grq->sum_grqs_last_x_dpcs = 0;
443 }
444 grq->sum_grqs_last_x_dpcs += grq->num_grqs_last_dpc;
445 grq->avg_dpc_cnt++;
446 avg_dpc_cnt = grq->avg_dpc_cnt;
447 if (avg_dpc_cnt) { /*Prevent division by 0*/
448 grq->avg_grqs_per_dpc = grq->sum_grqs_last_x_dpcs / avg_dpc_cnt;
449 } else {
450 grq->sum_grqs_last_x_dpcs = 0;
451 }
452 }
453
454 DbgBreakIf(num_bufs != tmp_gen_buf_list.cnt);
455
456 if (num_bufs < bd_chain->bd_left) {
457 grq->num_deficient++;
458 }
459
460 if (!num_bufs) {
461 DbgMessage(pdev, WARNl4rx, "no buffers returned from generic pool\n");
462 return 0; /* nothing to do */
463 }
464 curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(&tmp_gen_buf_list);
465
466 if (filling_mode == FILL_GRQ_LOW_THRESHOLD) {
467 grq->gen_bufs_compensated_till_low_threshold += num_bufs;
468 }
469 while (num_bufs--) {
470 DbgBreakIf(SIG(curr_gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
471 DbgMessage(pdev, VERBOSEl4rx, "curr_gen_buf->buf_virt=0x%p, END_SIG=0x%x\n", curr_gen_buf->buf_virt,
472 END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)));
473 DbgBreakIf(END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
474
475 /* initialize curr_gen_buf */
476 curr_gen_buf->ind_bytes = 0;
477 curr_gen_buf->ind_nbufs = 0;
478 curr_gen_buf->placed_bytes = 0;
479 curr_gen_buf->refcnt = 0;
480 curr_gen_buf->tcp = NULL;
481
482 grq_bd = (struct toe_rx_grq_bd *)lm_toe_bd_chain_produce_bd(bd_chain);
483 DbgBreakIf(!grq_bd);
484 /* attach gen buf to grq */
485 DbgBreakIf(!curr_gen_buf || !curr_gen_buf->buf_phys.as_u64);
486 grq_bd->addr_hi = curr_gen_buf->buf_phys.as_u32.high;
487 grq_bd->addr_lo = curr_gen_buf->buf_phys.as_u32.low;
488
489 curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_next_entry(&curr_gen_buf->link);
490 /* enlist gen buf to active list will be done at the end of the loop (more efficient) */
491 }
492
493 if (bd_chain->bd_left) {
494 DbgMessage(pdev, INFORMl4rx, "GRQ bd-chain wasn't filled completely\n");
495 }
496 if (d_list_entry_cnt(&tmp_gen_buf_list))
497 {
498 d_list_add_tail(&grq->active_gen_list, &tmp_gen_buf_list);
499 }
500 return (tmp_gen_buf_list.cnt != 0); /* whether any buffers were actually placed */
501 }
502
503 /* Description:
504 * initialize l4 VBD resources
505 * Assumptions:
506 * - lm_init_params was already called
507 * - lm_tcp_alloc_resc was already called
508 * - um GRQ pool is ready to supply buffers to lm (?)
509 * Returns:
510 * SUCCESS or any failure */
511 lm_status_t lm_tcp_init_resc(struct _lm_device_t *pdev, u8_t b_is_init)
512 {
513 lm_toe_info_t *toe_info;
514 lm_bd_chain_t *bd_chain;
515 long i;
516 u16_t volatile * sb_indexes;
517 u32_t sb_id;
518
519 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_resc\n");
520 toe_info = &pdev->toe_info;
521 toe_info->state = LM_TOE_STATE_INIT;
522
523 /* init rest of toe_info fields */
524 toe_info->rss_update_cnt = 0;
525 toe_info->gen_buf_size = lm_tcp_calc_gen_buf_size(pdev);
526 LM_TCP_SET_UPDATE_WINDOW_MODE(pdev, LM_TOE_UPDATE_MODE_SHORT_LOOP);
527
528 if( b_is_init )
529 {
530 d_list_init(&toe_info->state_blk.neigh_list, NULL, NULL, 0);
531 d_list_init(&toe_info->state_blk.path_list, NULL, NULL, 0);
532 d_list_init(&toe_info->state_blk.tcp_list, NULL, NULL, 0);
533 }
534
535 /* TODO: consider enabling the assertion */
536 //DbgBreakIf(pdev->ofld_info.state_blks[STATE_BLOCK_TOE]);
537 pdev->ofld_info.state_blks[STATE_BLOCK_TOE] = &toe_info->state_blk;
538
539 LM_TOE_FOREACH_TSS_IDX(pdev, i)
540 {
541 /* init SCQs */
542 lm_tcp_scq_t *scq = &toe_info->scqs[i];
543 bd_chain = &scq->bd_chain;
544 lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
545 bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_scq_page_cnt, sizeof(struct toe_tx_cqe), 1, TRUE);
546 /* Assign the SCQ chain consumer pointer to the consumer index in the status block. */
547 sb_id = RSS_ID_TO_SB_ID(i);
548 #ifdef _VBD_
549 if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
550 {
551 sb_id = LM_NON_RSS_SB(pdev);
552 }
553 #endif
554 sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
555 sb_indexes[HC_INDEX_TOE_TX_CQ_CONS] = 0;
556 scq->hw_con_idx_ptr = sb_indexes + HC_INDEX_TOE_TX_CQ_CONS;
557 scq->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_TYPE;
558 scq->hc_sb_info.hc_index_value = HC_INDEX_TOE_TX_CQ_CONS;
559 }
560
561
562 /* Before initializing GRQs, we need to check if there are left-overs from before (in case this isn't the initial 'init'); if so we need to clear
563 * them - but outside the loop... */
564 if ( !b_is_init ) {
565 /* we need to return what ever buffers are still on the grq back to the pool before
566 * the new initialization... */
567 lm_tcp_clear_grqs(pdev);
568 }
569
570 LM_TOE_FOREACH_RSS_IDX(pdev, i)
571 {
572 lm_tcp_rcq_t *rcq = &toe_info->rcqs[i];
573 lm_tcp_grq_t *grq = &toe_info->grqs[i];
574 u8_t byte_counter_id;
575
576 sb_id = RSS_ID_TO_SB_ID(i);
577 #ifdef _VBD_
578 if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
579 {
580 sb_id = LM_NON_RSS_SB(pdev);
581 }
582 #endif
583 byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);
584
585 /* init RCQs */
586 bd_chain = &rcq->bd_chain;
587 lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
588 bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_rcq_page_cnt, sizeof(struct toe_rx_cqe), 1, TRUE);
589 rcq->rss_update_pending = 0;
590 rcq->suspend_processing = FALSE;
591 rcq->update_cid = 0;
592
593 /* Assign the RCQ chain consumer pointer to the consumer index in the status block. */
594 sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
595 sb_indexes[HC_INDEX_TOE_RX_CQ_CONS] = 0;
596 rcq->hw_con_idx_ptr = sb_indexes + HC_INDEX_TOE_RX_CQ_CONS;
597 rcq->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_SL_TYPE;
598 rcq->hc_sb_info.hc_index_value = HC_INDEX_TOE_RX_CQ_CONS;
599 if (IS_PFDEV(pdev))
600 {
601 rcq->hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, HC_INDEX_TOE_RX_CQ_CONS);
602 }
603 else
604 {
605 DbgMessage(pdev, FATAL, "Dhc not implemented for VF yet\n");
606 }
607
608 /* init GRQs */
609 if( b_is_init )
610 {
611 d_list_init(&grq->active_gen_list, NULL, NULL, 0);
612 d_list_init(&grq->aux_gen_list, NULL, NULL, 0);
613 if ((u8_t)i != LM_TOE_BASE_RSS_ID(pdev) ) {
614 grq->grq_compensate_on_alloc = TRUE;
615 pdev->toe_info.grqs[i].high_bds_threshold = 3*GRQ_XOFF_TH + 1;
616 } else {
617 grq->grq_compensate_on_alloc = FALSE;
618 pdev->toe_info.grqs[i].high_bds_threshold = 0;
619 }
620 grq->low_bds_threshold = 3*GRQ_XOFF_TH;
621 }
622
623 bd_chain = &grq->bd_chain;
624 lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
625 bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_grq_page_cnt, sizeof(struct toe_rx_grq_bd), 0, TRUE);
626 /* fill GRQ (minimum mode)*/
627 lm_tcp_rx_fill_grq(pdev, (u8_t)i, NULL, FILL_GRQ_MIN_CASHED_BDS);
628 }
629
630
631 LM_TOE_FOREACH_RSS_IDX(pdev, i)
632 {
633 // lm_tcp_grq_t *grq = &toe_info->grqs[i];
634 lm_tcp_rx_fill_grq(pdev, (u8_t)i, NULL, FILL_GRQ_FULL);
635 }
636
637 return LM_STATUS_SUCCESS;
638 }
639
640
641 /* init cstorm internal memory for toe
642 * assumption - storm's common intmem (if any) already initiated */
643 static void _lm_tcp_init_cstorm_intmem(lm_device_t *pdev)
644 {
645 lm_toe_info_t *toe_info;
646 lm_address_t phys_addr;
647 lm_tcp_scq_t *scq;
648 u16_t idx;
649 u8_t drv_toe_rss_id;
650 u8_t port;
651 u8_t fw_sb_id;
652
653 toe_info = &pdev->toe_info;
654 port = PORT_ID(pdev);
655
656 LM_TOE_FOREACH_TSS_IDX(pdev, drv_toe_rss_id)
657 {
658 scq = &toe_info->scqs[drv_toe_rss_id];
659
660 /* SCQ consumer ptr - scq first page addr */
661 phys_addr = lm_bd_chain_phys_addr(&scq->bd_chain, 0);
662 DbgBreakIf(CSTORM_TOE_CQ_CONS_PTR_LO_SIZE != 4);
663
664 LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_CSTRORM_INTMEM);
665
666 DbgBreakIf (CSTORM_TOE_CQ_CONS_PTR_HI_SIZE != 4);
667 LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_CSTRORM_INTMEM);
668
669 /* SCQ producer idx */
670 idx = lm_bd_chain_prod_idx(&scq->bd_chain);
671
672 DbgBreakIf(CSTORM_TOE_CQ_PROD_SIZE != 2);
673 LM_INTMEM_WRITE16(pdev, CSTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_CSTRORM_INTMEM);
674
675 /* SCQ consumer idx */
676 idx = lm_bd_chain_cons_idx(&scq->bd_chain);
677 DbgBreakIf(idx != 0);
678
679 DbgBreakIf(CSTORM_TOE_CQ_CONS_SIZE != 2);
680 LM_INTMEM_WRITE16(pdev, CSTORM_TOE_CQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_CSTRORM_INTMEM);
681
682 /* SCQ second page addr */
683 phys_addr = lm_bd_chain_phys_addr(&scq->bd_chain, 1);
684
685 DbgBreakIf(CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_SIZE != 4);
686 LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_CSTRORM_INTMEM);
687
688 DbgBreakIf(CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_SIZE != 4);
689 LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_CSTRORM_INTMEM);
690
691 DbgBreakIf(CSTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_SIZE != 1);
692
693 LM_INTMEM_WRITE8(pdev, CSTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 1, BAR_CSTRORM_INTMEM);
694
695 //LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), BAR_CSTRORM_INTMEM);
696 fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(drv_toe_rss_id));
697 #ifdef _VBD_
698 if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
699 {
700 fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(LM_NON_RSS_SB(pdev)));
701 if (drv_toe_rss_id != LM_NON_RSS_CHAIN(pdev))
702 {
703 DbgBreak();
704 }
705 }
706 #endif
707 LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), fw_sb_id, BAR_CSTRORM_INTMEM);
708 LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_INDEX_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), HC_INDEX_TOE_TX_CQ_CONS, BAR_CSTRORM_INTMEM);
709 }
710 }
711
712 /* init ustorm offload params private to TOE */
713 static void _lm_set_ofld_params_ustorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
714 {
715 u8_t func;
716 u32_t val32;
717
718 func = FUNC_ID(pdev);
719
720 /* global push timer ticks */
721 /* This value is in milliseconds instead of ticks in SNP
722 * and Longhorn. In the future microsoft will change these
723 * values to ticks. TBA : When fix takes place, uncomment first line and remove second line */
724 /* val32 = lm_time_resolution(pdev, l4_params->push_ticks, l4_params->ticks_per_second, 1000); */
725 val32 = lm_time_resolution(pdev, l4_params->push_ticks, 1000, 1000);
726
727 DbgBreakIf (USTORM_TOE_TCP_PUSH_TIMER_TICKS_SIZE != 4);
728 LM_INTMEM_WRITE32(pdev, USTORM_TOE_TCP_PUSH_TIMER_TICKS_OFFSET(func), val32, BAR_USTRORM_INTMEM);
729 }
730
731 /* init ustorm internal memory for toe
732 * assumption - storm's common intmem (if any) already initiated */
733 static void _lm_tcp_init_ustorm_intmem(lm_device_t *pdev)
734 {
735 lm_toe_info_t *toe_info;
736 lm_address_t phys_addr;
737 lm_tcp_rcq_t *rcq;
738 lm_tcp_grq_t *grq;
739 struct toe_rx_grq_bd *grq_bd;
740 u16_t idx;
741 u8_t drv_toe_rss_id, grq_bd_idx;
742 u8_t port;
743 u8_t fw_sb_id;
744 u8_t sw_sb_id;
745
746 toe_info = &pdev->toe_info;
747 port = PORT_ID(pdev);
748
749 _lm_set_ofld_params_ustorm_toe(pdev, &(pdev->ofld_info.l4_params));
750
751 LM_TOE_FOREACH_RSS_IDX(pdev,drv_toe_rss_id)
752 {
753
754 rcq = &toe_info->rcqs[drv_toe_rss_id];
755 grq = &toe_info->grqs[drv_toe_rss_id];
756
757 /* GRQ cache bds */
758 grq_bd = (struct toe_rx_grq_bd *)grq->bd_chain.bd_chain_virt;
759
760 DbgBreakIf( USTORM_TOE_GRQ_CACHE_NUM_BDS > lm_bd_chain_usable_bds_per_page(&grq->bd_chain));
761
762 for(grq_bd_idx = 0; grq_bd_idx < USTORM_TOE_GRQ_CACHE_NUM_BDS; grq_bd_idx++) {
763 LM_INTMEM_WRITE32(pdev, USTORM_GRQ_CACHE_BD_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) ,port,grq_bd_idx), grq_bd->addr_lo, BAR_USTRORM_INTMEM);
764 LM_INTMEM_WRITE32(pdev, USTORM_GRQ_CACHE_BD_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id),port,grq_bd_idx), grq_bd->addr_hi, BAR_USTRORM_INTMEM);
765 grq_bd++;
766 }
767
768 /* GRQ cache prod idx */
769 DbgBreakIf (USTORM_TOE_GRQ_LOCAL_PROD_SIZE != 1);
770 LM_INTMEM_WRITE8(pdev, USTORM_TOE_GRQ_LOCAL_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u8_t)USTORM_TOE_GRQ_CACHE_NUM_BDS, BAR_USTRORM_INTMEM);
771
772 /* GRQ cache cons idx */
773 DbgBreakIf (USTORM_TOE_GRQ_LOCAL_CONS_SIZE != 1);
774 LM_INTMEM_WRITE8(pdev, USTORM_TOE_GRQ_LOCAL_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 0, BAR_USTRORM_INTMEM);
775
776 /* GRQ producer idx */
777 idx = lm_bd_chain_prod_idx(&grq->bd_chain);
778 DbgBreakIf (USTORM_TOE_GRQ_PROD_SIZE != 2);
779 LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
780
781 /* GRQ consumer idx */
782 DbgBreakIf (USTORM_TOE_GRQ_CONS_SIZE != 2);
783 LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u8_t)USTORM_TOE_GRQ_CACHE_NUM_BDS, BAR_USTRORM_INTMEM);
784
785 /* GRQ consumer ptr */
786 phys_addr = lm_bd_chain_phys_addr(&grq->bd_chain, 0);
787 LM_INC64(&phys_addr, sizeof(struct toe_rx_grq_bd) * USTORM_TOE_GRQ_CACHE_NUM_BDS);
788
789 DbgBreakIf (USTORM_TOE_GRQ_CONS_PTR_LO_SIZE != 4);
790 LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);
791
792 DbgBreakIf (USTORM_TOE_GRQ_CONS_PTR_HI_SIZE != 4);
793 LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);
794
795 /* Generic buffer size */
796 DbgBreakIf (USTORM_TOE_GRQ_BUF_SIZE_SIZE != 2);
797
798 DbgBreakIf(LM_TCP_GEN_BUF_SIZE(pdev) > 0xffff); /* the size available in ustorm */
799 LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_BUF_SIZE_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u16_t)LM_TCP_GEN_BUF_SIZE(pdev), BAR_USTRORM_INTMEM);
800
801 /* RCQ consumer ptr - rcq first page addr */
802 phys_addr = lm_bd_chain_phys_addr(&rcq->bd_chain, 0);
803
804 DbgBreakIf (USTORM_TOE_CQ_CONS_PTR_LO_SIZE != 4);
805 LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);
806
807 DbgBreakIf (USTORM_TOE_CQ_CONS_PTR_HI_SIZE != 4);
808 LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);
809
810 /* RCQ second page addr */
811 phys_addr = lm_bd_chain_phys_addr(&rcq->bd_chain, 1);
812
813 DbgBreakIf (USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_SIZE != 4);
814 LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);
815
816 DbgBreakIf (USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_SIZE != 4);
817 LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);
818
819 DbgBreakIf (USTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_SIZE != 1);
820 LM_INTMEM_WRITE8(pdev, USTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 1, BAR_USTRORM_INTMEM);
821
822 /* RCQ producer idx */
823 idx = lm_bd_chain_prod_idx(&rcq->bd_chain);
824
825 DbgBreakIf (USTORM_TOE_CQ_PROD_SIZE != 2);
826 LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
827 if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
828 u32_t l4_quasi_byte_counter;
829 u16_t prod_idx_diff = lm_bd_chain_prod_idx(&rcq->bd_chain) - rcq->bd_chain.bds_per_page * rcq->bd_chain.page_cnt;
830 l4_quasi_byte_counter = prod_idx_diff;
831 l4_quasi_byte_counter <<= 16;
832 // LM_INTMEM_WRITE32(pdev, CSTORM_BYTE_COUNTER_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), HC_INDEX_TOE_RX_CQ_CONS), l4_quasi_byte_counter, BAR_CSTRORM_INTMEM);
833 LM_INTMEM_WRITE32(pdev, rcq->hc_sb_info.iro_dhc_offset, l4_quasi_byte_counter, BAR_CSTRORM_INTMEM);
834 }
835 /* RCQ consumer idx */
836 idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
837 DbgBreakIf(idx != 0);
838
839 DbgBreakIf (USTORM_TOE_CQ_CONS_SIZE != 2);
840 LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
841
842 fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(drv_toe_rss_id));
843 sw_sb_id = RSS_ID_TO_SB_ID(drv_toe_rss_id);
844 if (RSS_ID_TO_SB_ID(drv_toe_rss_id) >= MAX_NDSB) { //To suppress Prefast warning
845 DbgBreak();
846 break;
847 }
848 #ifdef _VBD_
849 if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
850 {
851 fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(LM_NON_RSS_SB(pdev)));
852 sw_sb_id = LM_NON_RSS_SB(pdev);
853 if (drv_toe_rss_id != LM_NON_RSS_CHAIN(pdev))
854 {
855 DbgBreak();
856 }
857 }
858 #endif
859 if (CHIP_IS_E1x(pdev)) {
860
861 if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
862 pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags |= HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
863 } else {
864 pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
865 }
866 LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
867 + OFFSETOF(struct hc_status_block_data_e1x, index_data)
868 + sizeof(struct hc_index_data)*HC_INDEX_TOE_RX_CQ_CONS
869 + OFFSETOF(struct hc_index_data,flags),
870 pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_ETH_RX_CQ_CONS].flags, BAR_CSTRORM_INTMEM);
871 } else {
872
873 if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
874 pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags |= HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
875 } else {
876 pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
877 }
878 LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
879 + OFFSETOF(struct hc_status_block_data_e2, index_data)
880 + sizeof(struct hc_index_data)*HC_INDEX_TOE_RX_CQ_CONS
881 + OFFSETOF(struct hc_index_data,flags),
882 pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_ETH_RX_CQ_CONS].flags, BAR_CSTRORM_INTMEM);
883
884 }
885
886 // LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port),LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), BAR_USTRORM_INTMEM);
887 LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port),fw_sb_id, BAR_USTRORM_INTMEM);
888 LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_INDEX_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), HC_INDEX_TOE_RX_CQ_CONS, BAR_USTRORM_INTMEM);
889 }
890
891 /* Initialize the indirection table: only entries that match the status blocks L4 base --> L4 base + cnt */
892 DbgBreakIf (USTORM_INDIRECTION_TABLE_ENTRY_SIZE != 1);
893
894 if (pdev->params.l4_enable_rss == L4_RSS_DISABLED) {
895 LM_TOE_FOREACH_RSS_IDX(pdev, idx)
896 {
897 LM_INTMEM_WRITE8(pdev, USTORM_INDIRECTION_TABLE_OFFSET(port) + LM_TOE_FW_RSS_ID(pdev,idx), LM_TOE_FW_RSS_ID(pdev,(u8_t)idx), BAR_USTRORM_INTMEM);
898 }
899 } else {
900 for (idx = 0; idx < RSS_INDIRECTION_TABLE_SIZE; idx++) {
901 LM_INTMEM_WRITE8(pdev,USTORM_INDIRECTION_TABLE_OFFSET(port) + idx, pdev->toe_info.indirection_table[idx], BAR_USTRORM_INTMEM);
902 }
903 }
904 }
905
906 /* init tstorm offload params common to TOE/RDMA/ISCSI */
907 static void _lm_set_ofld_params_tstorm_common(lm_device_t *pdev, l4_ofld_params_t *l4_params)
908 {
909 u8_t func;
910 u32_t dup_ack_threshold;
911
912 func = FUNC_ID(pdev);
913
914 dup_ack_threshold = l4_params->dup_ack_threshold;
915 if(dup_ack_threshold > TCP_TSTORM_MAX_DUP_ACK_TH) {
916 DbgMessage(pdev, WARNl4sp,
917 "given dup_ack_threshold (=%d) too high. setting it to maximum allowed (=%d)\n",
918 dup_ack_threshold, TCP_TSTORM_MAX_DUP_ACK_TH);
919 dup_ack_threshold = TCP_TSTORM_MAX_DUP_ACK_TH;
920 }
921
922 DbgBreakIf (TSTORM_TCP_DUPLICATE_ACK_THRESHOLD_SIZE != 4);
923 LM_INTMEM_WRITE32(pdev, TSTORM_TCP_DUPLICATE_ACK_THRESHOLD_OFFSET(func), dup_ack_threshold, BAR_TSTRORM_INTMEM);
924
925 /* MaxCwnd */
926 DbgBreakIf (TSTORM_TCP_MAX_CWND_SIZE != 4);
927 if(pdev->params.network_type == LM_NETOWRK_TYPE_WAN) {
928 LM_INTMEM_WRITE32(pdev, TSTORM_TCP_MAX_CWND_OFFSET(func), pdev->params.max_cwnd_wan, BAR_TSTRORM_INTMEM);
929 } else {
930 DbgBreakIf(pdev->params.network_type != LM_NETOWRK_TYPE_LAN);
931 LM_INTMEM_WRITE32(pdev, TSTORM_TCP_MAX_CWND_OFFSET(func), pdev->params.max_cwnd_lan, BAR_TSTRORM_INTMEM);
932 }
933 }
934
935 /* init tstorm offload params private to TOE */
936 static void _lm_set_ofld_params_tstorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
937 {
938 u8_t func;
939
940 func = FUNC_ID(pdev);
941
942 /* max retransmit (TOE param only) */
943 DbgBreakIf (TSTORM_TOE_MAX_SEG_RETRANSMIT_SIZE != 4);
944 LM_INTMEM_WRITE32(pdev, TSTORM_TOE_MAX_SEG_RETRANSMIT_OFFSET(func), l4_params->max_retx, BAR_TSTRORM_INTMEM);
945
946 /* TcpDoubtReachability (TOE param only) */
947 DbgBreakIf (TSTORM_TOE_DOUBT_REACHABILITY_SIZE != 1);
948 LM_INTMEM_WRITE8(pdev, TSTORM_TOE_DOUBT_REACHABILITY_OFFSET(func), l4_params->doubt_reachability_retx, BAR_TSTRORM_INTMEM);
949
950 }
951
952 /* init tstorm internal memory for toe
953 * assumption - storm's common intmem already initiated */
954 static void _lm_tcp_init_tstorm_intmem(lm_device_t *pdev)
955 {
956 _lm_set_ofld_params_tstorm_toe(pdev, &(pdev->ofld_info.l4_params));
957
958 DbgBreakIf (TSTORM_TOE_MAX_DOMINANCE_VALUE_SIZE != 1);
959 LM_INTMEM_WRITE8(pdev, TSTORM_TOE_MAX_DOMINANCE_VALUE_OFFSET, (u8_t)pdev->params.l4_max_dominance_value, BAR_TSTRORM_INTMEM);
960 DbgBreakIf (TSTORM_TOE_DOMINANCE_THRESHOLD_SIZE != 1);
961 LM_INTMEM_WRITE8(pdev, TSTORM_TOE_DOMINANCE_THRESHOLD_OFFSET, (u8_t)pdev->params.l4_dominance_threshold, BAR_TSTRORM_INTMEM);
962
963 }
964
965
966 /* init xstorm offload params common to TOE/RDMA/ISCSI */
967 static void _lm_set_ofld_params_xstorm_common(lm_device_t *pdev, l4_ofld_params_t *l4_params)
968 {
969 u8_t func, ack_frequency;
970 u32_t val32, max_reg, tmr_reg, delayed_ack_ticks;
971
972 func = FUNC_ID(pdev);
973 if (PORT_ID(pdev)) {
974 max_reg = XCM_REG_GLB_DEL_ACK_MAX_CNT_1;
975 tmr_reg = XCM_REG_GLB_DEL_ACK_TMR_VAL_1;
976 } else {
977 max_reg = XCM_REG_GLB_DEL_ACK_MAX_CNT_0;
978 tmr_reg = XCM_REG_GLB_DEL_ACK_TMR_VAL_0;
979 }
980 /* if ack_frequency is 0, it means use default value of 2. */
981 /* delayed max ack count, (both in internal ram and in XCM!!!) */
982 ack_frequency = l4_params->ack_frequency;
983 if(ack_frequency < TCP_XCM_MIN_GLB_DEL_ACK_MAX_CNT) {
984 DbgMessage(pdev, WARNl4sp,
985 "given ack_frequency (=%d) too low. setting it to the default (=%d)\n",
986 ack_frequency, TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT);
987 ack_frequency = TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT;
988 }
989
990
991 DbgBreakIf (XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_SIZE != 1);
992 LM_INTMEM_WRITE8(pdev, XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), ack_frequency, BAR_XSTRORM_INTMEM);
993 REG_WR(pdev, max_reg, ack_frequency);
994
995 /* This value is in milliseconds instead of ticks in SNP
996 * and Longhorn. In the future microsoft will change these
997 * values to ticks. TBA : When fix takes place, uncomment first line and remove second line */
998 /* delayed_ack_ticks = lm_time_resolution(pdev, l4_params->delayed_ack_ticks, l4_params->ticks_per_second, 1000); */
999 delayed_ack_ticks = lm_time_resolution(pdev, l4_params->delayed_ack_ticks, 1000, TIMERS_TICKS_PER_SEC);
1000
1001 /* delayed ack timer */
1002 REG_WR(pdev, tmr_reg, delayed_ack_ticks);
1003
1004 /* sws timer */
1005 /* This value (sws_prevention_ticks) is in milliseconds instead of ticks in SNP
1006 * and Longhorn. In the future microsoft will change these
1007 * values to ticks. TBA : When fix takes place, uncomment first line and remove second line */
1008 /* val32 = lm_time_resolution(pdev, l4_params->sws_prevention_ticks, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC); */
1009 val32 = lm_time_resolution(pdev, l4_params->sws_prevention_ticks, 1000 , TIMERS_TICKS_PER_SEC);
1010
1011 DbgBreakIf (XSTORM_TCP_TX_SWS_TIMER_VAL_SIZE != 4);
1012 LM_INTMEM_WRITE32(pdev, XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), val32, BAR_XSTRORM_INTMEM);
1013
1014 DbgBreakIf (XSTORM_COMMON_RTC_RESOLUTION_SIZE != 2);
1015 LM_INTMEM_WRITE16(pdev, XSTORM_COMMON_RTC_RESOLUTION_OFFSET, 1000 / l4_params->ticks_per_second , BAR_XSTRORM_INTMEM);
1016 }
1017
1018 /* init xstorm offload params private to TOE */
1019 static void _lm_set_ofld_params_xstorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
1020 {
1021 u8_t func;
1022
1023 func = FUNC_ID(pdev);
1024
1025 DbgBreakIf (XSTORM_TOE_LLC_SNAP_ENABLED_SIZE != 1);
1026 if(l4_params->flags & OFLD_PARAM_FLAG_SNAP_ENCAP) {
1027 LM_INTMEM_WRITE8(pdev, XSTORM_TOE_LLC_SNAP_ENABLED_OFFSET(func), 1, BAR_XSTRORM_INTMEM);
1028 } else {
1029 LM_INTMEM_WRITE8(pdev, XSTORM_TOE_LLC_SNAP_ENABLED_OFFSET(func), 0, BAR_XSTRORM_INTMEM);
1030 }
1031 }
1032
1033 /* init xstorm internal memory for toe
1034 * assumption - storm's common intmem already initiated */
1035 static void _lm_tcp_init_xstorm_intmem(lm_device_t *pdev)
1036 {
1037 _lm_set_ofld_params_xstorm_toe(pdev, &(pdev->ofld_info.l4_params));
1038 }
1039
1040 /* Description:
1041 * init chip internal memory and hw that is common for TOE, ISCSI and RDMA
1042 * Assumptions:
1043 * - lm_init_params was already called
1044 * Returns:
1045 * SUCCESS or any failure */
1046 lm_status_t lm_tcp_init_chip_common(lm_device_t *pdev)
1047 {
1048 l4_ofld_params_t l4_params;
1049 u8_t func;
1050
1051 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_chip_common\n");
1052 DbgBreakIf(!pdev);
1053
1054 func = FUNC_ID(pdev);
1055
1056 _lm_get_default_l4cli_params(pdev, &l4_params);
1057
1058 pdev->ofld_info.l4_params = l4_params;
1059
1060 /* init common internal memory/hw for each storm
1061 * (c+u storms do not have common offload params) */
1062 _lm_set_ofld_params_xstorm_common(pdev, &l4_params);
1063 _lm_set_ofld_params_tstorm_common(pdev, &l4_params);
1064
1065
1066 /* init internal memory constants (not dependent on l4_params) */
1067
1068 /* enable delayed acks */
1069 DbgBreakIf (XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_SIZE != 1);
1070 LM_INTMEM_WRITE8(pdev, XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1 /* always enabled */, BAR_XSTRORM_INTMEM);
1071
1072 /* ip id (init value currently constant: 0x8000) */
1073 DbgBreakIf (XSTORM_TCP_IPID_SIZE != 2);
1074 LM_INTMEM_WRITE16(pdev, XSTORM_TCP_IPID_OFFSET(func), TOE_XSTORM_IP_ID_INIT_HI, BAR_XSTRORM_INTMEM);
1075
1076 return LM_STATUS_SUCCESS;
1077 }
1078
1079 /* Description:
1080 * init chip internal memory for L4
1081 * Returns:
1082 * SUCCESS or any failure */
1083 lm_status_t lm_tcp_init_chip(lm_device_t *pdev)
1084 {
1085 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_chip\n");
1086
1087 /* GilR 4/9/2006 - TODO - Assaf - RSS indirection table default initialization, done in L2? */
1088
1089 /* init XSTORM internal RAM */
1090 _lm_tcp_init_xstorm_intmem(pdev);
1091
1092 /* init CSTORM internal RAM */
1093 _lm_tcp_init_cstorm_intmem(pdev);
1094
1095 /* init TSTORM internal RAM */
1096 _lm_tcp_init_tstorm_intmem(pdev);
1097
1098 /* init USTORM internal RAM */
1099 _lm_tcp_init_ustorm_intmem(pdev);
1100
1101 return LM_STATUS_SUCCESS;
1102 }
1103
1104 /* Description:
1105 * send TOE START ramrod wait for completion and return
1106 * Assumptions:
1107 * - there is no pending slow path request for the leading connection (cid=0)
1108 * - interrupts are already enabled
1109 * Returns:
1110 * SUCCESS or any failure */
1111 lm_status_t lm_tcp_start_chip(lm_device_t *pdev)
1112 {
1113 lm_toe_info_t *toe_info;
1114 u32_t to_cnt = 100000; /* GilR 4/9/2006 - TBA - 'to_cnt' in lm_tcp_init_chip need to be removed? */
1115 u64_t data;
1116 struct toe_init_ramrod_data toe_init_data;
1117
1118 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_start_chip\n");
1119
1120 toe_info = &pdev->toe_info;
1121
1122 /* send TOE INIT ramrod and wait for completion */
1123 DbgBreakIf(toe_info->state != LM_TOE_STATE_INIT);
1124
1125 toe_init_data.rss_num = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
1126 data = *((u64_t*)(&toe_init_data));
1127 lm_command_post(pdev, LM_SW_LEADING_RSS_CID(pdev), RAMROD_OPCODE_TOE_INIT, CMD_PRIORITY_NORMAL, TOE_CONNECTION_TYPE, data);
1128 while (toe_info->state != LM_TOE_STATE_NORMAL && to_cnt) {
1129 mm_wait(pdev,100);
1130 to_cnt--;
1131 }
1132 /* GilR 5/16/2006 - TODO - DbgBreakIf(toe_info->state != LM_TOE_STATE_NORMAL); commented out for windows user mode */
1133 if(toe_info->state != LM_TOE_STATE_NORMAL) {
1134 #ifndef _VBD_CMD_
1135 DbgMessage(pdev, FATAL, "TOE init ramrod did not complete\n");
1136 #else
1137 toe_info->state = LM_TOE_STATE_NORMAL;
1138 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INIT, TOE_CONNECTION_TYPE, LM_SW_LEADING_RSS_CID(pdev));
1139 #endif
1140
1141 #if defined(_VBD_)
1142 DbgBreak();
1143 #endif
1144 }
1145
1146 /* cid recycled cb registration */
1147 lm_cid_recycled_cb_register(pdev, TOE_CONNECTION_TYPE, lm_tcp_recycle_cid_cb);
1148
1149 /* Sq-completion cb registration (sq that gets completed internally in driver) */
1150 lm_sq_comp_cb_register(pdev, TOE_CONNECTION_TYPE, lm_tcp_comp_cb);
1151
1152 return LM_STATUS_SUCCESS;
1153 }
1154
1155 /* Description:
1156 * allocate and initiate l4 (lm driver and chip)
1157 * Assumptions:
1158 * - lm_init_params was already called
1159 * - um GRQ pool is ready to supply buffers to lm (?)
1160 * - there is no pending slow path request for the leading connection (cid=0)
1161 * - interrupts are already enabled
1162 * Returns:
1163 * SUCCESS or any failure */
1164 lm_status_t lm_tcp_init(lm_device_t *pdev)
1165 {
1166 lm_toe_info_t *toe_info;
1167 lm_status_t lm_status;
1168
1169 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init\n");
1170 if (IS_VFDEV(pdev)) {
1171 DbgMessage(pdev, FATAL, "###lm_tcp_init is not supported for VF\n");
1172 return LM_STATUS_SUCCESS;
1173 }
1174
1175 toe_info = &pdev->toe_info;
1176 mm_memset(toe_info, 0 , sizeof(lm_toe_info_t));
1177 toe_info->pdev = pdev;
1178
1179 /* allocate resources */
1180 lm_status = lm_tcp_alloc_resc(pdev);
1181 DbgBreakIf((lm_status!=LM_STATUS_SUCCESS) && DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1182 if (lm_status != LM_STATUS_SUCCESS) {
1183 return lm_status;
1184 }
1185
1186 /* initialize resources */
1187 lm_status = lm_tcp_init_resc(pdev, TRUE);
1188 DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
1189 if (lm_status != LM_STATUS_SUCCESS) {
1190 return lm_status;
1191 }
1192
1193 /* initialize chip resources */
1194 lm_status = lm_tcp_init_chip(pdev);
1195 DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
1196 if (lm_status != LM_STATUS_SUCCESS) {
1197 return lm_status;
1198 }
1199
1200 /* activate chip for tcp */
1201 lm_status = lm_tcp_start_chip(pdev);
1202 DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
1203 if (lm_status != LM_STATUS_SUCCESS) {
1204 return lm_status;
1205 }
1206
1207 return lm_status;
1208 }
1209
1210 /* Description:
1211 * handle TOE init protocol ramrod completion */
1212 void lm_tcp_init_ramrod_comp(lm_device_t *pdev)
1213 {
1214 lm_toe_info_t *toe_info;
1215
1216 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_ramrod_comp\n");
1217 DbgBreakIf(!pdev);
1218
1219 toe_info = &pdev->toe_info;
1220 DbgBreakIf(toe_info->state != LM_TOE_STATE_INIT);
1221 toe_info->state = LM_TOE_STATE_NORMAL;
1222 }
1223
1224 /* Description:
1225 * handle TOE RSS-update ramrod completion
1226 * Assumptions:
1227 * - called once for each RCQ
1228 */
1229 void lm_tcp_rss_update_ramrod_comp(
1230 struct _lm_device_t *pdev,
1231 lm_tcp_rcq_t *rcq,
1232 u32_t cid,
1233 u32_t update_stats_type,
1234 u8_t update_suspend_rcq)
1235 {
1236
1237 /* decrement the completion count and check if we need to suspend processing */
1238 DbgBreakIf(rcq->suspend_processing == TRUE);
1239
1240 /* Update the RSS-update statistics - these indicate which FW flow was taken and also count the overall number of updates */
1241 DbgMessage(pdev, INFORMl4sp, "lm_tcp_rss_update_ramrod_comp(): %d\n",update_stats_type);
1242 switch (update_stats_type) {
1243 case TOE_RSS_UPD_QUIET:
1244 rcq->rss_update_stats_quiet++;
1245 break;
1246 case TOE_RSS_UPD_SLEEPING:
1247 rcq->rss_update_stats_sleeping++;
1248 break;
1249 case TOE_RSS_UPD_DELAYED:
1250 rcq->rss_update_stats_delayed++;
1251 break;
1252 default:
1253 DbgBreak();
1254 break;
1255 }
1256
1257 /* This is a hack due to the fact that the FW has a hard time providing the cid on which the ramrod was sent */
1258 /* I know that I sent the ramrod on the leading connection so I use it here instead of the cid on the cqe (update cid) */
1259 /* If the driver ever changes the cid on which the ramrod is sent, this line will have to be changed as well - UGLY, UGLY */
1260 rcq->update_cid = LM_SW_LEADING_RSS_CID(pdev);
1261
1262 /* This is what it should have been if the FW always put the ramrod cid on these completions
1263 rcq->update_cid = cid;
1264 */
1265 if (update_suspend_rcq) {
1266 lm_tcp_rss_update_suspend_rcq(pdev, rcq);
1267 } else {
1268 rcq->rss_update_processing_delayed++;
1269 }
1270 }
1271
1272 /* Description:
1273 * Checks whether the rcq processing should be suspended as a result of an rss update
1274 */
1275 void lm_tcp_rss_update_suspend_rcq(
1276 IN struct _lm_device_t * pdev,
1277 IN lm_tcp_rcq_t * rcq)
1278 {
1279 void * cookie = NULL;
1280 /* This function is called once when an update completion is encountered and the rcq processing is not suspended yet.
1281 * At all other times it is called only if the rcq processing is already suspended. */
1282 if (rcq->suspend_processing == FALSE)
1283 {
1284 /* decrement the expected completion counter */
1285 mm_atomic_dec(&pdev->params.update_comp_cnt);
1286 /* Toe specific... to determine who completes the ramrod. */
1287 if (mm_atomic_dec(&pdev->params.update_toe_comp_cnt) == 0)
1288 {
1289 /* Everyone is done. Time to return credit to the slowpath ring... */
1290 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RSS_UPDATE,
1291 TOE_CONNECTION_TYPE, LM_TOE_FW_RSS_ID(pdev, LM_TOE_BASE_RSS_ID(pdev)));
1292 }
1293 }
1294 rcq->suspend_processing = pdev->params.update_toe_comp_cnt ? TRUE : FALSE;
1295
1296 if (rcq->suspend_processing == FALSE)
1297 {
1298 /* processing was suspended and can now be resumed, try to complete the update ramrod */
1299 DbgMessage(pdev, INFORMl4sp, "lm_tcp_rss_update_suspend_rcq(): calling lm_eth_update_ramrod_comp\n");
1300 if (mm_atomic_dec(&pdev->params.update_suspend_cnt) == 0)
1301 {
1302 if (pdev->slowpath_info.set_rss_cookie)
1303 {
1304 cookie = (void *)pdev->slowpath_info.set_rss_cookie;
1305 pdev->slowpath_info.set_rss_cookie = NULL;
1306 mm_set_done(pdev, rcq->update_cid, cookie);
1307 }
1308 }
1309 }
1310 }
1311
1312
1313
1314 /* Description:
1315 * initiate a caller allocated lm neighbor state
1316 * Assumptions:
1317 * - caller already zeroed given neigh state
1318 * Returns:
1319 * SUCCESS or any failure */
1320 lm_status_t lm_tcp_init_neigh_state(
1321 struct _lm_device_t *pdev,
1322 lm_state_block_t *state_blk,
1323 lm_neigh_state_t *neigh,
1324 l4_neigh_const_state_t *neigh_const,
1325 l4_neigh_cached_state_t *neigh_cached,
1326 l4_neigh_delegated_state_t *neigh_delegated)
1327 {
1328 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_neigh_state\n");
1329 DbgBreakIf(!(pdev && state_blk && neigh && neigh_const && neigh_cached && neigh_delegated));
1330
1331 neigh->hdr.state_blk = state_blk;
1332 neigh->hdr.state_id = STATE_ID_NEIGH;
1333 neigh->hdr.status = STATE_STATUS_NORMAL;
1334 d_list_push_tail(&state_blk->neigh_list, &neigh->hdr.link);
1335 neigh->num_dependents = 0;
1336
1337 mm_memcpy(&neigh->neigh_cached, neigh_cached, sizeof(neigh->neigh_cached));
1338 mm_memcpy(&neigh->neigh_const, neigh_const, sizeof(neigh->neigh_const));
1339 mm_memcpy(&neigh->neigh_delegated, neigh_delegated, sizeof(neigh->neigh_delegated));
1340
1341 neigh->host_reachability_time = 0; /* SHOULD BE: (mm_get_current_time() - neigh_cached->host_reachability_delta) */
1342 neigh->nic_reachability_time = 0; /* SHOULD BE: (mm_get_current_time() - neigh_delegated->nic_reachability_delta) */
1343 neigh->stale = 0;
1344
1345 return LM_STATUS_SUCCESS;
1346 }
1347
1348 /* Description:
1349 * initiate a caller allocated lm path state
1350 * Assumptions:
1351 * - caller already zeroed given path state
1352 * Returns:
1353 * SUCCESS or any failure */
1354 lm_status_t lm_tcp_init_path_state(
1355 struct _lm_device_t *pdev,
1356 lm_state_block_t *state_blk,
1357 lm_path_state_t *path,
1358 lm_neigh_state_t *neigh,
1359 l4_path_const_state_t *path_const,
1360 l4_path_cached_state_t *path_cached,
1361 l4_path_delegated_state_t *path_delegated)
1362 {
1363 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_path_state\n");
1364 DbgBreakIf(!(pdev && state_blk && path && neigh && path_const && path_cached && path_delegated));
1365 DbgBreakIf(neigh->hdr.state_id != STATE_ID_NEIGH || neigh->hdr.status != STATE_STATUS_NORMAL);
1366
1367 path->hdr.state_blk = state_blk;
1368 path->hdr.state_id = STATE_ID_PATH;
1369 path->hdr.status = STATE_STATUS_NORMAL;
1370 d_list_push_tail(&state_blk->path_list, &path->hdr.link);
1371 path->neigh = neigh;
1372 neigh->num_dependents++;
1373 path->num_dependents = 0;
1374
1375 mm_memcpy(&path->path_cached, path_cached, sizeof(path->path_cached));
1376 mm_memcpy(&path->path_const, path_const, sizeof(path->path_const));
1377 mm_memcpy(&path->path_delegated, path_delegated, sizeof(path->path_delegated));
1378
1379 return LM_STATUS_SUCCESS;
1380 }
1381
1382 /* Description:
1383 * initiate a caller allocated lm tcp state
1384 * Assumptions:
1385 * - caller already zeroed given tcp state
1386 * - caller already set the tx/rx_con pointers of the given
1387 * tcp state to pre-allocated tx/rx cons
1388 * Returns:
1389 * SUCCESS or any failure */
1390 lm_status_t lm_tcp_init_tcp_state(
1391 struct _lm_device_t *pdev,
1392 lm_state_block_t *state_blk,
1393 lm_tcp_state_t *tcp,
1394 lm_path_state_t *path,
1395 l4_tcp_const_state_t *tcp_const,
1396 l4_tcp_cached_state_t *tcp_cached,
1397 l4_tcp_delegated_state_t *tcp_delegated,
1398 u32_t tcp_cid_addr)
1399 {
1400 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tcp_state, ptr=%p, src_port=%d\n", tcp, tcp_const->src_port);
1401 DbgBreakIf(!(pdev && state_blk && tcp && path && tcp_const && tcp_cached && tcp_delegated));
1402 DbgBreakIf(path->hdr.state_id != STATE_ID_PATH || path->hdr.status != STATE_STATUS_NORMAL);
1403
1404 /* We need to determine the ULP_TYPE and get ourselves a cid if one doesn't already exist */
1405 if (!tcp_cid_addr)
1406 {
1407 tcp->ulp_type = TOE_CONNECTION_TYPE;
1408 }
1409 else
1410 {
1411 tcp->ulp_type = lm_map_cid_to_proto(pdev, tcp_cid_addr);
1412 tcp->cid = tcp_cid_addr;
1413 lm_set_cid_resc(pdev, TOE_CONNECTION_TYPE, tcp, tcp_cid_addr);
1414 }
1415
1416 tcp->hdr.state_blk = state_blk;
1417 tcp->hdr.state_id = STATE_ID_TCP;
1418 tcp->hdr.status = STATE_STATUS_INIT;
1419 d_list_push_tail(&state_blk->tcp_list, &tcp->hdr.link);
1420 tcp->path = path;
1421 path->num_dependents++;
1422
1423 if (tcp->ulp_type == TOE_CONNECTION_TYPE)
1424 {
1425 pdev->toe_info.stats.total_ofld++;
1426 }
1427 else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
1428 {
1429 pdev->iscsi_info.run_time.stats.total_ofld++;
1430 }
1431
1432 mm_memcpy(&tcp->tcp_cached, tcp_cached, sizeof(tcp->tcp_cached));
1433 mm_memcpy(&tcp->tcp_const, tcp_const, sizeof(tcp->tcp_const));
1434 mm_memcpy(&tcp->tcp_delegated, tcp_delegated, sizeof(tcp->tcp_delegated));
1435
1436 /* the rest of the tcp state's fields that require an initialization value other than 0,
1437 * will be initialized later (when lm_tcp_init_tx_con/lm_tcp_init_rx_con/lm_tcp_init_tcp_context are called) */
1438
1439 return LM_STATUS_SUCCESS;
1440 }
1441
1442 /* calc connection's mss according to path_mtu and remote MSS */
1443 static u32_t _lm_tcp_calc_mss(u32_t path_mtu, u16_t remote_mss, u8_t is_ipv6, u8_t ts_enabled,
1444 u8_t llc_snap_enabled, u8_t vlan_enabled)
1445 {
1446 #define MIN_MTU 576 /* rfc 793 */
1447 #define IPV4_HDR_LEN 20
1448 #define IPV6_HDR_LEN 40
1449 #define TCP_HDR_LEN 20
1450 #define TCP_OPTION_LEN 12
1451 #define LLC_SNAP_LEN 8
1452 #define VLAN_LEN 4
1453
1454 u32_t mss = 0;
1455 u32_t hdrs = TCP_HDR_LEN;
1456
1457 UNREFERENCED_PARAMETER_(vlan_enabled);
1458 UNREFERENCED_PARAMETER_(llc_snap_enabled);
1459
1460 if(is_ipv6) {
1461 hdrs += IPV6_HDR_LEN;
1462 } else {
1463 hdrs += IPV4_HDR_LEN;
1464 }
1465 #ifdef LLC_SNAP_HEADER_ROOMS_WITH_PAYLOAD
1466 /*
1467 LLC_SNAP_HEADER_ROOMS_WITH_PAYLOAD was never defined. Nobody remembers when the LLC/SNAP protocol was last tested, but
1468 in any case the LLC/SNAP header must not take room out of the payload
1469 */
1470 if (llc_snap_enabled) {
1471 hdrs += LLC_SNAP_LEN;
1472 }
1473 #endif
1474 #ifdef VLAN_HEADER_ROOMS_WITH_PAYLOAD
1475 /*
1476 VLAN_HEADER_ROOMS_WITH_PAYLOAD was never defined. The code below is kept as a reminder that there was once a problem of
1477 the data payload size being decreased (-4) because the VLAN header took room out of the payload (CQ39709)
1478 */
1479 if (vlan_enabled) {
1480 hdrs += VLAN_LEN;
1481 }
1482 #endif
1483 DbgBreakIf(path_mtu < MIN_MTU);
1484 mss = path_mtu - hdrs;
1485
1486 if(mss > remote_mss) {
1487 mss = remote_mss;
1488 }
1489 if(ts_enabled) {
1490 mss -= TCP_OPTION_LEN;
1491 }
1492 if (!mss) {
1493 DbgBreakIf(!mss);
1494 mss = 1; /*mss may be used as divider, so let's prevent division by zero*/
1495 }
1496 return mss;
1497 }
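/* Illustrative sketch (not compiled into the driver): a standalone, hypothetical rendering of the
 * MSS arithmetic above, assuming the same header-length constants. Example: an IPv4 path with a
 * 1500-byte MTU, timestamps enabled and remote MSS 0xffff gives 1500 - 20 (IP) - 20 (TCP) = 1460,
 * capped by the remote MSS, minus 12 for the timestamp option => 1448. */
#if 0
static u32_t _example_calc_mss(u32_t path_mtu, u16_t remote_mss, u8_t is_ipv6, u8_t ts_enabled)
{
    u32_t hdrs = 20 /* TCP */ + (is_ipv6 ? 40 : 20) /* IP */;
    u32_t mss  = path_mtu - hdrs;

    if (mss > remote_mss) {
        mss = remote_mss;
    }
    if (ts_enabled) {
        mss -= 12; /* TCP timestamp option */
    }
    return mss ? mss : 1; /* never return 0 - mss may later be used as a divisor */
}
/* _example_calc_mss(1500, 0xffff, 0, 1) == 1448 */
#endif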
1498
1499 /** Description
1500 * calculate the fragment count for a given initial receive window and mss
1501 * The fragment count is based on the maximum size we will need to do for a single
1502 * indication
1503 */
1504 static u32_t _lm_tcp_calc_frag_cnt(lm_device_t * pdev, u32_t initial_rcv_wnd, u32_t mss)
1505 {
1506 u32_t frag_cnt;
1507
1508 frag_cnt = initial_rcv_wnd / mss;
1509 if (frag_cnt < (0x10000 / mss)) {
1510 frag_cnt = 0x10000 / mss;
1511 }
1512
1513 if ((pdev->params.l4_max_rcv_wnd_size > 0x10000) && (frag_cnt > (pdev->params.l4_max_rcv_wnd_size / mss))) {
1514 frag_cnt = pdev->params.l4_max_rcv_wnd_size / mss;
1515 }
1516 frag_cnt = frag_cnt * 2 + 1;
1517
1518 if (pdev->params.l4_max_gen_buf_cnt && (frag_cnt > pdev->params.l4_max_gen_buf_cnt)) {
1519 frag_cnt = pdev->params.l4_max_gen_buf_cnt;
1520 }
1521 return frag_cnt;
1522 }
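/* Worked example (assumed parameter values, for illustration only): with mss = 1460 and
 * initial_rcv_wnd = 0x40000, the initial count is 0x40000/1460 = 179, which already exceeds
 * the 0x10000/1460 = 44 lower bound. If l4_max_rcv_wnd_size were 0x20000, the count would be
 * capped to 0x20000/1460 = 89; otherwise it stays 179. The count is then scaled to
 * 179*2 + 1 = 359 and, if l4_max_gen_buf_cnt is non-zero and smaller, clipped to that value. */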
1523
1524 u32_t lm_tcp_calc_frag_cnt(
1525 lm_device_t * pdev,
1526 lm_tcp_state_t * tcp
1527 )
1528 {
1529 u32_t mss, frag_cnt;
1530 DbgBreakIf(!(pdev && tcp));
1531 mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
1532 tcp->tcp_const.remote_mss,
1533 (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
1534 tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
1535 FALSE,
1536 tcp->path->neigh->neigh_const.vlan_tag != 0);
1537
1538 frag_cnt = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, mss);
1539
1540 return frag_cnt;
1541 }
1542
1543
1544
1545 static void _lm_tcp_init_qe_buffer(
1546 struct _lm_device_t * pdev,
1547 lm_tcp_qe_buffer_t * qe_buffer,
1548 u8_t * mem_virt,
1549 u32_t cnt,
1550 u8_t cqe_size)
1551 {
1552 UNREFERENCED_PARAMETER_(pdev);
1553
1554 qe_buffer->left = cnt;
1555 qe_buffer->first = (char *)mem_virt;
1556 qe_buffer->head = qe_buffer->first;
1557 qe_buffer->tail = qe_buffer->first;
1558 qe_buffer->last = qe_buffer->first;
1559 qe_buffer->last += (qe_buffer->left-1)*cqe_size;
1560 qe_buffer->qe_size = cqe_size;
1561 }
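/* Resulting layout (descriptive note, illustration only): the buffer is a flat array of 'cnt'
 * entries of 'cqe_size' bytes each. 'first', 'head' and 'tail' all start at entry 0, 'last'
 * points at the start of the final entry (first + (cnt-1)*cqe_size), and 'left' starts at cnt,
 * i.e. all entries are free. Wrap-around of head/tail past 'last' is presumably handled by the
 * fast-path code that consumes this buffer; it is not shown here. */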
1562
1563 /** Description
1564 * function calculates the amount of virtual memory required for the RX connection
1565 * Return
1566 * amount of virtual memory required
1567 */
1568 u32_t lm_tcp_rx_con_get_virt_size(struct _lm_device_t * pdev, lm_tcp_state_t * tcp)
1569 {
1570 u32_t frag_cnt;
1571 u32_t mem_size;
1572 u32_t mss;
1573
1574 /* The calculation of frag_cnt is based on the calculation from Teton's init_rx_tcp_resc();
1575 * the assertion below is also taken from Teton */
1576 DbgBreakIf(tcp->tcp_cached.initial_rcv_wnd == 0);
1577 /* the rx_con may not be initialized at this stage, therefore we can't rely on the mss being initialized. */
1578 mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
1579 tcp->tcp_const.remote_mss,
1580 (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
1581 tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
1582 pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
1583 tcp->path->neigh->neigh_const.vlan_tag != 0);
1584
1585 frag_cnt = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, mss);
1586
1587
1588 DbgMessage(pdev, INFORMl4rx, "Calc #frags for rx-con initial_rcv_wnd: %d frag_cnt: %d\n", tcp->tcp_cached.initial_rcv_wnd, frag_cnt);
1589
1590 mem_size = sizeof(lm_frag_list_t) + (frag_cnt - 1)*sizeof(lm_frag_t);
1591
1592 return mem_size;
1593 }
1594
1595 void lm_tcp_init_tcp_sp_data_mem(
1596 struct _lm_device_t *pdev,
1597 lm_tcp_state_t *tcp
1598 )
1599 {
1600 /* slow-path physical memory */
1601 /* allocation of physical area for sp request */
1602 lm_sp_req_manager_t *sp_req_mgr = NULL;
1603
1604 sp_req_mgr = lm_cid_sp_req_mgr(pdev, tcp->cid);
1605 if CHK_NULL(sp_req_mgr)
1606 {
1607 DbgBreakIf(!sp_req_mgr);
1608 return;
1609 }
1610 DbgBreakIf(sp_req_mgr->sp_data_phys_addr.as_u32.low & CACHE_LINE_SIZE_MASK);
1611 tcp->sp_req_data.phys_addr = sp_req_mgr->sp_data_phys_addr;
1612 tcp->sp_req_data.virt_addr = sp_req_mgr->sp_data_virt_addr;
1613 }
1614
1615
1616 void lm_tcp_init_tcp_phys_mem(
1617 struct _lm_device_t *pdev,
1618 lm_tcp_state_t *tcp,
1619 lm_tcp_phy_mem_block_t * phy_mblk)
1620 {
1621 lm_tcp_con_t * con;
1622 u32_t mem_size;
1623 u16_t page_cnt,page_idx;
1624 u32_t idx = 0;
1625 u8_t bd_size;
1626 u8_t block_idx;
1627
1628 #if (LM_PAGE_SIZE != 4096)
1629 #error (LM_PAGE_SIZE != 4096) /* currently FW assumes a tx chain page is 4KB */
1630 #endif
1631
1632 /* Init physical memory */
1633 /* bd-chains */
1634 con = tcp->tx_con;
1635 page_cnt = (u16_t)pdev->params.l4_tx_chain_page_cnt;
1636 bd_size = sizeof(struct toe_tx_bd);
1637 block_idx = 0;
1638 for (idx = 0 ; idx < 2; idx++) {
1639 mem_size = LM_PAGE_SIZE;
1640 for (page_idx = 0; page_idx < page_cnt; page_idx++) {
1641 if (phy_mblk[block_idx].left < mem_size) {
1642 block_idx++;
1643 DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1644 }
1645 DbgBreakIf(phy_mblk[block_idx].left < mem_size);
1646 lm_bd_chain_add_page(pdev,&con->bd_chain,phy_mblk[block_idx].free, phy_mblk[block_idx].free_phy, bd_size, TRUE);
1647 phy_mblk[block_idx].free += mem_size;
1648 phy_mblk[block_idx].left -= mem_size;
1649 LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1650 }
1651 /* rx-con */
1652 con = tcp->rx_con;
1653 page_cnt = (u16_t)pdev->params.l4_rx_chain_page_cnt;
1654 bd_size = sizeof(struct toe_rx_bd);
1655 }
1656
1657 /* slow-path physical memory */
1658 /* allocation of physical area for sp request */
1659 mem_size = TOE_SP_PHYS_DATA_SIZE;
1660
1661 if (phy_mblk[block_idx].left < mem_size) {
1662 block_idx++;
1663 DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1664 }
1665 DbgBreakIf(mem_size > phy_mblk[block_idx].left);
1666 DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1667 tcp->sp_req_data.phys_addr = phy_mblk[block_idx].free_phy;
1668 tcp->sp_req_data.virt_addr = (lm_tcp_slow_path_phys_data_t *)phy_mblk[block_idx].free;
1669 mm_memset(tcp->sp_req_data.virt_addr, 0, mem_size);
1670 phy_mblk[block_idx].free += mem_size;
1671 phy_mblk[block_idx].left -= mem_size;
1672 LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1673
1674 /* doorbell data */
1675 /* init tx part */
1676 mem_size = TOE_DB_TX_DATA_SIZE;
1677 if (phy_mblk[block_idx].left < mem_size) {
1678 block_idx++;
1679 DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1680 }
1681 DbgBreakIf(mem_size > phy_mblk[block_idx].left);
1682 DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1683 tcp->tx_con->phys_db_data = phy_mblk[block_idx].free_phy;
1684 tcp->tx_con->db_data.tx = (volatile struct toe_tx_db_data *)phy_mblk[block_idx].free;
1685 tcp->tx_con->db_data.tx->flags = 0;
1686 tcp->tx_con->db_data.tx->bds_prod = 0;
1687 /* init tx db data to snd.una (+ sizeof sent unacked data that will
1688 * be initiated when sent unacked data is posted): */
1689 tcp->tx_con->db_data.tx->bytes_prod_seq = tcp->tcp_delegated.send_una;
1690 phy_mblk[block_idx].free += mem_size;
1691 phy_mblk[block_idx].left -= mem_size;
1692 LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1693
1694
1695 /* init rx part */
1696 if (phy_mblk[block_idx].left < mem_size) {
1697 block_idx++;
1698 DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1699 }
1700 mem_size = TOE_DB_RX_DATA_SIZE;
1701 DbgBreakIf(mem_size > phy_mblk[block_idx].left);
1702 DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1703 tcp->rx_con->phys_db_data = phy_mblk[block_idx].free_phy;
1704 tcp->rx_con->db_data.rx = (volatile struct toe_rx_db_data *)phy_mblk[block_idx].free;
1705 phy_mblk[block_idx].free += mem_size;
1706 phy_mblk[block_idx].left -= mem_size;
1707 LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1708 tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
1709 /* we also need to initialize the driver copy of the rcv_win_right_edge */
1710 tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
1711 tcp->rx_con->db_data.rx->bds_prod = 0;
1712 tcp->rx_con->db_data.rx->bytes_prod = 0;
1713 tcp->rx_con->db_data.rx->consumed_grq_bytes = 0;
1714 tcp->rx_con->db_data.rx->flags = 0;
1715 tcp->rx_con->db_data.rx->reserved1 = 0;
1716 }
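/* The function above repeats one carving pattern for every area it places (bd-chain pages,
 * slow-path data, tx/rx doorbell data): take mem_size bytes from the current physical block,
 * then advance the block's free pointers. A hypothetical helper that captures the pattern is
 * sketched below under #if 0; it is illustration only and is not used by the driver. */
#if 0
static u8_t * _example_carve_phys(lm_tcp_phy_mem_block_t *blk, u32_t mem_size, lm_address_t *phys)
{
    u8_t *virt;

    /* caller is expected to have advanced to a block with enough room left */
    DbgBreakIf(blk->left < mem_size);

    virt  = blk->free;
    *phys = blk->free_phy;

    blk->free += mem_size;
    blk->left -= mem_size;
    LM_INC64(&blk->free_phy, mem_size);

    return virt;
}
#endif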
1717
1718 void lm_tcp_init_tcp_virt_mem(
1719 struct _lm_device_t *pdev,
1720 lm_tcp_state_t *tcp,
1721 lm_tcp_mem_block_t * mblk)
1722 {
1723 lm_tcp_con_t * con;
1724 u32_t mem_size;
1725
1726 u32_t idx = 0;
1727 u8_t cqe_size;
1728
1729 con = tcp->tx_con;
1730 cqe_size = sizeof(struct toe_tx_cqe);
1731 for (idx = 0; idx < 2; idx++) {
1732 /* allocation of buffers for history CQEs */
1733 if (pdev->params.l4_history_cqe_cnt) {
1734 mem_size = pdev->params.l4_history_cqe_cnt*cqe_size;
1735 DbgBreakIf(mblk->left < mem_size);
1736 _lm_tcp_init_qe_buffer(pdev, &con->history_cqes, mblk->free, pdev->params.l4_history_cqe_cnt, cqe_size);
1737 mblk->free += mem_size;
1738 mblk->left -= mem_size;
1739 } else {
1740 DbgBreakMsg("MichalS: Currently History Count = 0 is not SUPPORTED\n");
1741 }
1742 con = tcp->rx_con;
1743 cqe_size = sizeof(struct toe_rx_cqe);
1744 }
1745
1746 /* rx frag list */
1747 mem_size = lm_tcp_rx_con_get_virt_size(pdev, tcp);
1748 DbgBreakIf(mblk->left < mem_size);
1749
1750 tcp->rx_con->u.rx.gen_info.frag_list = (lm_frag_list_t *)mblk->free;
1751 mblk->free += mem_size;
1752 mblk->left -= mem_size;
1753
1754 }
1755 lm_status_t lm_tcp_init_tcp_resc(
1756 struct _lm_device_t *pdev,
1757 lm_tcp_state_t *tcp,
1758 lm_tcp_mem_block_t * mblk,
1759 lm_tcp_phy_mem_block_t * phy_mblk)
1760 {
1761 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tcp_resc\n");
1762 DbgBreakIf(!(pdev && tcp));
1763
1764 /* tx-specific */
1765 tcp->tx_con->type = TCP_CON_TYPE_TX;
1766 mm_memset(&tcp->tx_con->u.tx, 0, sizeof(lm_tcp_con_tx_t));
1767
1768 tcp->tx_con->flags = (TCP_POST_BLOCKED | TCP_COMP_BLOCKED);
1769 tcp->tx_con->tcp_state = tcp;
1770 s_list_init(&tcp->tx_con->active_tb_list, NULL, NULL, 0);
1771
1772 /* rx-specific */
1773 tcp->rx_con->type = TCP_CON_TYPE_RX;
1774 mm_memset(&tcp->rx_con->u.rx, 0, sizeof(lm_tcp_con_rx_t));
1775
1776 tcp->rx_con->flags = (TCP_POST_BLOCKED | TCP_COMP_BLOCKED);
1777 tcp->rx_con->tcp_state = tcp;
1778 s_list_init(&tcp->rx_con->active_tb_list, NULL, NULL, 0);
1779
1780 lm_tcp_init_tcp_phys_mem(pdev,tcp,phy_mblk);
1781
1782 lm_tcp_init_tcp_virt_mem(pdev,tcp,mblk);
1783
1784
1785 tcp->rx_con->u.rx.sws_info.mss = tcp->tx_con->u.tx.mss =
1786 _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
1787 tcp->tcp_const.remote_mss,
1788 (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
1789 tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
1790 pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
1791 tcp->path->neigh->neigh_const.vlan_tag != 0);
1792
1793
1794
1795 tcp->rx_con->u.rx.gen_info.max_frag_count = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, tcp->rx_con->u.rx.sws_info.mss);
1796 return LM_STATUS_SUCCESS;
1797 }
1798
1799 /* Function returns the required size for a virtual connection. If tcp_state is given,
1800 * the size is calculated for the specific connection given, o/w the default size is given.
1801 */
1802 u32_t lm_tcp_get_virt_size(
1803 struct _lm_device_t * pdev,
1804 lm_tcp_state_t * tcp_state)
1805 {
1806 u32_t virt_size = 0;
1807 u32_t mss = 0;
1808 u32_t const chain_idx = LM_SW_LEADING_RSS_CID(pdev);
1809
1810 virt_size =
1811 pdev->params.l4_history_cqe_cnt*sizeof(struct toe_tx_cqe) +
1812 pdev->params.l4_history_cqe_cnt*sizeof(struct toe_rx_cqe);
1813
1814 if (tcp_state)
1815 {
1816 virt_size += lm_tcp_rx_con_get_virt_size(pdev,tcp_state);
1817 }
1818 else
1819 {
1820 #define LM_TCP_DEFAULT_WINDOW_SIZE 0x10000
1821
1822 if(CHK_NULL(pdev) ||
1823 ERR_IF((ARRSIZE(pdev->params.l2_cli_con_params) <= chain_idx) ||
1824 (CHIP_IS_E1H(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1H)) || /* TODO E2 add IS_E2*/
1825 (CHIP_IS_E1(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1)) ))
1826 {
1827 DbgBreakIf(1);
1828 return 0;
1829 }
1830
1831 mss = _lm_tcp_calc_mss(pdev->params.l2_cli_con_params[chain_idx].mtu, 0xffff, FALSE, FALSE, FALSE, FALSE);
1832 virt_size += sizeof(lm_frag_list_t) +
1833 (_lm_tcp_calc_frag_cnt(pdev, LM_TCP_DEFAULT_WINDOW_SIZE, mss) - 1)*sizeof(lm_frag_t);
1834 }
1835 return virt_size;
1836 }
1837
1838 u32_t lm_tcp_get_phys_size(
1839 struct _lm_device_t * pdev)
1840 {
1841 u32_t mem_size = TOE_SP_PHYS_DATA_SIZE + TOE_DB_TX_DATA_SIZE + TOE_DB_RX_DATA_SIZE;
1842
1843 mem_size = ((mem_size / LM_PAGE_SIZE) + 1) * LM_PAGE_SIZE;
1844
1845 mem_size += pdev->params.l4_rx_chain_page_cnt*LM_PAGE_SIZE + /* rx bd-chain */
1846 pdev->params.l4_tx_chain_page_cnt*LM_PAGE_SIZE; /* tx bd-chain */
1847
1848 return mem_size;
1849 }
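/* Worked example (assumed parameter values, for illustration only): with LM_PAGE_SIZE = 4KB
 * and the three per-connection areas (slow-path data + tx/rx doorbell data) totalling less
 * than one page, the rounding above yields a single 4KB page. With l4_tx_chain_page_cnt = 2
 * and l4_rx_chain_page_cnt = 2 the function would then return (1 + 2 + 2) * 4KB = 20KB of
 * physical memory per connection. Note that the rounding expression always adds one page,
 * so a total that is already an exact multiple of LM_PAGE_SIZE still grows by a full page. */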
1850
1851 lm_status_t lm_tcp_post_buffered_data(
1852 struct _lm_device_t *pdev,
1853 lm_tcp_state_t *tcp,
1854 d_list_t *buffered_data)
1855 {
1856 lm_tcp_con_rx_gen_info_t * gen_info = NULL;
1857 lm_tcp_gen_buf_t * curr_gen_buf = NULL;
1858
1859 DbgBreakIf(!buffered_data);
1860 if(!d_list_is_empty(buffered_data)) {
1861 gen_info = &tcp->rx_con->u.rx.gen_info;
1862 curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(buffered_data);
1863 DbgBreakIf(!d_list_is_empty(&gen_info->peninsula_list));
1864 d_list_add_head(&gen_info->peninsula_list, buffered_data);
1865 /* initialize peninsula_nbytes */
1866 while (curr_gen_buf) {
1867 gen_info->peninsula_nbytes += curr_gen_buf->placed_bytes;
1868 curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_next_entry(&curr_gen_buf->link);
1869 }
1870
1871 DbgBreakIf(tcp->rx_con->flags & TCP_INDICATE_REJECTED);
1872 tcp->rx_con->flags |= TCP_RX_COMP_DEFERRED; /* TCP_INDICATE_REJECTED was used here to wait for rx buffers from the OS.
1873 With the TCP_RX_COMP_DEFERRED flag, processing of the
1874 SP_REQUEST_INITIATE_OFFLOAD completion will indicate the buffered data
1875 if needed */
1876 }
1877
1878
1879 return LM_STATUS_SUCCESS;
1880 }
1881
1882 /* calculate tcp pseudo check sum.
1883 * input and returned value are in _network_ order */
1884 static u16_t lm_tcp_calc_tcp_pseudo_checksum(
1885 struct _lm_device_t *pdev,
1886 u32_t n_src_ip[4],
1887 u32_t n_dst_ip[4],
1888 u8_t ip_type)
1889 {
1890 #define D_IP_PROTOCOL_TCP 6
1891 u32_t sum = 0;
1892 int i;
1893
1894 if(ip_type == IP_VERSION_IPV4) { /* IPV4 */
1895 sum += n_src_ip[0] & 0xffff;
1896 sum += (n_src_ip[0]>>16) & 0xffff;
1897
1898 sum += n_dst_ip[0] & 0xffff;
1899 sum += (n_dst_ip[0]>>16) & 0xffff;
1900 } else {
1901 for (i = 0; i < 4; i++) {
1902 sum += n_src_ip[i] & 0xffff;
1903 sum += (n_src_ip[i]>>16) & 0xffff;
1904 }
1905 for (i = 0; i < 4; i++) {
1906 sum += n_dst_ip[i] & 0xffff;
1907 sum += (n_dst_ip[i]>>16) & 0xffff;
1908 }
1909 }
1910
1911 sum += HTON16((u16_t)(D_IP_PROTOCOL_TCP));
1912
1913 /* Fold 32-bit sum to 16 bits */
1914 while( sum >> 16 ) {
1915 sum = (sum & 0xffff) + (sum >> 16);
1916 }
1917
1918 DbgMessage(pdev, VERBOSEl4sp,
1919 "_lm_tcp_calc_tcp_pseudo_checksum: n_src_ip=%x, n_dst_ip=%x, (u16_t)sum=%x\n",
1920 n_src_ip[0], n_dst_ip[0], (u16_t)sum);
1921
1922 return (u16_t)sum;
1923 }
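/* Worked example of the folding step above (numbers chosen for illustration): a 32-bit running
 * sum of 0x0001A753 folds as (0xA753 + 0x0001) = 0xA754, which leaves no carry, so the returned
 * 16-bit value is 0xA754. Note that this routine sums only the address and protocol words of
 * the pseudo header; the TCP length and the TCP segment itself are not included here and are
 * presumably accounted for elsewhere when the full checksum is produced. */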
1924
1925 /* find the bd in the bd chain that contains snd_nxt, the offset of snd_nxt
1926 * within this bd, and the base address of the page that contains this bd. */
1927 static lm_status_t lm_locate_snd_next_info(
1928 lm_tcp_con_t * tx_con,
1929 u32_t snd_nxt,
1930 u32_t snd_una,
1931 u16_t * bd_idx,
1932 u16_t * bd_offset,
1933 lm_address_t * page_addr)
1934 {
1935 u32_t cur_seq = 0;
1936 struct toe_tx_bd * cur_tx_bd = NULL;
1937
1938 /* we assume that the first byte of the first application buffer equals SND.UNA
1939 * we need to find SND.NXT relative to this */
1940 DbgMessage(NULL, VERBOSEl4sp, "### lm_locate_snd_next_info\n");
1941
1942 /* want to make sure the consumer is still zero ... */
1943 if ((tx_con->bd_chain.cons_idx != 0) ||
1944 (S32_SUB(tx_con->bytes_post_cnt ,S32_SUB(snd_nxt, snd_una)) < 0) ||
1945 (tx_con->bytes_comp_cnt))
1946 {
1947 DbgBreakIf(tx_con->bd_chain.cons_idx != 0);
1948 DbgBreakIf(S32_SUB(tx_con->bytes_post_cnt ,S32_SUB(snd_nxt, snd_una)) < 0);
1949 DbgBreakIf(tx_con->bytes_comp_cnt); /* nothing should be completed yet */
1950 return LM_STATUS_INVALID_PARAMETER;
1951 }
1952
1953 *bd_idx = 0;
1954 *bd_offset = 0;
1955 *page_addr = tx_con->bd_chain.bd_chain_phy;
1956
1957 if (lm_bd_chain_prod_idx(&tx_con->bd_chain) == 0) {
1958 /* If the producer is '0', chain is empty. bd_idx/offset are 0 */
1959 if ((tx_con->bytes_post_cnt > 0) ||
1960 (snd_nxt != snd_una))
1961 {
1962 DbgBreakIf(tx_con->bytes_post_cnt > 0);
1963 /* Notice: This case was seen and it's a bug in the MS stack: delegated: snd_nxt > snd_una but WITHOUT unacked data */
1964 DbgBreakIf(snd_nxt != snd_una);
1965 return LM_STATUS_INVALID_PARAMETER;
1966 }
1967 return LM_STATUS_SUCCESS;
1968 }
1969
1970 cur_seq = snd_una;
1971 cur_tx_bd = (struct toe_tx_bd *)tx_con->bd_chain.bd_chain_virt;
1972
1973 while ((*bd_idx < lm_bd_chain_prod_idx(&tx_con->bd_chain))
1974 && S32_SUB(snd_nxt, cur_seq + cur_tx_bd->size) >= 0) {
1975 /* Advance to the next bd. */
1976 cur_seq += cur_tx_bd->size;
1977 lm_bd_chain_incr_bd(&tx_con->bd_chain, page_addr, (void**)&cur_tx_bd, bd_idx);
1978 }
1979
1980 /* make sure assignment is legit. */
1981 if ((S32_SUB(snd_nxt, cur_seq) < 0) ||
1982 (S32_SUB(snd_nxt, cur_seq) > 0xffff))
1983 {
1984 DbgBreakIf(S32_SUB(snd_nxt, cur_seq) < 0 );
1985 DbgBreakIf(S32_SUB(snd_nxt, cur_seq) > 0xffff );
1986 return LM_STATUS_INVALID_PARAMETER;
1987 }
1988
1989 *bd_offset = S32_SUB(snd_nxt, cur_seq);
1990 return LM_STATUS_SUCCESS;
1991 }
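/* Worked example (assumed values, for illustration only): suppose snd_una = 1000, snd_nxt = 5000
 * and the posted application buffers were broken into BDs of 1460 bytes each starting at snd_una.
 * The walk above advances past bd[0] (cur_seq 1000->2460) and bd[1] (cur_seq 2460->3920), then
 * stops at bd[2] because 5000 - (3920 + 1460) < 0. The function therefore returns bd_idx = 2,
 * bd_offset = 5000 - 3920 = 1080, and page_addr pointing at the page that holds bd[2]. */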
1992
1993 static lm_status_t _lm_tcp_init_xstorm_toe_context(
1994 struct _lm_device_t *pdev,
1995 lm_tcp_state_t * tcp)
1996 {
1997 struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
1998 struct xstorm_toe_ag_context * xctx_ag = &ctx->xstorm_ag_context;
1999 struct xstorm_toe_st_context * xctx_st = &ctx->xstorm_st_context.context;
2000 lm_address_t mem_phys = {{0}};
2001 u16_t bd_idx = 0;
2002 u16_t bd_offset = 0;
2003 lm_status_t lm_status = LM_STATUS_SUCCESS;
2004
2005 /* xstorm ag context */
2006 mm_memset(xctx_ag, 0, sizeof(struct xstorm_toe_ag_context));
2007
2008 if(tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_NAGLING)
2009 {
2010 xctx_ag->agg_vars1 |= XSTORM_TOE_AG_CONTEXT_NAGLE_EN;
2011 }
2012 /* Initialize Send-Una info */
2013 mem_phys = lm_bd_chain_phys_addr(&tcp->tx_con->bd_chain, 0);
2014 xctx_ag->cmp_bd_cons = 0; /* idx of bd with snd.una - always 0 */
2015 xctx_ag->cmp_bd_page_0_to_31 = mem_phys.as_u32.low; /* page that includes the snd.una */
2016 xctx_ag->cmp_bd_page_32_to_63 = mem_phys.as_u32.high; /* page that includes the snd.una */
2017 xctx_ag->cmp_bd_start_seq = tcp->tcp_delegated.send_una; /* the sequence number of the first byte in the bd which holds SndUna */
2018
2019 /* more_to_send: The difference between SndNxt and the last byte in the bd pointed by bd prod */
2020 if (tcp->tx_con->bytes_comp_cnt)
2021 {
2022 DbgBreakIf(tcp->tx_con->bytes_comp_cnt);
2023 return LM_STATUS_INVALID_PARAMETER;
2024 }
2025 xctx_ag->more_to_send = S32_SUB(tcp->tx_con->bytes_post_cnt,(S32_SUB(tcp->tcp_delegated.send_next,tcp->tcp_delegated.send_una)));
2026 if ((tcp->tx_con->flags & TCP_FIN_REQ_POSTED) && !(tcp->tx_con->flags & TCP_FIN_REQ_COMPLETED)) {
2027 xctx_ag->more_to_send--; /* the fin byte on the bd chain is not counted */
2028 }
2029
2030 /* xstorm st context */
2031 mm_memset(xctx_st, 0, sizeof(struct xstorm_toe_st_context));
2032 lm_status = lm_locate_snd_next_info(tcp->tx_con, tcp->tcp_delegated.send_next, tcp->tcp_delegated.send_una,
2033 &bd_idx, &bd_offset, &mem_phys);
2034 if (lm_status != LM_STATUS_SUCCESS)
2035 {
2036 return lm_status;
2037 }
2038 xctx_st->toe.tx_bd_cons = bd_idx; /* index of bd that includes snd_nxt */
2039 xctx_st->toe.tx_bd_offset = bd_offset; /* offset of snd_nxt within its bd */
2040 xctx_st->toe.tx_bd_page_base_hi = mem_phys.as_u32.high;
2041 xctx_st->toe.tx_bd_page_base_lo = mem_phys.as_u32.low;
2042
2043 xctx_st->toe.bd_prod = lm_bd_chain_prod_idx(&tcp->tx_con->bd_chain); /* Bd containing the last byte the application wishes to transmit */
2044 xctx_st->toe.driver_doorbell_info_ptr_lo = tcp->tx_con->phys_db_data.as_u32.low;
2045 xctx_st->toe.driver_doorbell_info_ptr_hi = tcp->tx_con->phys_db_data.as_u32.high;
2046
2047 return LM_STATUS_SUCCESS;
2048 }
2049
2050
2051 static lm_status_t _lm_tcp_init_ustorm_toe_context(
2052 struct _lm_device_t *pdev,
2053 lm_tcp_state_t *tcp)
2054 {
2055 struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
2056 struct ustorm_toe_ag_context *uctx_ag = &ctx->ustorm_ag_context;
2057 struct ustorm_toe_st_context *uctx_st = &ctx->ustorm_st_context.context;
2058 lm_address_t mem_phys = {{0}};
2059
2060 /* Calculate the crc8 for CDU Validation */
2061 mm_memset(uctx_ag, 0, sizeof(struct ustorm_toe_ag_context));
2062
2063 /* ustorm_ag_context */
2064 uctx_ag->rq_prod = 0;
2065 uctx_ag->driver_doorbell_info_ptr_hi = tcp->rx_con->phys_db_data.as_u32.high;
2066 uctx_ag->driver_doorbell_info_ptr_lo = tcp->rx_con->phys_db_data.as_u32.low;
2067
2068 /* ustorm_st_context */
2069 mm_memset(uctx_st, 0, sizeof(struct ustorm_toe_st_context));
2070 uctx_st->indirection_ram_offset = (u16_t)tcp->tcp_const.hash_value;
2071 uctx_st->pen_grq_placed_bytes = tcp->rx_con->u.rx.gen_info.peninsula_nbytes;
2072 DbgMessage(pdev, INFORMl4sp, "_lm_tcp_init_ustorm_toe_context: IRO is 0x%x, IS is %d\n",
2073 uctx_st->indirection_ram_offset, uctx_st->__indirection_shift);
2074 if ((tcp->tcp_cached.rcv_indication_size > 0xffff) ||
2075 (tcp->tcp_cached.rcv_indication_size != 0))
2076 {
2077 DbgBreakIf(tcp->tcp_cached.rcv_indication_size > 0xffff);
2078 DbgBreakIf(tcp->tcp_cached.rcv_indication_size != 0); /* TBA receive_indication_size != 0 not supported : if it is we need to change initialization below */
2079 return LM_STATUS_INVALID_PARAMETER;
2080 }
2081 /* We set the ustorm context to rcv_indication_size = 1 byte. This means that the first packet placed on the GRQ
2082 * that is at least 1 byte long is indicated immediately, without arming the push timer. The first packet is identified as
2083 * a packet that is placed while there are no GRQ placed bytes. Every time the driver advertises 'consumedGRQ', the GRQ placed bytes
2084 * count is decreased by that number; bringing it back to '0' brings us back to the state where the next packet with 1 byte will be indicated.
2085 * We added this feature due to a sparta test called ReceiveIndication, which sends a fairly small packet and expects it to be indicated straight
2086 * away; for some reason the small RQ buffer doesn't make its way to the VBD... */
2087 uctx_st->rcv_indication_size = 1;
2088 mem_phys = lm_bd_chain_phys_addr(&tcp->rx_con->bd_chain, 0);
2089 uctx_st->pen_ring_params.rq_cons = 0;
2090 uctx_st->pen_ring_params.rq_cons_addr_hi = mem_phys.as_u32.high;
2091 uctx_st->pen_ring_params.rq_cons_addr_lo = mem_phys.as_u32.low;
2092
2093 uctx_st->prev_rcv_win_right_edge = tcp->rx_con->db_data.rx->rcv_win_right_edge;
2094
2095 if (pdev->params.l4_ignore_grq_push_enabled)
2096 {
2097 SET_FLAGS(uctx_st->flags2, USTORM_TOE_ST_CONTEXT_IGNORE_GRQ_PUSH);
2098 }
2099
2100 if (pdev->params.l4_enable_rss == L4_RSS_DYNAMIC)
2101 {
2102 SET_FLAGS( uctx_st->flags2, USTORM_TOE_ST_CONTEXT_RSS_UPDATE_ENABLED );
2103 }
2104 /*DbgMessage(pdev, FATAL, "_lm_tcp_init_ustorm_toe_context(): uctx_st->initial_rcv_wnd=%d\n", tcp->tcp_cached.initial_rcv_wnd);*/
2105 uctx_st->initial_rcv_wnd = tcp->tcp_cached.initial_rcv_wnd;
2106 uctx_st->rcv_nxt = tcp->tcp_delegated.recv_next;
2107
2108 return LM_STATUS_SUCCESS;
2109 }
2110
2111 static lm_status_t _lm_tcp_init_cstorm_toe_context(
2112 struct _lm_device_t *pdev,
2113 lm_tcp_state_t *tcp)
2114 {
2115 struct toe_context *ctx = (struct toe_context *)tcp->ctx_virt;
2116 struct cstorm_toe_ag_context *cctx_ag = &ctx->cstorm_ag_context;
2117 struct cstorm_toe_st_context *cctx_st = &ctx->cstorm_st_context.context;
2118 lm_address_t mem_phys = {{0}};
2119
2120 mm_memset(cctx_ag, 0, sizeof(struct cstorm_toe_ag_context));
2121
2122 if (tcp->tcp_cached.initial_rcv_wnd > MAX_INITIAL_RCV_WND)
2123 {
2124 /* we can't support more than the maximum receive window due to cyclic counters we use for
2125 * recv_next, recv_win_seq, updates, window increase */
2126 DbgBreakIfAll(tcp->tcp_cached.initial_rcv_wnd > MAX_INITIAL_RCV_WND);
2127 return LM_STATUS_INVALID_PARAMETER;
2128 }
2129
2130 /* cstorm_ag_context */
2131 cctx_ag->bd_prod = lm_bd_chain_prod_idx(&tcp->tx_con->bd_chain); /* Bd containing the last byte the application wishes to transmit */
2132 cctx_ag->rel_seq = tcp->tcp_delegated.send_una;
2133 cctx_ag->snd_max = tcp->tcp_delegated.send_max;
2134
2135 /* cstorm_st_context */
2136 mm_memset(cctx_st, 0, sizeof(struct cstorm_toe_st_context));
2137 mem_phys = lm_bd_chain_phys_addr(&tcp->tx_con->bd_chain, 0);
2138 cctx_st->bds_ring_page_base_addr_hi = mem_phys.as_u32.high; /* page that includes the snd.una */
2139 cctx_st->bds_ring_page_base_addr_lo = mem_phys.as_u32.low; /* page that includes the snd.una */
2140 cctx_st->bd_cons = 0; /* idx of bd with snd.una - always 0 */
2141 if (ERR_IF(tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE)) {
2142 if (tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE)
2143 {
2144 DbgBreakIfAll(tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE);
2145 return LM_STATUS_INVALID_PARAMETER;
2146 }
2147 tcp->tcp_const.hash_value = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
2148 }
2149
2150 cctx_st->prev_snd_max = tcp->tcp_delegated.send_una;
2151
2152
2153
2154
2155 /* For TOE RSS the value in the USTORM (RSS) must differ from the one in CSTORM (TSS).
2156 Two options:
2157 a. the base chain.
2158 b. the value from the most up-to-date indirection table.
2159 */
2160 if (pdev->params.l4_enable_rss == L4_RSS_DISABLED)
2161 {
2162 cctx_st->cpu_id = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
2163 }
2164 else
2165 {
2166 cctx_st->cpu_id = pdev->toe_info.indirection_table[tcp->tcp_const.hash_value];
2167 }
2168
2169 cctx_st->free_seq = tcp->tcp_delegated.send_una - 1; /* (snd.una - 1 - offset of snd.una byte in its buffer (which is always 0)) */
2170
2171 return LM_STATUS_SUCCESS;
2172 }
2173
2174 static lm_status_t _lm_tcp_init_tstorm_toe_context(
2175 struct _lm_device_t *pdev,
2176 lm_tcp_state_t * tcp)
2177 {
2178 struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
2179 struct tstorm_toe_ag_context * tctx_ag = &ctx->tstorm_ag_context;
2180 struct tstorm_toe_st_context * tctx_st = &ctx->tstorm_st_context.context;
2181
2182 UNREFERENCED_PARAMETER_(pdev);
2183
2184 /* tstorm ag context */
2185 mm_mem_zero(tctx_ag, sizeof(struct tstorm_toe_ag_context));
2186
2187 /* tstorm st context */
2188 mm_mem_zero(tctx_st, sizeof(struct tstorm_toe_st_context));
2189
2190 return LM_STATUS_SUCCESS;
2191 }
2192
2193 static lm_status_t _lm_tcp_init_timers_context(
2194 struct _lm_device_t *pdev,
2195 lm_tcp_state_t *tcp)
2196 {
2197 struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
2198 /* timers_context */
2199 SET_FLAGS(ctx->timers_context.flags, TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG);
2200
2201 UNREFERENCED_PARAMETER_(pdev);
2202
2203 return LM_STATUS_SUCCESS;
2204 }
2205
2206 static lm_status_t _lm_tcp_init_toe_context(
2207 struct _lm_device_t *pdev,
2208 lm_tcp_state_t *tcp)
2209 {
2210 lm_status_t lm_status = LM_STATUS_SUCCESS;
2211
2212 lm_status = _lm_tcp_init_xstorm_toe_context(pdev, tcp);
2213 if (lm_status != LM_STATUS_SUCCESS) {
2214 return lm_status;
2215 }
2216 lm_status = _lm_tcp_init_ustorm_toe_context(pdev, tcp);
2217 if (lm_status != LM_STATUS_SUCCESS) {
2218 return lm_status;
2219 }
2220 lm_status = _lm_tcp_init_cstorm_toe_context(pdev, tcp);
2221 if (lm_status != LM_STATUS_SUCCESS) {
2222 return lm_status;
2223 }
2224 lm_status = _lm_tcp_init_tstorm_toe_context(pdev, tcp);
2225 if (lm_status != LM_STATUS_SUCCESS) {
2226 return lm_status;
2227 }
2228 lm_status = _lm_tcp_init_timers_context(pdev, tcp);
2229 if (lm_status != LM_STATUS_SUCCESS) {
2230 return lm_status;
2231 }
2232
2233 /* now we need to configure the cdu-validation data */
2234 lm_set_cdu_validation_data(pdev, tcp->cid, FALSE /* don't invalidate */);
2235 return LM_STATUS_SUCCESS;
2236 }
2237
2238
2239 static lm_status_t _lm_tcp_init_tstorm_tcp_context(
2240 struct _lm_device_t *pdev,
2241 lm_tcp_state_t *tcp
2242 )
2243 {
2244 /* TODO: unify iscsi + toe structure name */
2245 struct tstorm_toe_tcp_ag_context_section *ttcp_ag;
2246 struct tstorm_tcp_st_context_section *ttcp_st;
2247 l4_ofld_params_t *l4_params = &pdev->ofld_info.l4_params;
2248 lm_path_state_t *path = tcp->path;
2249 lm_neigh_state_t *neigh = path->neigh;
2250 u32_t sm_rtt, sm_delta;
2251 u32_t snd_wnd;
2252
2253 ASSERT_STATIC(sizeof(struct tstorm_toe_tcp_ag_context_section) == sizeof(struct tstorm_tcp_tcp_ag_context_section) );
2254 if (tcp->ulp_type == TOE_CONNECTION_TYPE)
2255 {
2256 ttcp_ag = &((struct toe_context *)tcp->ctx_virt)->tstorm_ag_context.tcp;
2257 ttcp_st = &((struct toe_context *)tcp->ctx_virt)->tstorm_st_context.context.tcp;
2258 }
2259 else
2260 {
2261 ttcp_ag = (struct tstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)tcp->ctx_virt)->tstorm_ag_context.tcp;
2262 ttcp_st = &((struct iscsi_context *)tcp->ctx_virt)->tstorm_st_context.tcp;
2263 }
2264 mm_mem_zero(ttcp_ag, sizeof(struct tstorm_toe_tcp_ag_context_section));
2265 mm_mem_zero(ttcp_st, sizeof(struct tstorm_tcp_st_context_section));
2266
2267 /* tstorm_ag_context */
2268 ttcp_ag->snd_max = tcp->tcp_delegated.send_max;
2269 ttcp_ag->snd_nxt = tcp->tcp_delegated.send_next;
2270 ttcp_ag->snd_una = tcp->tcp_delegated.send_una;
2271
2272 /* tstorm_st_context*/
2273 // Starting with FW 7.6.5, DA_EN is a "don't care" for iSCSI since it is passed to the FW at pf init.
2274 // The iSCSI FW overrides this flag according to the pf init value regardless of the context init here.
2275 ttcp_st->flags2 |= TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; /* DA timer always on */
2276
2277 // DA_COUNTER_EN should stay always on since FW will not use it in case DA_EN is off.
2278 ttcp_st->flags2 |= TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN; /* DA counter always on */
2279 ttcp_st->dup_ack_count = tcp->tcp_delegated.dup_ack_count;
2280
2281 if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP) {
2282 ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS;
2283 }
2284 if(tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) {
2285 ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED;
2286 if ((tcp->tcp_cached.ka_time_out == 0) ||
2287 (tcp->tcp_cached.ka_interval == 0))
2288 {
2289 DbgBreakIf(tcp->tcp_cached.ka_time_out == 0);
2290 DbgBreakIf(tcp->tcp_cached.ka_interval == 0);
2291 return LM_STATUS_INVALID_PARAMETER;
2292 }
2293 }
2294 if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_WIN_SCALING) {
2295 ttcp_st->snd_wnd_scale = tcp->tcp_const.snd_seg_scale;
2296 }
2297
2298 ttcp_st->cwnd = tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una; /* i.e. ndis_tcp_delegated->CWnd */
2299 /* bugbug: driver workaround - wnd may be 0xffffffff, in this case we change it to 2^30 - since FW has an assumption this value
2300 * doesn't wrap-around, configuring it to 0xffffffff may cause it to wrap around and then change from a very large cwnd to a very
2301 * small one - we give 2^30 which is the largest cwnd that can be advertised. */
2302 if (ttcp_st->cwnd == 0xffffffff) {
2303 ttcp_st->cwnd = 0x40000000;
2304 }
2305
2306 ttcp_st->ka_interval =
2307 lm_time_resolution(pdev, tcp->tcp_cached.ka_interval, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
2308 ttcp_st->ka_max_probe_count = tcp->tcp_cached.ka_probe_cnt;
2309 if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) { /* KA is running (?) */
2310 ttcp_st->ka_probe_count = tcp->tcp_delegated.u.keep_alive.probe_cnt;
2311 } else { /* retransmit is running (?) */
2312 ttcp_st->ka_probe_count = 0;
2313 }
2314 ttcp_st->ka_timeout =
2315 lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
2316
2317 /* Set the src mac addr in tstorm context:
2318 * In both big and little endian architectures, the mac addr is given from the client in an array of
2319 * 6 chars. Therefore, regardless of the endian architecture, we need to swap this array into the little endian
2320 * convention of the tstorm context. */
2321 ttcp_st->msb_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[0])));
2322 ttcp_st->mid_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[2])));
2323 ttcp_st->lsb_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[4])));
2324
2325 ttcp_st->max_rt_time =
2326 lm_time_resolution(pdev, tcp->tcp_cached.max_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
2327 /* GilR: place holder, to be enabled in v0_18_1 when proper FW support is included */
2328 //ttcp_st->max_seg_retransmit_en = 0;
2329 if (ttcp_st->max_rt_time == 0) { /* GilR 9/19/2006 - TBD - currently FW does not handle the '0' case correctly. */
2330 ttcp_st->max_rt_time = 0xffffffff;
2331 ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN;
2332 //ctx->tstorm_st_context.tcp.max_seg_retransmit_en = 1;
2333 }
2334
2335 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2336 if (tcp->rx_con->u.rx.sws_info.mss > 0xffff)
2337 {
2338 DbgBreakIf(tcp->rx_con->u.rx.sws_info.mss > 0xffff);
2339 return LM_STATUS_INVALID_PARAMETER;
2340 }
2341 ttcp_st->mss = tcp->rx_con->u.rx.sws_info.mss & 0xffff;
2342 } else {
2343 /* we must calc mss here since it is possible that we don't have rx_con (iscsi) */
2344 ttcp_st->mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
2345 tcp->tcp_const.remote_mss,
2346 (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
2347 tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
2348 pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
2349 tcp->path->neigh->neigh_const.vlan_tag != 0) & 0xffff;
2350
2351 /* NirV: set expected release sequence parameter that's being set in the toe fw but not in the iscsi fw */
2352 /* should be done in the iscsi initiate offload handler in the fw as in toe */
2353 ttcp_st->expected_rel_seq = tcp->tcp_delegated.send_una;
2354 }
2355
2356 DbgMessage(pdev, INFORMl4sp, "offload num_retx=%d, snd_wnd_probe_cnt=%d\n",tcp->tcp_delegated.u.retransmit.num_retx,tcp->tcp_delegated.snd_wnd_probe_count);
2357
2358 ttcp_st->persist_probe_count = tcp->tcp_delegated.snd_wnd_probe_count;
2359 ttcp_st->prev_seg_seq = tcp->tcp_delegated.send_wl1;
2360 ttcp_st->rcv_nxt = tcp->tcp_delegated.recv_next;
2361 /*ttcp_st->reserved_slowpath = 0; This value is the 7 LSBs of the toeplitz hash result for this connection's 4 tuple.
2362 required in order to give the L2-completion on the correct RSS ring
2363 TBD - toeplitz hash calc not implemented for this yet, but no harm done */
2364
2365 //calculate snd window
2366 snd_wnd = (S32_SUB(tcp->tcp_delegated.send_cwin, tcp->tcp_delegated.send_win) > 0) ?
2367 (tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) : /* i.e. ndis_tcp_delegated->SndWnd */
2368 (tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una); /* i.e. ndis_tcp_delegated->CWnd */
2369
2370 if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max && snd_wnd > 0) { /* KA is running (?) */
2371 ttcp_st->rto_exp = 0;
2372 ttcp_st->retransmit_count = 0;
2373 } else { /* retransmit is running (?) */
2374 ttcp_st->retransmit_count = tcp->tcp_delegated.u.retransmit.num_retx;
2375 ttcp_st->rto_exp = tcp->tcp_delegated.u.retransmit.num_retx;
2376 }
2377 ttcp_st->retransmit_start_time =
2378 lm_time_resolution(pdev, tcp->tcp_delegated.total_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
2379
2380 /* convert to ms.
2381 * the /8 and /4 are a result of some shifts that MSFT does; these numbers were received from MSFT through emails and are
2382 * done the same in Teton. */
2383 sm_rtt = lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/8;
2384 if (sm_rtt > 30000) { /* reduce to 30sec */
2385 sm_rtt = 30000;
2386 }
2387 sm_delta = lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/4;
2388 if (sm_delta > 30000) { /* reduce to 30sec */
2389 sm_delta = 30000;
2390 }
2391
2392 ttcp_st->flags1 |= (sm_rtt << TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT); /* given in ticks, no conversion is required */
2393 ttcp_st->flags2 |= (sm_delta << TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT); /* given in ticks, no conversion is required */
2394 if ((tcp->ulp_type == TOE_CONNECTION_TYPE) && (tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED)) {
2395 ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD;
2396 }
2397
2398 ttcp_st->ss_thresh = tcp->tcp_delegated.ss_thresh;
2399 ttcp_st->timestamp_recent = tcp->tcp_delegated.ts_recent;
2400 ttcp_st->timestamp_recent_time =
2401 lm_time_resolution(pdev, tcp->tcp_delegated.ts_recent_age, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
2402 ttcp_st->vlan_id = neigh->neigh_const.vlan_tag;
2403 ttcp_st->recent_seg_wnd = tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una;
2404 ttcp_st->ooo_support_mode = (tcp->ulp_type == TOE_CONNECTION_TYPE)? TCP_TSTORM_OOO_SUPPORTED : TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
2405 ttcp_st->statistics_counter_id = (tcp->ulp_type == TOE_CONNECTION_TYPE)? LM_STATS_CNT_ID(pdev) : LM_CLI_IDX_ISCSI;
2406
2407 // Set statistics params
2408 if( TOE_CONNECTION_TYPE == tcp->ulp_type )
2409 {
2410 // set enable L2
2411 SET_FLAGS( ttcp_st->flags2, 1<<TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT );
2412
2413 // set enable L4
2414 SET_FLAGS( ttcp_st->flags2, 1<<TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT );
2415 }
2416
2417 return LM_STATUS_SUCCESS;
2418 }
2419
2420
2421 static lm_status_t _lm_tcp_init_xstorm_tcp_context(
2422 struct _lm_device_t *pdev,
2423 lm_tcp_state_t *tcp)
2424 {
2425 /* TODO: unify iscsi + toe structure name */
2426 struct xstorm_toe_tcp_ag_context_section * xtcp_ag;
2427 struct xstorm_common_context_section * xtcp_st;
2428 lm_path_state_t * path = tcp->path;
2429 lm_neigh_state_t * neigh = path->neigh;
2430 l4_ofld_params_t * l4_params = &(pdev->ofld_info.l4_params);
2431 u32_t src_ip[4], dst_ip[4];
2432 u16_t pseudo_cs, i;
2433 u32_t sm_rtt, sm_delta;
2434
2435 ASSERT_STATIC(sizeof(struct xstorm_toe_tcp_ag_context_section) == sizeof(struct xstorm_tcp_tcp_ag_context_section));
2436 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2437 xtcp_ag = &((struct toe_context *)tcp->ctx_virt)->xstorm_ag_context.tcp;
2438 xtcp_st = &((struct toe_context *)tcp->ctx_virt)->xstorm_st_context.context.common;
2439 } else {
2440 xtcp_ag = (struct xstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)tcp->ctx_virt)->xstorm_ag_context.tcp;
2441 xtcp_st = &((struct iscsi_context *)tcp->ctx_virt)->xstorm_st_context.common;
2442 }
2443
2444 mm_mem_zero(xtcp_ag, sizeof(struct xstorm_toe_tcp_ag_context_section));
2445 mm_mem_zero(xtcp_st, sizeof(struct xstorm_common_context_section));
2446
2447 xtcp_ag->ack_to_far_end = tcp->tcp_delegated.recv_next;
2448 if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) { /* KA is running (?) */
2449 if ((tcp->tcp_cached.ka_probe_cnt > 0) && (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0)) {
2450 xtcp_ag->ka_timer = 1;
2451 } else if ((tcp->tcp_cached.ka_probe_cnt == 0) && (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0)) {
2452 if (tcp->tcp_cached.ka_time_out == 0) {/* KA disabled */
2453 xtcp_ag->ka_timer = 0xffffffff;
2454 } else {
2455 if (tcp->tcp_cached.ka_time_out == 0xffffffff) {
2456 xtcp_ag->ka_timer = 0xffffffff;
2457 } else {
2458 xtcp_ag->ka_timer =
2459 tcp->tcp_cached.ka_time_out ?
2460 lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC) :
2461 1 /* value of 0 is not allowed by FW */;
2462 }
2463 }
2464 } else {
2465 if (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0xffffffff) {
2466 xtcp_ag->ka_timer = 0xffffffff;
2467 } else {
2468 xtcp_ag->ka_timer = lm_time_resolution(pdev, tcp->tcp_delegated.u.keep_alive.timeout_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
2469 }
2470 }
2471 } else { /* retransmit is running (?) */
2472 xtcp_ag->ka_timer = 0xffffffff;
2473 }
2474
2475 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2476 xtcp_ag->local_adv_wnd = tcp->tcp_delegated.recv_win_seq;
2477 } else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE) {
2478 /* NirV: Add define to the iscsi HSI */
2479 xtcp_ag->local_adv_wnd = 0xFFFF << ((u16_t)tcp->tcp_const.rcv_seg_scale & 0xf); /* rcv_seg_scale is only 4b long */
2480 }
2481
2482 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2483 if (tcp->rx_con->u.rx.sws_info.mss > 0xffff)
2484 {
2485 DbgBreakIf(tcp->rx_con->u.rx.sws_info.mss > 0xffff);
2486 return LM_STATUS_INVALID_PARAMETER;
2487 }
2488 xtcp_ag->mss = tcp->rx_con->u.rx.sws_info.mss & 0xffff;
2489 } else {
2490 /* we must calc mss here since it is possible that we don't have rx_con (iscsi) */
2491 xtcp_ag->mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
2492 tcp->tcp_const.remote_mss,
2493 (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
2494 tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
2495 pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
2496 tcp->path->neigh->neigh_const.vlan_tag != 0) & 0xfffc; /* MSS value set in the XStorm should be multiple of 4 */
2497
2498 if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
2499 {
2500 if (xtcp_ag->mss < 4)
2501 {
2502 DbgBreakIf(xtcp_ag->mss < 4);
2503 return LM_STATUS_INVALID_PARAMETER;
2504 }
2505 xtcp_ag->mss -= 4; // -4 for data digest
2506 }
2507 }
2508
2509 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2510 /*if persist probes were sent xstorm should be blocked*/
2511 if (tcp->tcp_delegated.snd_wnd_probe_count == 0) {
2512 xtcp_ag->tcp_agg_vars2 |= __XSTORM_TOE_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED;
2513 }
2514 }
2515
2516 /* calculate transmission window */
2517 xtcp_ag->tx_wnd =
2518 (S32_SUB(tcp->tcp_delegated.send_cwin, tcp->tcp_delegated.send_win) > 0) ?
2519 (tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) : /* i.e. ndis_tcp_delegated->SndWnd */
2520 (tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una); /* i.e. ndis_tcp_delegated->CWnd */
2521
2522 /* bugbug: driver workaround - wnd may be 0xffffffff, in this case we change it to 2^30 - since FW has an assumption this value
2523 * doesn't wrap-around, configuring it to 0xffffffff may cause it to wrap around and then change from a very large cwnd to a very
2524 * small one - we give 2^30 which is the largest cwnd that can be advertised. */
2525 if (xtcp_ag->tx_wnd == 0xffffffff) {
2526 xtcp_ag->tx_wnd = 0x40000000;
2527 }
2528
2529 /* check if we are in keepalive. */
2530 if ((tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) && ((xtcp_ag->tx_wnd > 0) || (tcp->tcp_delegated.u.retransmit.retx_ms == 0xffffffff))) { /* KA is enabled (?) */
2531 /* convert to ms.
2532 * the /8 and /4 are a result of some shifts that MSFT does; these numbers were received from MSFT through emails and are
2533 * done the same in Teton. */
2534 sm_rtt = lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/8;
2535 if (sm_rtt > 30000) { /* reduce to 30sec */
2536 sm_rtt = 30000;
2537 }
2538 sm_delta = lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/4;
2539 if (sm_delta > 30000) { /* reduce to 30sec */
2540 sm_delta = 30000;
2541 }
2542 xtcp_ag->rto_timer = (sm_rtt + (sm_delta << 2));
2543 } else { /* retransmit is running (?) */
2544 if (tcp->tcp_delegated.u.retransmit.retx_ms == 0xffffffff) {
2545 xtcp_ag->rto_timer = 0xffffffff;
2546 } else {
2547 xtcp_ag->rto_timer = tcp->tcp_delegated.u.retransmit.retx_ms ? tcp->tcp_delegated.u.retransmit.retx_ms : 1 /* value of 0 is not allowed by FW*/;
2548 /* TODO: retx_ms is already converted in Miniport
2549 * we need to convert retx_ms to clock ticks in VBD instead of
2550 * doing this conversion in NDIS (same as Teton) */
2551 /*tcp->tcp_delegated.u.retransmit.retx_ms ?
2552 lm_time_resolution(pdev, tcp->tcp_delegated.u.retransmit.retx_ms,
2553 1000, TIMERS_TICKS_PER_SEC) :
2554 1 *//* value of 0 is not allowed by FW*/;
2555 }
2556 }
2557 xtcp_ag->snd_nxt = tcp->tcp_delegated.send_next;
2558 xtcp_ag->snd_una = tcp->tcp_delegated.send_una;
2559 xtcp_ag->tcp_agg_vars2 |= XSTORM_TOE_TCP_AG_CONTEXT_SECTION_DA_ENABLE; /* Delayed Acks always on */
2560 xtcp_ag->ts_to_echo = tcp->tcp_delegated.ts_recent;
2561
2562
2563 /* xstorm_st_context */
2564 xtcp_st->ethernet.remote_addr_0 = neigh->neigh_cached.dst_addr[0];
2565 xtcp_st->ethernet.remote_addr_1 = neigh->neigh_cached.dst_addr[1];
2566 xtcp_st->ethernet.remote_addr_2 = neigh->neigh_cached.dst_addr[2];
2567 xtcp_st->ethernet.remote_addr_3 = neigh->neigh_cached.dst_addr[3];
2568 xtcp_st->ethernet.remote_addr_4 = neigh->neigh_cached.dst_addr[4];
2569 xtcp_st->ethernet.remote_addr_5 = neigh->neigh_cached.dst_addr[5];
2570
2571 if (neigh->neigh_const.vlan_tag > 0xfff)
2572 {
2573 DbgBreakIf(neigh->neigh_const.vlan_tag > 0xfff);
2574 return LM_STATUS_INVALID_PARAMETER;
2575 }
2576 xtcp_st->ethernet.vlan_params |= (neigh->neigh_const.vlan_tag << XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT);
2577
2578 if (tcp->tcp_cached.user_priority > 0x7)
2579 {
2580 DbgBreakIf(tcp->tcp_cached.user_priority > 0x7);
2581 return LM_STATUS_INVALID_PARAMETER;
2582 }
2583 xtcp_st->ethernet.vlan_params |= (tcp->tcp_cached.user_priority << XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT);
2584
2585 if ((0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_VLAN_ID)) ||
2586 (0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_CFI)) ||
2587 (0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_PRIORITY)))
2588 {
2589 // This field should be set to 1 whenever an inner VLAN is provided by the OS.
2590 // This flag is relevant for all function modes.
2591 SET_FLAGS( xtcp_st->flags, XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE);
2592 }
2593
2594 xtcp_st->ethernet.local_addr_0 = neigh->neigh_const.src_addr[0];
2595 xtcp_st->ethernet.local_addr_1 = neigh->neigh_const.src_addr[1];
2596 xtcp_st->ethernet.local_addr_2 = neigh->neigh_const.src_addr[2];
2597 xtcp_st->ethernet.local_addr_3 = neigh->neigh_const.src_addr[3];
2598 xtcp_st->ethernet.local_addr_4 = neigh->neigh_const.src_addr[4];
2599 xtcp_st->ethernet.local_addr_5 = neigh->neigh_const.src_addr[5];
2600 xtcp_st->ethernet.reserved_vlan_type = 0x8100;
2601
2602 xtcp_st->ip_version_1b = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? 0 : 1;
2603 if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
2604 /* IPv4*/
2605 xtcp_st->ip_union.padded_ip_v4.ip_v4.ip_remote_addr = path->path_const.u.ipv4.dst_ip;
2606 xtcp_st->ip_union.padded_ip_v4.ip_v4.ip_local_addr = path->path_const.u.ipv4.src_ip;
2607 xtcp_st->ip_union.padded_ip_v4.ip_v4.tos = tcp->tcp_cached.tos_or_traffic_class;
2608 #if DBG
2609 xtcp_st->ip_union.padded_ip_v4.ip_v4.ttl = (tcp->ulp_type == TOE_CONNECTION_TYPE) ? TOE_DBG_TTL : ISCSI_DBG_TTL;
2610 #else
2611 xtcp_st->ip_union.padded_ip_v4.ip_v4.ttl = tcp->tcp_cached.ttl_or_hop_limit;
2612 #endif
2613 src_ip[0] = HTON32(path->path_const.u.ipv4.src_ip);
2614 dst_ip[0] = HTON32(path->path_const.u.ipv4.dst_ip);
2615 pseudo_cs = lm_tcp_calc_tcp_pseudo_checksum(pdev, src_ip, dst_ip, IP_VERSION_IPV4);
2616 } else {
2617 /* IPv6*/
2618 xtcp_st->ip_union.ip_v6.ip_remote_addr_lo_lo = path->path_const.u.ipv6.dst_ip[0];
2619 xtcp_st->ip_union.ip_v6.ip_remote_addr_lo_hi = path->path_const.u.ipv6.dst_ip[1];
2620 xtcp_st->ip_union.ip_v6.ip_remote_addr_hi_lo = path->path_const.u.ipv6.dst_ip[2];
2621 xtcp_st->ip_union.ip_v6.ip_remote_addr_hi_hi = path->path_const.u.ipv6.dst_ip[3];
2622
2623 xtcp_st->ip_union.ip_v6.ip_local_addr_lo_lo = path->path_const.u.ipv6.src_ip[0];
2624 xtcp_st->ip_union.ip_v6.ip_local_addr_lo_hi = path->path_const.u.ipv6.src_ip[1];
2625 xtcp_st->ip_union.ip_v6.ip_local_addr_hi_lo = path->path_const.u.ipv6.src_ip[2];
2626 xtcp_st->ip_union.ip_v6.ip_local_addr_hi_hi = path->path_const.u.ipv6.src_ip[3];
2627
2628 #if DBG
2629 xtcp_st->ip_union.ip_v6.hop_limit = (tcp->ulp_type == TOE_CONNECTION_TYPE) ? TOE_DBG_TTL : ISCSI_DBG_TTL;
2630 #else
2631 xtcp_st->ip_union.ip_v6.hop_limit = tcp->tcp_cached.ttl_or_hop_limit;
2632 #endif
2633 DbgBreakIf(tcp->tcp_cached.flow_label > 0xffff);
2634 xtcp_st->ip_union.ip_v6.priority_flow_label =
2635 tcp->tcp_cached.flow_label << XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT |
2636 tcp->tcp_cached.tos_or_traffic_class << XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT;
2637
2638 for (i = 0; i < 4; i++) {
2639 src_ip[i] = HTON32(path->path_const.u.ipv6.src_ip[i]);
2640 dst_ip[i] = HTON32(path->path_const.u.ipv6.dst_ip[i]);
2641 }
2642 pseudo_cs = lm_tcp_calc_tcp_pseudo_checksum(pdev, src_ip, dst_ip, IP_VERSION_IPV6);
2643 }
2644
2645 xtcp_st->tcp.local_port = tcp->tcp_const.src_port;
2646
2647
2648 xtcp_st->tcp.pseudo_csum = NTOH16(pseudo_cs);
2649 xtcp_st->tcp.remote_port = tcp->tcp_const.dst_port;
2650 xtcp_st->tcp.snd_max = tcp->tcp_delegated.send_max;
2651 if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP) {
2652 xtcp_st->tcp.ts_enabled = 1;
2653 }
2654 if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_SACK) {
2655 xtcp_st->tcp.tcp_params |= XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED;
2656 }
2657 if ((tcp->ulp_type == TOE_CONNECTION_TYPE) && (tcp->tx_con->flags & TCP_FIN_REQ_POSTED)) {
2658 xtcp_st->tcp.tcp_params |= XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG;
2659 }
2660 xtcp_st->tcp.ts_time_diff = tcp->tcp_delegated.tstamp; /* time conversion not required */
2661 xtcp_st->tcp.window_scaling_factor = (u16_t)tcp->tcp_const.rcv_seg_scale & 0xf; /* rcv_seg_scale is only 4b long */
2662
2663 // Set statistics params
2664 if( TOE_CONNECTION_TYPE == tcp->ulp_type )
2665 {
2666 // set counter id
2667 xtcp_st->tcp.statistics_counter_id = LM_STATS_CNT_ID(pdev);
2668
2669 // set enable L2
2670 SET_FLAGS( xtcp_st->tcp.statistics_params, 1<<XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT );
2671
2672 // set enable L4
2673 SET_FLAGS( xtcp_st->tcp.statistics_params, 1<<XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT );
2674 }
2675 if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
2676 {
2677 SET_FLAGS( xtcp_st->flags,(1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT ));
2678
2679 SET_FLAGS( xtcp_st->flags,(PORT_ID(pdev) << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT));
2680 }
2681 return LM_STATUS_SUCCESS;
2682 }
2683
2684
2685 /* init the content of the toe context */
2686 static lm_status_t _lm_tcp_init_tcp_context(
2687 struct _lm_device_t *pdev,
2688 lm_tcp_state_t *tcp)
2689 {
2690 lm_status_t lm_status ;
2691
2692 lm_status = _lm_tcp_init_xstorm_tcp_context(pdev, tcp);
2693 if (lm_status != LM_STATUS_SUCCESS) {
2694 return lm_status;
2695 }
2696
2697 lm_status = _lm_tcp_init_tstorm_tcp_context(pdev, tcp);
2698 if (lm_status != LM_STATUS_SUCCESS) {
2699 return lm_status;
2700 }
2701
2702 return LM_STATUS_SUCCESS;
2703 }
2704
2705 static lm_status_t _lm_tcp_init_iscsi_tcp_related_context(
2706 struct _lm_device_t *pdev,
2707 lm_tcp_state_t *tcp)
2708 {
2709 struct cstorm_iscsi_ag_context * ciscsi_ag = &((struct iscsi_context *)tcp->ctx_virt)->cstorm_ag_context;
2710 struct cstorm_iscsi_st_context * ciscsi_st = &((struct iscsi_context *)tcp->ctx_virt)->cstorm_st_context;
2711 struct xstorm_iscsi_ag_context * xiscsi_ag = &((struct iscsi_context *)tcp->ctx_virt)->xstorm_ag_context;
2712 struct xstorm_iscsi_st_context * xiscsi_st = &((struct iscsi_context *)tcp->ctx_virt)->xstorm_st_context;
2713 struct tstorm_iscsi_ag_context * tiscsi_ag = &((struct iscsi_context *)tcp->ctx_virt)->tstorm_ag_context;
2714 struct tstorm_iscsi_st_context * tiscsi_st = &((struct iscsi_context *)tcp->ctx_virt)->tstorm_st_context;
2715
2716 UNREFERENCED_PARAMETER_(pdev);
2717
2718 ASSERT_STATIC(sizeof(struct cstorm_toe_ag_context) == sizeof(struct cstorm_iscsi_ag_context));
2719 // ASSERT_STATIC(sizeof(struct cstorm_toe_st_context) == sizeof(struct cstorm_iscsi_st_context));
2720 // ASSERT_STATIC(OFFSETOF(struct iscsi_context, cstorm_ag_context)== OFFSETOF(struct toe_context, cstorm_ag_context) ) ;
2721 // ASSERT_STATIC(OFFSETOF(struct iscsi_context, cstorm_st_context)== OFFSETOF(struct toe_context, cstorm_st_context) ) ;
2722
2723 /* cstorm */
2724 ciscsi_ag->rel_seq = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2725 ciscsi_ag->rel_seq_th = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2726 ciscsi_st->hq_tcp_seq = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2727
2728 /* xstorm */
2729 xiscsi_ag->hq_cons_tcp_seq = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2730
2731 /* tstorm */
2732 /* in toe the window right edge is initialized by the doorbell */
2733 /* recv_win_seq */ /* recv next */
2734 tiscsi_ag->tcp.wnd_right_edge = (xiscsi_ag->tcp.local_adv_wnd << xiscsi_st->common.tcp.window_scaling_factor) + xiscsi_ag->tcp.ack_to_far_end;
2735
2736 tiscsi_ag->tcp.wnd_right_edge_local = tiscsi_ag->tcp.wnd_right_edge;
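    /* Worked example (illustrative numbers only): with local_adv_wnd = 0x100, a window
     * scaling factor of 4 and ack_to_far_end = 0x5000, the right edge computed above is
     * (0x100 << 4) + 0x5000 = 0x6000. */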
2737
2738 tiscsi_st->iscsi.process_nxt = tcp->tcp_delegated.recv_next; // same value as rcv_nxt
2739
2740 //xAgCtx->mss = pTcpParams->mss - 4; // -4 for data digest
2741
2742 return LM_STATUS_SUCCESS;
2743 }
2744
2745 /* Description:
2746 * Allocation of CID for a new TCP connection to be offloaded,
2747 * Initiation of connection's context line as required by FW.
2748 * Assumptions:
2749 * - lm_tcp_init_tcp_state, lm_tcp_init_rx_con/tx_con already called
2750 * - send unacked data already posted
2751 * - If the TCP is in states FinWait1, Closing or LastAck,
2752 * FIN is already posted to the tx chain
2753 * - Called under connection lock: since it can be called from either initiate-ofld
2754 * or recycle-cid (before ofld had the chance to complete)
2755 * Returns:
2756 * SUCCESS or any failure */
2757 static lm_status_t lm_tcp_init_tcp_context(
2758 struct _lm_device_t *pdev,
2759 lm_tcp_state_t *tcp)
2760 {
2761 s32_t cid;
2762 lm_status_t lm_status;
2763 lm_4tuple_t tuple = {{0}};
2764 u32_t expect_rwin;
2765 u8_t i;
2766
2767 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_tcp_context\n");
2768
2769 /* NirV: allocate cid is getting back here */
2770 /* allocate cid only if cid==0: we may re-enter this function after a cid has already been allocated */
2771 if (tcp->cid == 0)
2772 {
2773 lm_status = lm_allocate_cid(pdev, TOE_CONNECTION_TYPE, (void*)tcp, &cid);
2774 if(lm_status == LM_STATUS_RESOURCE){
2775 DbgMessage(pdev, WARNl4sp, "lm_tcp_init_tcp_state: Failed in allocating cid\n");
2776 return LM_STATUS_RESOURCE;
2777 } else if (lm_status == LM_STATUS_PENDING) {
2778 lm_sp_req_manager_block(pdev, (u32_t)cid);
2779 }
2780 tcp->cid = (u32_t)cid;
2781 }
2782
2783 if (lm_cid_state(pdev, tcp->cid) == LM_CID_STATE_PENDING) {
2784 return LM_STATUS_SUCCESS; /* Too soon to initialize context */
2785 }
2786
2787 /* Validate some of the offload parameters - only relevant for TOE. */
2788 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2789 tcp->rx_con->u.rx.sws_info.extra_bytes = 0;
2790 if (tcp->rx_con->u.rx.gen_info.peninsula_nbytes > tcp->tcp_cached.initial_rcv_wnd) {
2791 tcp->rx_con->u.rx.sws_info.extra_bytes = tcp->rx_con->u.rx.gen_info.peninsula_nbytes - tcp->tcp_cached.initial_rcv_wnd;
2792 tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_next;
2793 tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_next;
2794 DbgMessage(pdev, INFORMl4sp, "lm_tcp_init_tcp_state: pnb:%x, irw:%x, ext:%x, rnx:%x\n",tcp->rx_con->u.rx.gen_info.peninsula_nbytes,
2795 tcp->tcp_cached.initial_rcv_wnd,tcp->rx_con->u.rx.sws_info.extra_bytes,tcp->tcp_delegated.recv_next);
2796 } else {
2797 expect_rwin = (u32_t)S32_SUB(
2798 tcp->tcp_delegated.recv_win_seq,
2799 tcp->tcp_delegated.recv_next);
2800 expect_rwin += tcp->rx_con->u.rx.gen_info.peninsula_nbytes;
2801
2802             /* WorkAround for LH: the fields received at offload should satisfy the equation below,
2803              * but in LH this is not the case. TBA: add an assert that we are on an LH operating system */
2804 DbgMessage(pdev, INFORMl4sp, "lm_tcp_init_tcp_state: pnb:%x, irw:%x, rws:%x, rnx:%x\n",tcp->rx_con->u.rx.gen_info.peninsula_nbytes,
2805 tcp->tcp_cached.initial_rcv_wnd,
2806 tcp->tcp_delegated.recv_win_seq,
2807 tcp->tcp_delegated.recv_next);
2808 if (ERR_IF(expect_rwin != tcp->tcp_cached.initial_rcv_wnd)) {
2809 u32_t delta;
2810 /* move tcp_delegated.recv_next accordingly */
2811 if (expect_rwin > tcp->tcp_cached.initial_rcv_wnd) {
2812 delta = expect_rwin - tcp->tcp_cached.initial_rcv_wnd;
2813 tcp->tcp_delegated.recv_win_seq -= delta;
2814 } else {
2815 delta = tcp->tcp_cached.initial_rcv_wnd - expect_rwin;
2816 tcp->tcp_delegated.recv_win_seq += delta;
2817 }
2818 /* Need to also update the driver win right edge */
2819 tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
2820 tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
2821 }
2822 }
2823 }
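    /* Worked example of the reconciliation above (illustrative numbers only): with
     * recv_win_seq = 0x1000, recv_next = 0x0C00 and peninsula_nbytes = 0x200,
     * expect_rwin = (0x1000 - 0x0C00) + 0x200 = 0x600; with initial_rcv_wnd = 0x800 the
     * delta is 0x200, so recv_win_seq is advanced to 0x1200 and the driver's receive
     * window right edge follows it. */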
2824 /* insert 4 tuple to searcher's mirror hash */
2825 if(tcp->path->path_const.ip_version == IP_VERSION_IPV4) { /* IPV4 */
2826 tuple.ip_type = LM_IP_TYPE_V4;
2827 tuple.dst_ip[0] = tcp->path->path_const.u.ipv4.dst_ip;
2828 tuple.src_ip[0] = tcp->path->path_const.u.ipv4.src_ip;
2829 } else {
2830 tuple.ip_type = LM_IP_TYPE_V6;
2831 for (i = 0; i < 4; i++) {
2832 tuple.dst_ip[i] = tcp->path->path_const.u.ipv6.dst_ip[i];
2833 tuple.src_ip[i] = tcp->path->path_const.u.ipv6.src_ip[i];
2834 }
2835 }
2836 tuple.src_port = tcp->tcp_const.src_port;
2837 tuple.dst_port = tcp->tcp_const.dst_port;
2838 if (lm_searcher_mirror_hash_insert(pdev, tcp->cid, &tuple) != LM_STATUS_SUCCESS) {
2839 DbgMessage(pdev, WARNl4sp, "lm_tcp_init_tcp_context: Failed inserting tuple to SRC hash\n");
2840 tcp->in_searcher = 0;
2841 return LM_STATUS_RESOURCE;
2842 }
2843 tcp->in_searcher = 1;
2844
2845 /* get context */
2846 tcp->ctx_virt = (struct toe_context *)lm_get_context(pdev, tcp->cid);
2847 if (!tcp->ctx_virt) {
2848 DbgBreakIf(!tcp->ctx_virt);
2849 return LM_STATUS_FAILURE;
2850 }
2851
2852 tcp->ctx_phys.as_u64 = lm_get_context_phys(pdev, tcp->cid);
2853 if (!tcp->ctx_phys.as_u64) {
2854 DbgBreakIf(!tcp->ctx_phys.as_u64);
2855 return LM_STATUS_FAILURE;
2856 }
2857 DbgMessage(pdev, VERBOSEl4sp,
2858 "tcp->ctx_virt=%p, tcp->ctx_phys_high=%x, tcp->ctx_phys_low=%x\n",
2859 tcp->ctx_virt, tcp->ctx_phys.as_u32.high, tcp->ctx_phys.as_u32.low);
2860
2861 /* init the content of the context */
2862 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2863 lm_status = _lm_tcp_init_toe_context(pdev, tcp);
2864 if (lm_status != LM_STATUS_SUCCESS) {
2865 return lm_status;
2866 }
2867 }
2868
2869 lm_status = _lm_tcp_init_tcp_context(pdev, tcp);
2870 if (lm_status != LM_STATUS_SUCCESS) {
2871 return lm_status;
2872 }
2873
2874 /* iscsi / toe contexts are initialized separately, only the tcp section is common, HOWEVER, in iscsi
2875 * most of the context is initialized in the l5_ofld_stage, but some of the context initialization is based on tcp
2876 * params, that's why we need to complete it here... */
2877 if (tcp->ulp_type == ISCSI_CONNECTION_TYPE) {
2878 lm_status = _lm_tcp_init_iscsi_tcp_related_context(pdev, tcp);
2879 if (lm_status != LM_STATUS_SUCCESS) {
2880 return lm_status;
2881 }
2882 }
2883
2884 return LM_STATUS_SUCCESS;
2885 }
2886
2887 /** Description
2888  * Callback function for cids being recycled
2889 */
2890 void lm_tcp_recycle_cid_cb(
2891 struct _lm_device_t *pdev,
2892 void *cookie,
2893 s32_t cid)
2894 {
2895 lm_tcp_state_t *tcp = (lm_tcp_state_t *)cookie;
2896 lm_sp_req_common_t *sp_req = NULL;
2897 MM_ACQUIRE_TOE_LOCK(pdev);
2898
2899 /* un-block the manager... */
2900 lm_set_cid_state(pdev, tcp->cid, LM_CID_STATE_VALID);
2901
2902     /* if the ofld flow already got to the ofld workitem, only now can we use the context;
2903        otherwise, we'll get to init_tcp_context later on */
2904 if (tcp->hdr.status == STATE_STATUS_INIT_CONTEXT)
2905 {
2906 lm_tcp_init_tcp_context(pdev,tcp);
2907 }
2908
2909 /* we can now unblock any pending slow-paths */
2910 lm_sp_req_manager_unblock(pdev,cid, &sp_req);
2911
2912
2913 MM_RELEASE_TOE_LOCK(pdev);
2914 }
2915
2916 /* This function needs to complete a pending slowpath toe request. Unfortunately it needs
2917 * to take care of all the steps done in lm_toe_service_rx_intr and lm_toe_service_tx_intr,
2918 * process the cqe, and complete slowpath...
2919 */
2920 void lm_tcp_comp_cb(struct _lm_device_t *pdev, struct sq_pending_command *pending)
2921 {
2922 lm_tcp_state_t * tcp = NULL;
2923 lm_tcp_con_t * rx_con = NULL;
2924 lm_tcp_con_t * tx_con = NULL;
2925 struct toe_rx_cqe rx_cqe = {0};
2926 struct toe_tx_cqe tx_cqe = {0};
2927 u8_t i = 0;
2928 u8_t cmp_rx = FALSE;
2929 u8_t cmp_tx = FALSE;
2930
2931 MM_INIT_TCP_LOCK_HANDLE();
2932
2933 tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, pending->cid);
2934     /* It is possible that tcp is NULL for ramrods that are context-less (RSS for example) */
2935 if (tcp)
2936 {
2937 rx_con = tcp->rx_con;
2938 tx_con = tcp->tx_con;
2939 }
2940
2941 #define LM_TCP_SET_CQE(_param, _cid, _cmd) \
2942 (_param) = (((_cid) << TOE_RX_CQE_CID_SHIFT) & TOE_RX_CQE_CID) | \
2943 (((_cmd) << TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) & TOE_RX_CQE_COMPLETION_OPCODE);
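    /* For illustration only (the real bit layout comes from the TOE_RX_CQE_* HSI defines
     * used above) - assuming, hypothetically, that the opcode occupies the low bits and
     * the cid the bits above it, the macro expands roughly as: */
    //  /* cid = 5, cmd = RAMROD_OPCODE_TOE_TERMINATE */
    //  rx_cqe.params1 = ((5 << TOE_RX_CQE_CID_SHIFT) & TOE_RX_CQE_CID) |
    //                   ((RAMROD_OPCODE_TOE_TERMINATE << TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) &
    //                    TOE_RX_CQE_COMPLETION_OPCODE);
    //  /* the fabricated CQE is then fed to lm_tcp_rx_process_cqe() / lm_tcp_tx_process_cqe()
    //   * below exactly as if it had arrived from the firmware */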
2944
2945 switch (pending->cmd)
2946 {
2947 case RAMROD_OPCODE_TOE_INIT:
2948 DbgBreakMsg("Not Supported\n");
2949 break;
2950 case RAMROD_OPCODE_TOE_INITIATE_OFFLOAD:
2951 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD);
2952 cmp_rx = TRUE;
2953 break;
2954 case RAMROD_OPCODE_TOE_SEARCHER_DELETE:
2955 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_SEARCHER_DELETE);
2956 cmp_rx = TRUE;
2957 break;
2958 case RAMROD_OPCODE_TOE_TERMINATE:
2959         /* The ramrod may have completed on the tx / rx side only, so whether or not to complete it here depends not
2960          * only on the type but also on the state of the sp_request... */
2961 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_TERMINATE);
2962 cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2963 LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_TERMINATE);
2964         cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2965 break;
2966 case RAMROD_OPCODE_TOE_QUERY:
2967 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_QUERY);
2968 cmp_rx = TRUE;
2969 break;
2970 case RAMROD_OPCODE_TOE_RESET_SEND:
2971 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_RESET_SEND);
2972 cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2973 LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_RESET_SEND);
2974 cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2975 break;
2976 case RAMROD_OPCODE_TOE_EMPTY_RAMROD:
2977 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_EMPTY_RAMROD);
2978 cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2979 LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_EMPTY_RAMROD);
2980 cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2981 break;
2982 case RAMROD_OPCODE_TOE_INVALIDATE:
2983 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_INVALIDATE);
2984 cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2985 LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_INVALIDATE);
2986 cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2987 break;
2988 case RAMROD_OPCODE_TOE_UPDATE:
2989 LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_UPDATE);
2990 cmp_rx = TRUE;
2991 break;
2992 case RAMROD_OPCODE_TOE_RSS_UPDATE:
2993         /* This one is special: it is not treated like the other ramrods, so we return rather than break
2994          * at the end of this case... */
2995 /* a bit of a hack here... we only want to give one completion and not on all
2996 * rcq-chains, so we update the counters and decrease all l4 rss chains
2997 * except one. then we give the completion to just one chain which should take care
2998 * of completing the sq and if L2 ramrod has completed already it will also comp
2999 * back to OS */
3000 for (i = 0; i < pdev->params.l4_rss_chain_cnt-1; i++)
3001 {
3002 mm_atomic_dec(&pdev->params.update_toe_comp_cnt);
3003 mm_atomic_dec(&pdev->params.update_comp_cnt);
3004 mm_atomic_dec(&pdev->params.update_suspend_cnt);
3005 }
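        /* Example: with l4_rss_chain_cnt == 4 the loop above applies three decrements to
         * each counter, leaving a single outstanding reference that is accounted for by
         * the one completion delivered just below. */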
3006 lm_tcp_rss_update_ramrod_comp(pdev,
3007 &pdev->toe_info.rcqs[LM_TOE_BASE_RSS_ID(pdev)],
3008 pending->cid,
3009 TOE_RSS_UPD_QUIET /* doesn't really matter*/,
3010 TRUE);
3011
3012 return;
3013 }
3014 /* process the cqes and initialize connections with all the connections that appeared
3015 * in the DPC */
3016 if (cmp_rx)
3017 {
3018         lm_tcp_rx_process_cqe(pdev, &rx_cqe, tcp, 0 /* d/c for slowpath */);
3019 /* FP: no need to call complete_tcp_fp since we're only completing slowpath, but we do
3020 * need to move the flags for sake of next function */
3021 rx_con->dpc_info.snapshot_flags = rx_con->dpc_info.dpc_flags;
3022 rx_con->dpc_info.dpc_flags = 0;
3023
3024 /* we access snapshot and not dpc, since once the dpc_flags were copied
3025 * to snapshot they were zeroized */
3026 lm_tcp_rx_complete_tcp_sp(pdev, tcp, rx_con);
3027 }
3028
3029 /* process the cqes and initialize connections with all the connections that appeared
3030 * in the DPC */
3031 if (cmp_tx)
3032 {
3033 lm_tcp_tx_process_cqe(pdev, &tx_cqe, tcp);
3034 /* FP: no need to call complete_tcp_fp since we're only completing slowpath, but we do
3035 * need to move the flags for sake of next function */
3036 tx_con->dpc_info.snapshot_flags = tx_con->dpc_info.dpc_flags;
3037 tx_con->dpc_info.dpc_flags = 0;
3038
3039 /* we access snapshot and not dpc, since once the dpc_flags were copied
3040 * to snapshot they were zeroized */
3041 lm_tcp_tx_complete_tcp_sp(pdev, tcp, tx_con);
3042 }
3043
3044 }
3045
3046 /* Description:
3047 * - init TCP state according to its TCP state machine's state
3048 * Assumptions:
3049 * - lm_tcp_init_tcp_state, lm_tcp_init_rx_con/tx_con already called
3050 * - send unacked data already posted
3051 * Returns:
3052 * SUCCESS or any failure */
3053 static lm_status_t lm_tcp_init_tcp_state_machine(
3054 struct _lm_device_t *pdev,
3055 lm_tcp_state_t *tcp)
3056 {
3057 lm_tcp_con_t *con = tcp->rx_con;
3058 lm_tcp_state_calculation_t *state_calc = &tcp->tcp_state_calc;
3059 u64_t curr_time = 0;
3060 lm_status_t lm_status = LM_STATUS_SUCCESS;
3061
3062 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_tcp_state_machine\n");
3063
3064     /* initialize the times in the state calculation struct
3065 according to delegated.con_state */
3066
3067 state_calc->fin_request_time = state_calc->fin_completed_time =
3068 state_calc->fin_reception_time = 0;
3069 curr_time = mm_get_current_time(pdev);
3070
3071 switch (tcp->tcp_delegated.con_state) {
3072 case L4_TCP_CON_STATE_ESTABLISHED:
3073 break;
3074 case L4_TCP_CON_STATE_FIN_WAIT1:
3075 DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state FIN_WAIT1 (tcp=%p)\n", tcp);
3076 state_calc->fin_request_time = curr_time;
3077 break;
3078 case L4_TCP_CON_STATE_FIN_WAIT2:
3079 DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state FIN_WAIT2 (tcp=%p)\n", tcp);
3080 state_calc->fin_request_time = curr_time - 1;
3081 state_calc->fin_completed_time = curr_time;
3082 break;
3083 case L4_TCP_CON_STATE_CLOSE_WAIT:
3084 DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state CLOSE_WAIT (tcp=%p)\n", tcp);
3085 state_calc->fin_reception_time = curr_time;
3086 break;
3087 case L4_TCP_CON_STATE_CLOSING:
3088 DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state CLOSING (tcp=%p)\n", tcp);
3089 state_calc->fin_request_time = curr_time - 1;
3090 state_calc->fin_reception_time = curr_time;
3091 break;
3092 case L4_TCP_CON_STATE_LAST_ACK:
3093 DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state LAST_ACK (tcp=%p)\n", tcp);
3094 state_calc->fin_reception_time = curr_time - 1;
3095 state_calc->fin_request_time = curr_time;
3096 break;
3097 default:
3098 DbgMessage(pdev, FATAL,
3099 "Initiate offload in con state=%d is not allowed by WDK!\n",
3100 tcp->tcp_delegated.con_state);
3101 DbgBreak();
3102 return LM_STATUS_FAILURE;
3103 }
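    /* Summary of the timestamp assignments above (t == curr_time):
     *   FIN_WAIT1:  fin_request_time = t
     *   FIN_WAIT2:  fin_request_time = t-1, fin_completed_time = t
     *   CLOSE_WAIT: fin_reception_time = t
     *   CLOSING:    fin_request_time = t-1, fin_reception_time = t
     *   LAST_ACK:   fin_reception_time = t-1, fin_request_time = t
     * The relative ordering (t-1 vs. t) records which event happened first, presumably so
     * that lm_tcp_calc_state() can later re-derive the TCP state from these times. */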
3104
3105     /* In case the TCP state is CloseWait, Closing or LastAck, the Rx con
3106      * should be initialized as if a remote FIN was already received */
3107
3108 if (state_calc->fin_reception_time) {
3109 /* remote FIN was already received */
3110 DbgBreakIf(con->flags & TCP_REMOTE_FIN_RECEIVED);
3111 con->flags |= TCP_REMOTE_FIN_RECEIVED;
3112
3113 if (con->flags & TCP_INDICATE_REJECTED) {
3114 /* GilR: TODO - is this case really possible [fin received+buffered data given]? If so, does NDIS really expect the fin received indication? */
3115 /* buffered data exists, defer FIN indication */
3116 con->u.rx.flags |= TCP_CON_FIN_IND_PENDING;
3117 } else {
3118 /* no buffered data, simulate that remote FIN already indicated */
3119 con->flags |= TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED;
3120 con->flags |= TCP_BUFFERS_ABORTED;
3121 }
3122 }
3123
3124 con = tcp->tx_con;
3125 /* check if local FIN was already sent, and if it was acknowledged */
3126 if (state_calc->fin_completed_time) {
3127 /* FIN already sent and acked */
3128 volatile struct toe_tx_db_data *db_data = con->db_data.tx;
3129 DbgBreakIf(!state_calc->fin_request_time);
3130 DbgBreakIf(!s_list_is_empty(&con->active_tb_list));
3131 con->flags |= (TCP_FIN_REQ_POSTED | TCP_FIN_REQ_COMPLETED);
3132 db_data->flags |= (TOE_TX_DB_DATA_FIN << TOE_TX_DB_DATA_FIN_SHIFT);
3133 db_data->bytes_prod_seq--;
3134 } else if (state_calc->fin_request_time) {
3135 /* FIN was already sent but not acked */
3136
3137         /* GilR 11/12/2006 - TODO - we do not take the tx lock here, verify that it's ok... */
3138 /* We want to make sure we'll be able to post the tcp buffer but
3139 * NOT ring the doorbell */
3140 DbgBreakIf(con->flags & TCP_DB_BLOCKED);
3141 con->flags |= TCP_DB_BLOCKED;
3142 DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
3143 con->flags &= ~TCP_POST_BLOCKED; /* posting is temporary allowed */
3144
3145 con->u.tx.flags |= TCP_CON_FIN_REQ_LM_INTERNAL;
3146 lm_status = lm_tcp_graceful_disconnect(pdev, tcp);
3147 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
3148
3149 /* retrieve initial state */
3150 con->flags &= ~TCP_DB_BLOCKED;
3151 con->flags |= TCP_POST_BLOCKED; /* posting is no longer allowed*/
3152 }
3153
3154 return LM_STATUS_SUCCESS;
3155 }
3156
3157
3158 /* Description:
3159 * - call lm_tcp_init_tcp_state_machine
3160 * - call lm_tcp_init_tcp_context
3161 * Assumptions:
3162 * - lm_tcp_init_tcp_state, lm_tcp_init_rx_con/tx_con already called
3163 * - send unacked data already posted
3164 * Returns:
3165 * SUCCESS or any failure */
3166 lm_status_t lm_tcp_init_tcp_common(
3167 struct _lm_device_t *pdev,
3168 lm_tcp_state_t *tcp)
3169 {
3170 lm_status_t lm_status = LM_STATUS_SUCCESS;
3171
3172 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tcp_common\n");
3173 DbgBreakIf(!(pdev && tcp));
3174
3175 lm_status = lm_tcp_init_tcp_state_machine(pdev, tcp);
3176 if (lm_status != LM_STATUS_SUCCESS) {
3177 return lm_status;
3178 }
3179
3180 lm_status = lm_tcp_init_tcp_context(pdev, tcp);
3181 if (lm_status != LM_STATUS_SUCCESS) {
3182 return lm_status;
3183 }
3184
3185 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
3186 tcp->rx_con->u.rx.gen_info.dont_send_to_system_more_then_rwin = FALSE; //TRUE;
3187 }
3188
3189 return LM_STATUS_SUCCESS;
3190 }
3191
3192
3193 static void _lm_tcp_comp_upload_neigh_request(
3194 struct _lm_device_t * pdev,
3195 lm_neigh_state_t * neigh_state)
3196 {
3197 DbgBreakIf(neigh_state->hdr.status != STATE_STATUS_UPLOAD_PENDING);
3198 DbgBreakIf(neigh_state->hdr.state_id != STATE_ID_NEIGH);
3199
3200 DbgBreakIf(neigh_state->num_dependents);
3201
3202 neigh_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3203 mm_tcp_complete_neigh_upload_request(pdev, neigh_state);
3204 }
3205
3206
3207 /** Description
3208 * upload path state
3209 * Assumptions:
3210 * called under TOE-lock
3211 */
3212 static void _lm_tcp_comp_upload_path_request(
3213 struct _lm_device_t * pdev,
3214 lm_path_state_t * path_state)
3215 {
3216 lm_neigh_state_t * neigh = NULL;
3217
3218 DbgBreakIf(path_state->hdr.status != STATE_STATUS_UPLOAD_PENDING);
3219 DbgBreakIf(path_state->hdr.state_id != STATE_ID_PATH);
3220
3221 path_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3222
3223 DbgBreakIf(path_state->neigh->num_dependents == 0);
3224 path_state->neigh->num_dependents--;
3225 if ((path_state->neigh->num_dependents == 0) &&
3226 (path_state->neigh->hdr.status == STATE_STATUS_UPLOAD_PENDING)) {
3227 /* Time to release the neighbor resources...*/
3228 neigh = path_state->neigh;
3229 }
3230 path_state->neigh = NULL;
3231
3232 DbgBreakIf(path_state->num_dependents);
3233
3234 mm_tcp_complete_path_upload_request(pdev, path_state);
3235
3236 if (neigh) {
3237 _lm_tcp_comp_upload_neigh_request(pdev, neigh);
3238 }
3239 }
3240
3241
3242 /* post initiate offload slow path ramrod
3243 * returns SUCCESS or any failure */
3244 static lm_status_t lm_tcp_post_initiate_offload_request(
3245 struct _lm_device_t *pdev,
3246 lm_tcp_state_t *tcp,
3247 u8_t *command,
3248 u64_t *data)
3249 {
3250 lm_tcp_con_t *con = tcp->tx_con;
3251 int i = 0;
3252 MM_INIT_TCP_LOCK_HANDLE();
3253
3254 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_post_initiate_offload_request\n");
3255 DbgBreakIf(tcp->hdr.status != STATE_STATUS_INIT_CONTEXT);
3256 tcp->hdr.status = STATE_STATUS_OFFLOAD_PENDING;
3257
3258 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
3259 con = tcp->tx_con;
3260 for (i = 0; i < 2; i++) {
3261 mm_acquire_tcp_lock(pdev, con);
3262 DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
3263 DbgBreakIf(!(con->flags & TCP_COMP_BLOCKED));
3264 con->flags &= ~TCP_COMP_BLOCKED;
3265 con->flags |= TCP_COMP_DEFERRED; /* completions are now allowed but deferred */
3266 mm_release_tcp_lock(pdev, con);
3267 con = tcp->rx_con;
3268 }
3269 }
3270
3271 tcp->sp_flags |= SP_TCP_OFLD_REQ_POSTED;
3272 *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_INITIATE_OFFLOAD : L5CM_RAMROD_CMD_ID_ADD_NEW_CONNECTION;
3273 *data = tcp->ctx_phys.as_u64;
3274
3275 return LM_STATUS_PENDING;
3276 }
3277
3278
3279 static lm_status_t lm_tcp_post_terminate_tcp_request (
3280 IN struct _lm_device_t * pdev,
3281 IN lm_tcp_state_t * tcp,
3282 OUT u8_t * command,
3283 OUT u64_t * data
3284 )
3285 {
3286 DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_terminate_tcp_request\n");
3287
3288 DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
3289
3290 lm_tcp_flush_db(pdev,tcp);
3291
3292 SET_FLAGS(tcp->sp_flags, SP_TCP_TRM_REQ_POSTED );
3293
3294 *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_TERMINATE : L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3295 *data = 0;
3296
3297 return LM_STATUS_PENDING;
3298 }
3299
3300 /**
3301 Description:
3302 * Posts RST request.
3303 *
3304 * Assumptions:
3305 * - Global TOE lock is already taken by the caller.
3306 *
3307 * Returns:
3308 * SUCCESS or any failure
3309 *
3310 */
3311 static lm_status_t lm_tcp_post_abortive_disconnect_request (
3312 IN struct _lm_device_t * pdev,
3313 IN lm_tcp_state_t * tcp,
3314 OUT u8_t * command,
3315 OUT u64_t * data
3316 )
3317 {
3318 /* Get Rx and Tx connections */
3319 lm_tcp_con_t *rx_con = tcp->rx_con;
3320 lm_tcp_con_t *tx_con = tcp->tx_con;
3321
3322 MM_INIT_TCP_LOCK_HANDLE();
3323
3324 DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_abortive_disconnect_request\n");
3325 DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL ) &&
3326 (tcp->hdr.status != STATE_STATUS_ABORTED) );
3327
3328 /*********************** Tx **********************/
3329 /* Take Tx lock */
3330 mm_acquire_tcp_lock(pdev, tx_con);
3331
3332 /* This will imply Tx POST_BLOCKED */
3333 tx_con->flags |= TCP_RST_REQ_POSTED;
3334
3335 /* Release Tx lock */
3336 mm_release_tcp_lock(pdev, tx_con);
3337
3338 /*********************** Rx **********************/
3339 /* Take Rx lock */
3340 mm_acquire_tcp_lock(pdev, rx_con);
3341
3342 /* This will imply Rx POST_BLOCKED and IND_BLOCKED */
3343 rx_con->flags |= TCP_RST_REQ_POSTED;
3344
3345 /* Release Rx lock */
3346 mm_release_tcp_lock(pdev, rx_con);
3347 /**************Post the ramrod *******************/
3348 *command = RAMROD_OPCODE_TOE_RESET_SEND;
3349 *data = 0;
3350
3351 return LM_STATUS_PENDING;
3352 }
3353
3354
3355 /**
3356 Description:
3357 * Initiates the TCP connection upload process.
3358 * Posts a Searcher ramrod to the chip.
3359 *
3360 * Assumptions:
3361 * - Global TOE lock is already taken by the caller.
3362 * - UM caller has allocated "struct toe_context" phys. cont. buffer
3363 * and put its address to "data.phys_addr".
3364 * Returns:
3365 * SUCCESS or any failure
3366 *
3367 */
3368 static lm_status_t lm_tcp_post_upload_tcp_request (
3369 IN struct _lm_device_t * pdev,
3370 IN lm_tcp_state_t * tcp,
3371 OUT u8_t * command,
3372 OUT u64_t * data
3373 )
3374 {
3375 lm_tcp_con_t *rx_con, *tx_con = NULL;
3376 struct toe_spe spe = {{0}};
3377 MM_INIT_TCP_LOCK_HANDLE();
3378
3379 DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_upload_tcp_request\n");
3380 DbgBreakIf(tcp->hdr.status < STATE_STATUS_NORMAL);
3381 DbgBreakIf(tcp->hdr.status >= STATE_STATUS_UPLOAD_PENDING);
3382 DbgBreakIf(tcp->hdr.state_id != STATE_ID_TCP);
3383
3384
3385 /* Set the status of the connection to UPLOAD_PENDING */
3386 tcp->hdr.status = STATE_STATUS_UPLOAD_PENDING;
3387
3388 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
3389 /* Get Rx and Tx connections */
3390 rx_con = tcp->rx_con;
3391 tx_con = tcp->tx_con;
3392
3393 /* Set the flags for the connections (Rx and Tx) */
3394 /* Tx */
3395 mm_acquire_tcp_lock(pdev, tx_con);
3396 DbgBreakIf(tx_con->flags & TCP_TRM_REQ_POSTED);
3397 tx_con->flags |= TCP_TRM_REQ_POSTED;
3398 mm_release_tcp_lock(pdev, tx_con);
3399 /* Rx */
3400 mm_acquire_tcp_lock(pdev, rx_con);
3401 DbgBreakIf(rx_con->flags & TCP_TRM_REQ_POSTED);
3402 rx_con->flags |= TCP_TRM_REQ_POSTED;
3403 mm_release_tcp_lock(pdev, rx_con);
3404 }
3405
3406 tcp->sp_flags |= SP_TCP_SRC_REQ_POSTED;
3407
3408 *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_SEARCHER_DELETE : L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3409 spe.toe_data.rx_completion.hash_value = (u16_t)(tcp->tcp_const.hash_value);
3410 *data = *((u64_t*)(&(spe.toe_data.rx_completion)));
3411
3412 return LM_STATUS_PENDING;
3413 }
3414
3415 static lm_status_t lm_tcp_post_query_request (
3416 IN struct _lm_device_t * pdev,
3417 IN lm_tcp_state_t * tcp,
3418 OUT u8_t * command,
3419 OUT u64_t * data,
3420 IN lm_tcp_slow_path_request_t * request
3421 )
3422 {
3423 struct toe_spe spe = {{0}};
3424
3425 UNREFERENCED_PARAMETER_(request);
3426
3427 DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_query_request\n");
3428
3429 tcp->sp_flags |= SP_TCP_QRY_REQ_POSTED;
3430 *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_QUERY : L5CM_RAMROD_CMD_ID_QUERY;
3431
3432 mm_memset(tcp->sp_req_data.virt_addr, 0, TOE_SP_PHYS_DATA_SIZE);
3433
3434 spe.toe_data.phys_addr.hi = tcp->sp_req_data.phys_addr.as_u32.high;
3435 spe.toe_data.phys_addr.lo = tcp->sp_req_data.phys_addr.as_u32.low;
3436 *data = *((u64_t*)(&(spe.toe_data.phys_addr)));
3437
3438 return LM_STATUS_PENDING;
3439 }
3440
3441 lm_status_t lm_tcp_post_upload_path_request (
3442 struct _lm_device_t * pdev,
3443 lm_path_state_t * path_state,
3444 l4_path_delegated_state_t * ret_delegated)
3445 {
3446
3447 DbgBreakIf(path_state->hdr.status != STATE_STATUS_NORMAL);
3448 DbgBreakIf(path_state->hdr.state_id != STATE_ID_PATH);
3449
3450 /* MichalS TBA: do we need this? (also in spec ('ipv4_current_ip_id' unclear)) */
3451 *ret_delegated = path_state->path_delegated;
3452
3453 DbgMessage(pdev, INFORMl4sp, "lm_tcp_post_upload_path_request: num_dependents=%d\n", path_state->num_dependents);
3454
3455 if (path_state->num_dependents == 0) {
3456 path_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3457 return LM_STATUS_SUCCESS;
3458 }
3459 path_state->hdr.status = STATE_STATUS_UPLOAD_PENDING;
3460 return LM_STATUS_PENDING;
3461
3462 }
3463
3464 lm_status_t lm_tcp_post_upload_neigh_request(
3465 struct _lm_device_t * pdev,
3466 lm_neigh_state_t * neigh_state
3467 )
3468 {
3469 DbgBreakIf(neigh_state->hdr.status != STATE_STATUS_NORMAL);
3470 DbgBreakIf(neigh_state->hdr.state_id != STATE_ID_NEIGH);
3471
3472 DbgMessage(pdev, INFORMl4sp, "lm_tcp_post_upload_neigh_request: num_dependents=%d\n", neigh_state->num_dependents);
3473
3474 #if DBG
3475 {
3476 /* NirV: multi client todo */
3477 lm_path_state_t * path = (lm_path_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.path_list);
3478 while(path) {
3479 if(path->neigh == neigh_state) {
3480 DbgBreakIf(path->hdr.status == STATE_STATUS_NORMAL);
3481 }
3482 path = (lm_path_state_t *) d_list_next_entry(&path->hdr.link);
3483 }
3484 }
3485 #endif
3486
3487 if (neigh_state->num_dependents == 0) {
3488 neigh_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3489 return LM_STATUS_SUCCESS;
3490 }
3491 neigh_state->hdr.status = STATE_STATUS_UPLOAD_PENDING;
3492 return LM_STATUS_PENDING;
3493
3494 }
3495
3496 /* sets the cached parameters of tcp/path/neigh and initializes a toe_context (which is initially all zeros) */
3497 static lm_status_t lm_tcp_set_tcp_cached(
3498 struct _lm_device_t * pdev,
3499 lm_tcp_state_t * tcp,
3500 l4_tcp_cached_state_t * tcp_cached,
3501 void * mem_virt /* firmware context */
3502 )
3503 {
3504 struct toe_update_ramrod_cached_params * ctx = mem_virt;
3505 l4_ofld_params_t * l4_params = &(pdev->ofld_info.l4_params);
3506
3507 MM_INIT_TCP_LOCK_HANDLE();
3508
3509 /* tcp-flags */
3510 DbgMessage(pdev, INFORMl4sp, "## lm_tcp_set_tcp_cached cid=%d\n", tcp->cid);
3511
3512 if ((tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) !=
3513 (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE)) {
3514 if (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) {
3515 ctx->enable_keepalive = 1;
3516 } else {
3517 ctx->enable_keepalive = 0;
3518 }
3519 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_ENABLE_KEEPALIVE_CHANGED;
3520 DbgMessage(pdev, INFORMl4sp, "## tcp_cached: [cid=%d] update : flag TCP_FLAG_ENABLE_KEEP_ALIVE changed to %d\n",
3521 tcp->cid, ctx->enable_keepalive);
3522 }
3523 if ((tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_NAGLING) !=
3524 (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_NAGLING)) {
3525 if (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_NAGLING) {
3526 ctx->enable_nagle = 1;
3527 } else {
3528 ctx->enable_nagle = 0;
3529 }
3530 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_ENABLE_NAGLE_CHANGED;
3531 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_ENABLE_NAGLING changed to %d\n",
3532 tcp->cid, ctx->enable_nagle);
3533 }
3534 if (tcp_cached->tcp_flags & TCP_FLAG_RESTART_KEEP_ALIVE) {
3535 ctx->ka_restart = 1;
3536 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_RESTART_KEEP_ALIVE set\n",
3537 tcp->cid);
3538 } else {
3539 ctx->ka_restart = 0;
3540 }
3541 if (tcp_cached->tcp_flags & TCP_FLAG_RESTART_MAX_RT) {
3542 ctx->retransmit_restart = 1;
3543 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TOE_CACHED_RESTART_MAX_RT set\n",
3544 tcp->cid);
3545 } else {
3546 ctx->retransmit_restart = 0;
3547 }
3548 if (tcp_cached->tcp_flags & TCP_FLAG_UPDATE_RCV_WINDOW) {
3549 /* for debugging purposes */
3550 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_UPDATE_RCV_WINDOW set\n",
3551 tcp->cid);
3552 }
3553
3554 tcp->tcp_cached.tcp_flags = tcp_cached->tcp_flags;
3555
3556 /* flow label ipv6 only */
3557 if (tcp->path->path_const.ip_version == IP_VERSION_IPV6) {
3558 if (tcp->tcp_cached.flow_label != tcp_cached->flow_label) {
3559 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flow_label changed from %d to %d\n",
3560 tcp->cid, tcp->tcp_cached.flow_label, tcp_cached->flow_label);
3561 tcp->tcp_cached.flow_label = tcp_cached->flow_label;
3562 ctx->flow_label= tcp->tcp_cached.flow_label;
3563 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_FLOW_LABEL_CHANGED;
3564 }
3565 }
3566
3567 /* initial_rcv_wnd */
3568 if (tcp->tcp_cached.initial_rcv_wnd != tcp_cached->initial_rcv_wnd) {
3569 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : initial_rcv_wnd changed from %d to %d\n",
3570 tcp->cid, tcp->tcp_cached.initial_rcv_wnd, tcp_cached->initial_rcv_wnd);
3571 /* no change to firmware */
3572 mm_tcp_update_required_gen_bufs(pdev,
3573 tcp->rx_con->u.rx.sws_info.mss, /* new-mss(no change)*/
3574 tcp->rx_con->u.rx.sws_info.mss, /* old-mss*/
3575 tcp_cached->initial_rcv_wnd, /* new initial receive window */
3576 tcp->tcp_cached.initial_rcv_wnd); /* old initial receive window */
3577
3578 /* In VISTA and higher, window CAN decrease! */
3579 if ERR_IF(tcp_cached->initial_rcv_wnd > MAX_INITIAL_RCV_WND) {
3580 /* TBD: Miniport doesn't handle any parameter other than SUCCESS / PENDING... */
3581 /* TODO: return LM_STATUS_INVALID_PARAMETER; */
3582 DbgBreakIfAll(tcp_cached->initial_rcv_wnd > MAX_INITIAL_RCV_WND);
3583 }
3584         /* update the sws bytes accordingly */
3585 mm_acquire_tcp_lock(pdev,tcp->rx_con);
3586         /* it's now time to give the window doorbell in case there was a window update - it could be negative, in which case special handling is required... */
3587 if (tcp->tcp_cached.initial_rcv_wnd < tcp_cached->initial_rcv_wnd) {
3588 /* regular window update */
3589 lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp_cached->initial_rcv_wnd - tcp->tcp_cached.initial_rcv_wnd, TCP_RX_POST_SWS_INC);
3590 } else {
3591 lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp->tcp_cached.initial_rcv_wnd - tcp_cached->initial_rcv_wnd, TCP_RX_POST_SWS_DEC);
3592 pdev->toe_info.toe_events |= LM_TOE_EVENT_WINDOW_DECREASE;
3593 }
3594 mm_release_tcp_lock(pdev, tcp->rx_con);
3595 tcp->tcp_cached.initial_rcv_wnd = tcp_cached->initial_rcv_wnd;
3596 ctx->initial_rcv_wnd = tcp->tcp_cached.initial_rcv_wnd;
3597 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_INITIAL_RCV_WND_CHANGED;
3598 }
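    /* Worked example (illustrative): growing initial_rcv_wnd from 64KB (0x10000) to
     * 256KB (0x40000) posts a single SWS doorbell crediting the extra 0x30000 bytes
     * (TCP_RX_POST_SWS_INC); shrinking it posts the difference with TCP_RX_POST_SWS_DEC
     * and records the LM_TOE_EVENT_WINDOW_DECREASE event instead. */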
3599
3600 /*ttl_or_hop_limit*/
3601 if (tcp->tcp_cached.ttl_or_hop_limit != tcp_cached->ttl_or_hop_limit) {
3602 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ttl_or_hop_limit changed from %d to %d\n",
3603 tcp->cid, tcp->tcp_cached.ttl_or_hop_limit, tcp_cached->ttl_or_hop_limit);
3604 tcp->tcp_cached.ttl_or_hop_limit = tcp_cached->ttl_or_hop_limit;
3605 if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
3606 ctx->ttl= tcp->tcp_cached.ttl_or_hop_limit;
3607 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TTL_CHANGED;
3608 } else {
3609 ctx->hop_limit = tcp->tcp_cached.ttl_or_hop_limit;
3610 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_HOP_LIMIT_CHANGED;
3611 }
3612 }
3613
3614 /* tos_or_traffic_class */
3615 if (tcp->tcp_cached.tos_or_traffic_class != tcp_cached->tos_or_traffic_class) {
3616 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : tos_or_traffic_class changed from %d to %d\n",
3617 tcp->cid, tcp->tcp_cached.tos_or_traffic_class, tcp_cached->tos_or_traffic_class);
3618 tcp->tcp_cached.tos_or_traffic_class = tcp_cached->tos_or_traffic_class;
3619
3620 if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
3621 ctx->tos = tcp_cached->tos_or_traffic_class;
3622 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TOS_CHANGED;
3623 } else {
3624 ctx->traffic_class = tcp_cached->tos_or_traffic_class;
3625 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TRAFFIC_CLASS_CHANGED;
3626 }
3627 }
3628
3629 /* ka_probe_cnt */
3630 if (tcp->tcp_cached.ka_probe_cnt != tcp_cached->ka_probe_cnt) {
3631 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_probe_cnt changed from %d to %d\n",
3632 tcp->cid, tcp->tcp_cached.ka_probe_cnt, tcp_cached->ka_probe_cnt);
3633 tcp->tcp_cached.ka_probe_cnt = tcp_cached->ka_probe_cnt;
3634 ctx->ka_max_probe_count = tcp_cached->ka_probe_cnt;
3635 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_MAX_PROBE_COUNT_CHANGED;
3636 }
3637
3638 /* user_priority */
3639 if (tcp->tcp_cached.user_priority != tcp_cached->user_priority) {
3640 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : user_priority changed from %d to %d\n",
3641 tcp->cid, tcp->tcp_cached.user_priority, tcp_cached->user_priority);
3642 DbgBreakIf(tcp_cached->user_priority > 0x7);
3643 tcp->tcp_cached.user_priority = tcp_cached->user_priority;
3644 ctx->user_priority = tcp_cached->user_priority;
3645 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_USER_PRIORITY_CHANGED;
3646 }
3647
3648 /* rcv_indication_size */
3649 DbgBreakIf(tcp_cached->rcv_indication_size != 0);
3650 if (tcp->tcp_cached.rcv_indication_size != tcp_cached->rcv_indication_size) {
3651 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : rcv_indication_size changed from %d to %d\n",
3652 tcp->cid, tcp->tcp_cached.rcv_indication_size, tcp_cached->rcv_indication_size);
3653 DbgBreakIf(tcp->tcp_cached.rcv_indication_size > 0xffff);
3654 tcp->tcp_cached.rcv_indication_size = tcp_cached->rcv_indication_size;
3655 ctx->rcv_indication_size = (u16_t)tcp_cached->rcv_indication_size;
3656 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_RCV_INDICATION_SIZE_CHANGED;
3657 }
3658
3659 /* ka_time_out */
3660 if (tcp->tcp_cached.ka_time_out != tcp_cached->ka_time_out) {
3661 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_time_out changed from %d to %d\n",
3662 tcp->cid, tcp->tcp_cached.ka_time_out, tcp_cached->ka_time_out);
3663 tcp->tcp_cached.ka_time_out = tcp_cached->ka_time_out;
3664 ctx->ka_timeout =
3665 lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
3666 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_TIMEOUT_CHANGED;
3667 }
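    /* Worked example (illustrative): if the OS expresses ka_time_out in 10ms ticks
     * (ticks_per_second == 100) while the chip timer block runs at TIMERS_TICKS_PER_SEC
     * (1000), a ka_time_out of 200 OS ticks (2 seconds) is scaled by a factor of 10 and
     * programmed as 2000 chip ticks. The same conversion is applied to ka_interval below
     * and, against TSEMI_CLK1_TICKS_PER_SEC, to max_rt. */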
3668
3669 /* ka_interval */
3670 if (tcp->tcp_cached.ka_interval != tcp_cached->ka_interval) {
3671 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_interval changed from %d to %d\n",
3672 tcp->cid, tcp->tcp_cached.ka_interval, tcp_cached->ka_interval);
3673 tcp->tcp_cached.ka_interval = tcp_cached->ka_interval;
3674 ctx->ka_interval =
3675 lm_time_resolution(pdev, tcp->tcp_cached.ka_interval, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
3676 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_INTERVAL_CHANGED;
3677 }
3678
3679 /* max_rt */
3680 if (tcp->tcp_cached.max_rt != tcp_cached->max_rt) {
3681 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : max_rt changed from %d to %d\n",
3682 tcp->cid, tcp->tcp_cached.max_rt, tcp_cached->max_rt);
3683 tcp->tcp_cached.max_rt = tcp_cached->max_rt;
3684 ctx->max_rt =
3685 lm_time_resolution(pdev, tcp->tcp_cached.max_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
3686 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_MAX_RT_CHANGED;
3687 }
3688
3689 if (!ctx->changed_fields && !ctx->ka_restart && !ctx->retransmit_restart) {
3690 DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : nothing changed, completing synchronously\n", tcp->cid);
3691 return LM_STATUS_SUCCESS; /* synchronous complete */
3692 }
3693 //DbgMessage(pdev, WARNl4sp, "## lm_tcp_set_tcp_cached cid=%d DONE!\n", tcp->cid);
3694 return LM_STATUS_PENDING;
3695 }
3696
3697 /* sets the cached parameters of tcp/path/neigh and initializes a toe_context (which is initially all zeros) */
3698 static lm_status_t lm_tcp_set_path_cached(
3699 struct _lm_device_t * pdev,
3700 lm_tcp_state_t * tcp,
3701 l4_path_cached_state_t * path_cached,
3702 void * mem_virt /* firmware context */
3703 )
3704 {
3705 struct toe_update_ramrod_cached_params * ctx = mem_virt;
3706 u32_t new_mss = 0;
3707
3708 new_mss = _lm_tcp_calc_mss(path_cached->path_mtu,
3709 tcp->tcp_const.remote_mss,
3710 (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
3711 tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
3712 pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
3713 tcp->path->neigh->neigh_const.vlan_tag != 0);
3714
3715 if (new_mss != tcp->rx_con->u.rx.sws_info.mss) {
3716 /* also need to notify um, since this may affect the number of generic buffers
3717 * required. */
3718 DbgMessage(pdev, INFORMl4sp, "## path_cached: tcp [cid=%d] update : mss (as a result of pathMtu) from %d to %d\n",
3719 tcp->cid, tcp->rx_con->u.rx.sws_info.mss, new_mss);
3720 mm_tcp_update_required_gen_bufs(pdev,
3721 new_mss,
3722 tcp->rx_con->u.rx.sws_info.mss, /* old-mss*/
3723 tcp->tcp_cached.initial_rcv_wnd, /* new initial receive window */
3724 tcp->tcp_cached.initial_rcv_wnd); /* old initial receive window */
3725
3726 tcp->rx_con->u.rx.sws_info.mss = new_mss;
3727 DbgBreakIf(new_mss > 0xffff);
3728 ctx->mss = (u16_t)new_mss;
3729 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_MSS_CHANGED;
3730 }
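    /* Rough illustration only - the exact accounting is owned by _lm_tcp_calc_mss(): for
     * a plain IPv4 path with a 1500-byte MTU and no TCP timestamps one would expect
     * something close to min(remote_mss, 1500 - 20 - 20) = min(remote_mss, 1460), while
     * IPv6 and the timestamp option each consume additional header bytes; the SNAP and
     * VLAN inputs let the helper account for the L2 encapsulation in use. */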
3731
3732 if (ctx->changed_fields == 0) {
3733 return LM_STATUS_SUCCESS; /* synchronous complete */
3734 }
3735
3736 return LM_STATUS_PENDING;
3737 }
3738
3739 /* sets the cached parameters of tcp/path/neigh and initializes a toe_context (which is initially all zeros)
3740 * Assumption: this function is only called if in-fact, the destination address changed.
3741 */
3742 static lm_status_t lm_tcp_set_neigh_cached(
3743 struct _lm_device_t * pdev,
3744 lm_tcp_state_t * tcp,
3745 l4_neigh_cached_state_t * neigh_cached,
3746 void * mem_virt /* firmware context */
3747 )
3748 {
3749 struct toe_update_ramrod_cached_params * ctx = mem_virt;
3750 int i = 0;
3751
3752 DbgMessage(pdev, INFORMl4sp, "## neigh_cached: tcp [cid=%d] update : neighbor dst_addr\n", tcp->cid);
3753
3754 for (i = 0; i < 6; i++) {
3755 ctx->dest_addr[i] = (u8_t)neigh_cached->dst_addr[i]; /* TBA Michals : is this init correct? order of assignment*/
3756 }
3757 ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_DEST_ADDR_CHANGED;
3758
3759 return LM_STATUS_PENDING;
3760 }
3761
3762 static lm_status_t lm_tcp_post_update_request (
3763 IN struct _lm_device_t * pdev,
3764 IN lm_tcp_state_t * tcp,
3765 OUT u8_t * command,
3766 OUT u64_t * data,
3767 IN lm_tcp_slow_path_request_t * request
3768 )
3769 {
3770 struct toe_spe spe = {{0}};
3771 lm_status_t lm_status = LM_STATUS_FAILURE ;
3772
3773 DbgBreakIf(tcp->hdr.state_id != STATE_ID_TCP);
3774
3775 *command = RAMROD_OPCODE_TOE_UPDATE;
3776 spe.toe_data.phys_addr.hi = tcp->sp_req_data.phys_addr.as_u32.high;
3777 spe.toe_data.phys_addr.lo = tcp->sp_req_data.phys_addr.as_u32.low;
3778 *data = *((u64_t*)(&(spe.toe_data.phys_addr)));
3779 mm_memset(tcp->sp_req_data.virt_addr, 0, sizeof(struct toe_update_ramrod_cached_params));
3780
3781 DbgBreakIf((tcp->hdr.status != STATE_STATUS_NORMAL) &&
3782 (tcp->hdr.status != STATE_STATUS_ABORTED));
3783
3784 /* we need to initialize the data for firmware */
3785 switch(request->type) {
3786 case SP_REQUEST_UPDATE_TCP:
3787 lm_status = lm_tcp_set_tcp_cached(pdev, tcp,
3788 request->sent_data.tcp_update_data.data,
3789 tcp->sp_req_data.virt_addr);
3790 break;
3791 case SP_REQUEST_UPDATE_PATH:
3792 DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
3793 DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3794 lm_status = lm_tcp_set_path_cached(pdev, tcp,
3795 request->sent_data.tcp_update_data.data,
3796 tcp->sp_req_data.virt_addr);
3797 break;
3798 case SP_REQUEST_UPDATE_NEIGH:
3799 DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3800
3801 lm_status = lm_tcp_set_neigh_cached(pdev, tcp,
3802 request->sent_data.tcp_update_data.data,
3803 tcp->sp_req_data.virt_addr);
3804 break;
3805 case SP_REQUEST_UPDATE_PATH_RELINK:
3806 /* we will always return PENDING status */
3807 DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3808 lm_status = lm_tcp_set_neigh_cached(pdev, tcp,
3809 &((lm_tcp_path_relink_cached_t *)request->sent_data.tcp_update_data.data)->neigh_cached,
3810 tcp->sp_req_data.virt_addr);
3811
3812 DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
3813 DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3814 lm_tcp_set_path_cached(pdev, tcp, &((lm_tcp_path_relink_cached_t *)request->sent_data.tcp_update_data.data)->path_cached,
3815 tcp->sp_req_data.virt_addr);
3816 break;
3817 }
3818
3819 return lm_status;
3820 }
3821
3822 static lm_status_t lm_tcp_post_empty_ramrod_request(
3823 IN struct _lm_device_t * pdev,
3824 IN lm_tcp_state_t * tcp,
3825 OUT u8_t * command,
3826 OUT u64_t * data)
3827 {
3828 struct toe_spe spe = {{0}};
3829
3830 DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_empty_ramrod_request\n");
3831
3832 *command = RAMROD_OPCODE_TOE_EMPTY_RAMROD;
3833 spe.toe_data.rx_completion.hash_value = (u16_t)(tcp->tcp_const.hash_value);
3834 *data = *((u64_t*)(&(spe.toe_data.rx_completion)));
3835
3836 return LM_STATUS_PENDING;
3837 }
3838
3839 static lm_status_t lm_tcp_post_invalidate_request(
3840 IN struct _lm_device_t * pdev,
3841 IN lm_tcp_state_t * tcp,
3842 OUT u8_t * command,
3843 OUT u64_t * data)
3844 {
3845 /* Get Rx and Tx connections */
3846 lm_tcp_con_t * rx_con = tcp->rx_con;
3847 lm_tcp_con_t * tx_con = tcp->tx_con;
3848 MM_INIT_TCP_LOCK_HANDLE();
3849
3850 DbgMessage(pdev, INFORMl4sp, "## lm_tcp_post_invalidate_request cid=%d\n", tcp->cid);
3851
3852 DbgBreakIf(tcp->hdr.status != STATE_STATUS_NORMAL &&
3853 tcp->hdr.status != STATE_STATUS_ABORTED);
3854
3855 /* Set the flags for the connections (Rx and Tx) */
3856 /* Tx */
3857 mm_acquire_tcp_lock(pdev, tx_con);
3858 DbgBreakIf(tx_con->flags & TCP_INV_REQ_POSTED);
3859 tx_con->flags |= TCP_INV_REQ_POSTED;
3860 mm_release_tcp_lock(pdev, tx_con);
3861 /* Rx */
3862 mm_acquire_tcp_lock(pdev, rx_con);
3863 DbgBreakIf(rx_con->flags & TCP_INV_REQ_POSTED);
3864 rx_con->flags |= TCP_INV_REQ_POSTED;
3865 mm_release_tcp_lock(pdev, rx_con);
3866
3867
3868 *command = RAMROD_OPCODE_TOE_INVALIDATE;
3869 *data = 0;
3870
3871 return LM_STATUS_PENDING;
3872 }
3873
3874
3875 /* Description:
3876 * post slow path request of given type for given tcp state
3877 * Assumptions:
3878 * - caller initialized request->type according to his specific request
3879 * - caller allocated space for request->data, according to the specific request type
3880 * - all previous slow path requests for given tcp state are already completed
3881 * Returns:
3882 * PENDING, SUCCESS or any failure */
3883 lm_status_t lm_tcp_post_slow_path_request(
3884 struct _lm_device_t *pdev,
3885 lm_tcp_state_t *tcp,
3886 lm_tcp_slow_path_request_t *request)
3887 {
3888 lm_status_t lm_status = LM_STATUS_INVALID_PARAMETER;
3889 u64_t data = 0;
3890 u8_t command = 0;
3891
3892 DbgBreakIf(!(pdev && tcp && request));
3893 DbgBreakIf(tcp->sp_request); /* lm supports only one pending slow path request per connection */
3894 DbgMessage(pdev, VERBOSEl4sp, "### lm_tcp_post_slow_path_request cid=%d, type=%d\n", tcp->cid, request->type);
3895 DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));
3896 tcp->sp_request = request;
3897
3898 switch(request->type) {
3899     /* call the type specific post function that:
3900        - executes any actions required for the specific sp request (possibly taking tx/rx locks for that)
3901        - according to state, possibly sets the request status and completes the request synchronously
3902        - fills in the appropriate content in the lm information structure of the request */
3903 case SP_REQUEST_INITIATE_OFFLOAD:
3904 lm_status = lm_tcp_post_initiate_offload_request(pdev, tcp, &command, &data);
3905 break;
3906 case SP_REQUEST_TERMINATE1_OFFLOAD:
3907 lm_status = lm_tcp_post_terminate_tcp_request(pdev, tcp, &command, &data);
3908 break;
3909 case SP_REQUEST_TERMINATE_OFFLOAD:
3910 lm_status = lm_tcp_post_upload_tcp_request(pdev, tcp, &command, &data);
3911 break;
3912 case SP_REQUEST_QUERY:
3913 lm_status = lm_tcp_post_query_request(pdev, tcp, &command, &data, request);
3914 break;
3915 case SP_REQUEST_UPDATE_TCP:
3916 case SP_REQUEST_UPDATE_PATH:
3917 case SP_REQUEST_UPDATE_NEIGH:
3918 case SP_REQUEST_UPDATE_PATH_RELINK:
3919 lm_status = lm_tcp_post_update_request(pdev, tcp, &command, &data, request);
3920 break;
3921 case SP_REQUEST_INVALIDATE:
3922 lm_status = lm_tcp_post_invalidate_request(pdev, tcp, &command, &data);
3923 break;
3924 case SP_REQUEST_ABORTIVE_DISCONNECT:
3925 lm_status = lm_tcp_post_abortive_disconnect_request(pdev,tcp, &command, &data);
3926 break;
3927 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
3928 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
3929 case SP_REQUEST_PENDING_TX_RST:
3930 lm_status = lm_tcp_post_empty_ramrod_request(pdev, tcp, &command, &data);
3931 break;
3932 default:
3933 DbgBreakMsg("Illegal slow path request type!\n");
3934 }
3935 if(lm_status == LM_STATUS_PENDING) {
3936 DbgMessage(pdev, VERBOSEl4sp,
3937 "calling lm_command_post, cid=%d, command=%d, con_type=%d, data=%lx\n",
3938 tcp->cid, command, tcp->ulp_type, data);
3939 if (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE)
3940 {
3941 /* no slow path request can be posted after connection is uploaded */
3942 DbgBreakIf(tcp->hdr.status == STATE_STATUS_UPLOAD_DONE);
3943 tcp->sp_request = NULL;
3944 lm_status = LM_STATUS_INVALID_PARAMETER;
3945 } else
3946 {
3947 lm_command_post(pdev, tcp->cid, command, CMD_PRIORITY_NORMAL, tcp->ulp_type, data);
3948 }
3949 } else {
3950 tcp->sp_request = NULL;
3951 }
3952
3953 request->status = lm_status;
3954 return lm_status;
3955 }
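/* Typical caller flow (sketch only - the allocation and bookkeeping of 'request' belong
 * to the UM layer and are not dictated by this function):
 */
//  request->type = SP_REQUEST_INVALIDATE;
//  lm_status     = lm_tcp_post_slow_path_request(pdev, tcp, request);
//  if (lm_status == LM_STATUS_PENDING) {
//      /* the ramrod was posted - the completion arrives via the RCQ and eventually
//       * reaches mm_tcp_comp_slow_path_request() */
//  } else {
//      /* completed synchronously (or rejected) - no CQE will follow for this request */
//  }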
3956
3957 /* slow path request completion template */
3958 // lm_status_t lm_tcp_comp_XXX_slow_path_request(struct _lm_device_t *pdev,
3959 // lm_tcp_state_t *tcp,
3960 // ...cqe...)
3961 // {
3962 // lm_tcp_slow_path_request_t *sp_request;
3963 //
3964 // DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_comp_XXX_slow_path_request\n");
3965 // MM_ACQUIRE_TOE_LOCK(pdev);
3966 // DbgBreakIf(tcp->hdr.status != STATE_STATUS_YYY);
3967 // tcp->hdr.status = STATE_STATUS_ZZZ;
3968 // execute lm state actions if required
3969 // lm_sp_ring_command_completed (*) [not here, automatically in 'process CQ']
3970 // MM_RELEASE_TOE_LOCK(pdev);
3971 // under tx lock, execute any Tx actions required (possibly call mm_*)
3972 // under rx lock, execute any Rx actions required (possibly call mm_*)
3973 // MM_ACQUIRE_TOE_LOCK(pdev);
3974 // tcp->sp_flags ~= (SP_REQ_COMPLETED_RX | SP_REQ_COMPLETED_TX)
3975 // tcp->sp_request->status = completion status;
3976 // sp_request = tcp->sp_request;
3977 // tcp->sp_request = NULL
3978 // mm_tcp_comp_slow_path_request(tcp, sp_request)
3979 // MM_RELEASE_TOE_LOCK(pdev);
3980 // }
3981 void lm_tcp_service_deferred_cqes(lm_device_t * pdev, lm_tcp_state_t * tcp)
3982 {
3983 lm_tcp_con_t * con = tcp->tx_con;
3984 u8_t idx = 0, dead=FALSE;
3985 MM_INIT_TCP_LOCK_HANDLE();
3986
3987 DbgMessage(pdev, INFORMl4sp, "### lm_tcp_service_deferred_cqes cid=%d\n", tcp->cid);
3988
3989
3990
3991 for (idx = 0; idx < 2; idx++) {
3992 mm_acquire_tcp_lock(pdev, con);
3993 while(con->flags & TCP_DEFERRED_PROCESSING) {
3994 /* consistent state. at this stage, since we have the lock and deferred cqes need the lock
3995 * for processing, it's as if we have just processed X cqes and are about to complete the fp
3996 * of these cqes... During the complete of fp and sp, the lock may be released, in this case
3997 * more cqes may be processed, in which case TCP_DEFERRED_PROCESSING will be switched back on. */
3998 con->flags &= ~TCP_DEFERRED_PROCESSING;
3999 DbgMessage(pdev, INFORMl4sp, "### deferred cid=%d\n", tcp->cid);
4000
4001 if (con->type == TCP_CON_TYPE_RX) {
4002 lm_tcp_rx_complete_tcp_fp(pdev, con->tcp_state, con);
4003 } else {
4004 lm_tcp_tx_complete_tcp_fp(pdev, con->tcp_state, con);
4005 }
4006
4007 if (con->dpc_info.snapshot_flags) {
4008 mm_release_tcp_lock(pdev, con);
4009
4010 if (con->type == TCP_CON_TYPE_RX) {
4011 lm_tcp_rx_complete_tcp_sp(pdev,tcp, con);
4012 } else {
4013 lm_tcp_tx_complete_tcp_sp(pdev,tcp, con);
4014 }
4015
4016 mm_acquire_tcp_lock(pdev, con);
4017 }
4018 }
4019
4020 con->flags &= ~TCP_COMP_DEFERRED; /* completions are no longer deferred */
4021
4022     /* it's possible that, while processing the deferred cqes, the connection was uploaded;
4023      * since the TCP_COMP_DEFERRED flag was still on we didn't delete it yet, so now is the time
4024      * to delete it... note that this can only happen while we're handling the deferred cqes of
4025      * rx_con - since query will only complete on RX and not TX - so it's safe to check here, and
4026      * after handling rx we won't access this connection anymore... */
4027 dead = lm_tcp_is_tcp_dead(pdev, tcp, TCP_IS_DEAD_OP_OFLD_COMP_DFRD);
4028
4029
4030 mm_release_tcp_lock(pdev, con);
4031
4032 con = tcp->rx_con;
4033
4034 if (dead) {
4035 mm_tcp_del_tcp_state(pdev, tcp);
4036 }
4037
4038 }
4039 }
4040
4041 /* initiate offload request completion */
4042 void lm_tcp_comp_initiate_offload_request(
4043 struct _lm_device_t *pdev,
4044 lm_tcp_state_t *tcp,
4045 u32_t comp_status)
4046 {
4047 lm_tcp_slow_path_request_t *sp_request;
4048 lm_tcp_con_t *con;
4049 lm_status_t lm_status = LM_STATUS_SUCCESS;
4050 int i;
4051 MM_INIT_TCP_LOCK_HANDLE();
4052
4053 DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_comp_initiate_offload_request\n");
4054
4055 MM_ACQUIRE_TOE_LOCK(pdev);
4056
4057 DbgBreakIf(tcp->hdr.status != STATE_STATUS_OFFLOAD_PENDING);
4058
4059 if(!comp_status)
4060 { /* successful completion */
4061 tcp->hdr.status = STATE_STATUS_NORMAL;
4062
4063 if (tcp->ulp_type == TOE_CONNECTION_TYPE)
4064 {
4065 con = tcp->tx_con;
4066 for (i = 0; i < 2; i++)
4067 {
4068 mm_acquire_tcp_lock(pdev, con);
4069 DbgBreakIf(!(con->flags & TCP_COMP_DEFERRED));
4070 DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
4071 con->flags &= ~TCP_POST_BLOCKED; /* posting is now allowed */
4072 mm_release_tcp_lock(pdev, con);
4073 con = tcp->rx_con;
4074 }
4075
4076 // update stats counters if TOE
4077 if( IP_VERSION_IPV4 == tcp->path->path_const.ip_version )
4078 {
4079 ++pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
4080 }
4081 else if( IP_VERSION_IPV6 == tcp->path->path_const.ip_version )
4082 {
4083 ++pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
4084 }
4085 }
4086 }
4087 else
4088 {
4089 #ifndef _VBD_CMD_
4090 DbgMessage(pdev, FATAL, "initiate offload failed. err=%x\n", comp_status);
4091 #endif // _VBD_CMD_
4092 tcp->hdr.status = STATE_STATUS_INIT_OFFLOAD_ERR;
4093
4094 if (tcp->ulp_type == TOE_CONNECTION_TYPE)
4095 {
4096 con = tcp->tx_con;
4097 for (i = 0; i < 2; i++)
4098 {
4099 mm_acquire_tcp_lock(pdev, con);
4100 DbgBreakIf((con->flags & ~TCP_INDICATE_REJECTED) != (TCP_POST_BLOCKED | TCP_COMP_DEFERRED));
4101 con->flags &= ~TCP_COMP_DEFERRED;
4102 con->flags |= TCP_COMP_BLOCKED; /* completions are blocked */
4103 mm_release_tcp_lock(pdev, con);
4104 con = tcp->rx_con;
4105 }
4106 }
4107
4108 lm_status = LM_STATUS_FAILURE;
4109 }
4110
4111 DbgBreakIf(tcp->sp_flags & (SP_REQUEST_COMPLETED_RX | SP_REQUEST_COMPLETED_TX));
4112 tcp->sp_request->status = lm_status;
4113 // DbgMessage(pdev, FATAL, "#lm_tcp_comp_initiate_offload_request cid=%d, sp_request->status=%d\n", tcp->cid, tcp->sp_request->status);
4114 sp_request = tcp->sp_request;
4115 tcp->sp_request = NULL;
4116
4117 DbgBreakIf(!(tcp->sp_flags & SP_TCP_OFLD_REQ_POSTED));
4118 tcp->sp_flags |= SP_TCP_OFLD_REQ_COMP;
4119 mm_tcp_comp_slow_path_request(pdev, tcp, sp_request);
4120
4121 MM_RELEASE_TOE_LOCK(pdev);
4122
4123 /* handle deferred CQEs */
4124 if(!comp_status && (tcp->ulp_type == TOE_CONNECTION_TYPE)) {
4125 lm_tcp_service_deferred_cqes(pdev, tcp);
4126 }
4127 }
4128
4129 void lm_tcp_collect_stats(
4130 struct _lm_device_t *pdev,
4131 lm_tcp_state_t *tcp)
4132 {
4133
4134 if (tcp->tx_con && tcp->rx_con) {
4135 pdev->toe_info.stats.tx_bytes_posted_total += tcp->tx_con->bytes_post_cnt;
4136 pdev->toe_info.stats.tx_rq_complete_calls += tcp->tx_con->rq_completion_calls;
4137 pdev->toe_info.stats.tx_bytes_completed_total += tcp->tx_con->bytes_comp_cnt;
4138 pdev->toe_info.stats.tx_rq_bufs_completed += tcp->tx_con->buffer_completed_cnt;
4139 pdev->toe_info.stats.total_tx_abortion_under_flr += tcp->tx_con->abortion_under_flr;
4140
4141 pdev->toe_info.stats.rx_rq_complete_calls += tcp->rx_con->rq_completion_calls;
4142 pdev->toe_info.stats.rx_rq_bufs_completed += tcp->rx_con->buffer_completed_cnt;
4143 pdev->toe_info.stats.rx_bytes_completed_total += tcp->rx_con->bytes_comp_cnt;
4144
4145 pdev->toe_info.stats.rx_accepted_indications += tcp->rx_con->u.rx.gen_info.num_success_indicates;
4146 pdev->toe_info.stats.rx_bufs_indicated_accepted += tcp->rx_con->u.rx.gen_info.num_buffers_indicated;
4147 pdev->toe_info.stats.rx_bytes_indicated_accepted += tcp->rx_con->u.rx.gen_info.bytes_indicated_accepted;
4148
4149 pdev->toe_info.stats.rx_rejected_indications += tcp->rx_con->u.rx.gen_info.num_failed_indicates;
4150 pdev->toe_info.stats.rx_bufs_indicated_rejected += tcp->rx_con->u.rx.gen_info.bufs_indicated_rejected;
4151 pdev->toe_info.stats.rx_bytes_indicated_rejected += tcp->rx_con->u.rx.gen_info.bytes_indicated_rejected;
4152 pdev->toe_info.stats.total_num_non_full_indications += tcp->rx_con->u.rx.gen_info.num_non_full_indications;
4153
4154 pdev->toe_info.stats.rx_zero_byte_recv_reqs += tcp->rx_con->u.rx.rx_zero_byte_recv_reqs;
4155 pdev->toe_info.stats.rx_bufs_copied_grq += tcp->rx_con->u.rx.gen_info.num_buffers_copied_grq;
4156 pdev->toe_info.stats.rx_bufs_copied_rq += tcp->rx_con->u.rx.gen_info.num_buffers_copied_rq;
4157 pdev->toe_info.stats.rx_bytes_copied_in_comp += tcp->rx_con->u.rx.gen_info.bytes_copied_cnt_in_comp;
4158 pdev->toe_info.stats.rx_bytes_copied_in_post += tcp->rx_con->u.rx.gen_info.bytes_copied_cnt_in_post;
4159 pdev->toe_info.stats.rx_bytes_copied_in_process += tcp->rx_con->u.rx.gen_info.bytes_copied_cnt_in_process;
4160 if (pdev->toe_info.stats.max_number_of_isles_in_single_con < tcp->rx_con->u.rx.gen_info.max_number_of_isles) {
4161 pdev->toe_info.stats.max_number_of_isles_in_single_con = tcp->rx_con->u.rx.gen_info.max_number_of_isles;
4162 }
4163 pdev->toe_info.stats.rx_bufs_posted_total += tcp->rx_con->buffer_post_cnt;
4164 pdev->toe_info.stats.rx_bytes_posted_total += tcp->rx_con->bytes_post_cnt;
4165 pdev->toe_info.stats.rx_bufs_skipped_post += tcp->rx_con->buffer_skip_post_cnt;
4166 pdev->toe_info.stats.rx_bytes_skipped_post += tcp->rx_con->bytes_skip_post_cnt;
4167
4168 pdev->toe_info.stats.rx_bytes_skipped_push += tcp->rx_con->bytes_push_skip_cnt;
4169 pdev->toe_info.stats.rx_partially_completed_buf_cnt += tcp->rx_con->partially_completed_buf_cnt;
4170 pdev->toe_info.stats.total_droped_empty_isles += tcp->rx_con->droped_empty_isles;
4171 pdev->toe_info.stats.total_droped_non_empty_isles += tcp->rx_con->droped_non_empty_isles;
4172 pdev->toe_info.stats.total_rx_post_blocked += tcp->rx_con->rx_post_blocked;
4173 pdev->toe_info.stats.total_zb_rx_post_blocked += tcp->rx_con->zb_rx_post_blocked;
4174 if (tcp->aux_mem_flag & TCP_CON_AUX_RT_MEM_SUCCSESS_ALLOCATION) {
4175 pdev->toe_info.stats.total_aux_mem_success_allocations++;
4176 } else if (tcp->aux_mem_flag & TCP_CON_AUX_RT_MEM_FAILED_ALLOCATION) {
4177 pdev->toe_info.stats.total_aux_mem_failed_allocations++;
4178 }
4179 pdev->toe_info.stats.total_rx_abortion_under_flr += tcp->rx_con->abortion_under_flr;
4180 }
4181 }
4182
4183
4184
4185 /* Description:
4186  *  delete tcp state from lm _except_ for the actual freeing of memory.
4187  *  the freeing of memory is done in lm_tcp_free_tcp_state()
4188 * Assumptions:
4189 * global toe lock is taken by the caller
4190 */
4191 void lm_tcp_del_tcp_state(
4192 struct _lm_device_t *pdev,
4193 lm_tcp_state_t *tcp)
4194 {
4195 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_del_tcp_state\n");
4196 DbgBreakIf(!(pdev && tcp));
4197
4198 if (!lm_fl_reset_is_inprogress(pdev))
4199 {
4200 DbgBreakIf(tcp->hdr.status >= STATE_STATUS_OFFLOAD_PENDING &&
4201 tcp->hdr.status < STATE_STATUS_UPLOAD_DONE);
4202 }
4203 else
4204 {
4205 DbgMessage(pdev, FATAL, "###lm_tcp_del_tcp_state under FLR\n");
4206 }
4207
4208 /* just a moment before we delete this connection, lets take it's info... */
4209 lm_tcp_collect_stats(pdev, tcp);
4210
4211 d_list_remove_entry(
4212 &tcp->hdr.state_blk->tcp_list,
4213 &tcp->hdr.link);
4214
4215 if (tcp->ulp_type == TOE_CONNECTION_TYPE)
4216 {
4217 pdev->toe_info.stats.total_upld++;
4218 }
4219 else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
4220 {
4221 pdev->iscsi_info.run_time.stats.total_upld++;
4222 }
4223
4224 if (!lm_fl_reset_is_inprogress(pdev) && (tcp->path != NULL)) {
4225         /* This is called as a result of a failed offload and not an upload...,
4226 * if connection is uploaded it means that path must have been taken care of
4227 * already. */
4228 DbgBreakIf((tcp->hdr.status != STATE_STATUS_INIT_OFFLOAD_ERR) &&
4229 (tcp->hdr.status != STATE_STATUS_INIT) &&
4230 (tcp->hdr.status != STATE_STATUS_INIT_CONTEXT));
4231 DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
4232 tcp->path->num_dependents--;
4233 tcp->path = NULL;
4234 }
4235
4236 if (tcp->in_searcher) {
4237 /* remove 4tuple from searcher */
4238 lm_searcher_mirror_hash_remove(pdev, tcp->cid);
4239 tcp->in_searcher = 0;
4240 }
4241
4242 if (tcp->cid != 0) {
4243 u8_t notify_fw = 0;
4244
4245 /* we only notify FW if this delete is a result of upload, otherwise
4246 * (err_offload / error in init stage) we don't*/
4247 if (!lm_fl_reset_is_inprogress(pdev) && (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE)) {
4248 notify_fw = 1;
4249 }
4250 lm_free_cid_resc(pdev, TOE_CONNECTION_TYPE, tcp->cid, notify_fw);
4251 }
4252
4253 tcp->hdr.state_blk = NULL;
4254 tcp->cid = 0;
4255 tcp->ctx_virt = NULL;
4256 tcp->ctx_phys.as_u64 = 0;
4257 if (tcp->aux_memory != NULL) {
4258 switch (tcp->type_of_aux_memory) {
4259 case TCP_CON_AUX_RT_MEM:
4260 DbgMessage(pdev, WARNl4sp,
4261 "###lm_tcp_del_tcp_state: delete aux_mem (%d)\n",
4262 tcp->aux_mem_size);
4263 tcp->type_of_aux_memory = 0;
4264 mm_rt_free_mem(pdev,tcp->aux_memory,tcp->aux_mem_size,LM_RESOURCE_NDIS);
4265 break;
4266 default:
4267 break;
4268 }
4269 }
4270 } /* lm_tcp_del_tcp_state */
4271
4272 /* Description:
4273 * delete path state from lm
4274 * Assumptions:
4275 * global toe lock is taken by the caller
4276 */
4277 void lm_tcp_del_path_state(
4278 struct _lm_device_t *pdev,
4279 lm_path_state_t *path)
4280 {
4281 UNREFERENCED_PARAMETER_(pdev);
4282
4283 if (path->neigh != NULL) {
4284
4285 DbgBreakIf(path->neigh->hdr.status != STATE_STATUS_NORMAL);
4286 /* This is called as a result of a synchronous path upload */
4287 path->neigh->num_dependents--;
4288 path->neigh = NULL;
4289 }
4290
4291 DbgBreakIf(!lm_fl_reset_is_inprogress(pdev) && (path->hdr.status != STATE_STATUS_UPLOAD_DONE));
4292 d_list_remove_entry(&path->hdr.state_blk->path_list, &path->hdr.link);
4293 }
4294
4295 /* Description:
4296 * delete neigh state from lm
4297 * Assumptions:
4298 * global toe lock is taken by the caller
4299 */
4300 void lm_tcp_del_neigh_state(
4301 struct _lm_device_t *pdev,
4302 lm_neigh_state_t *neigh)
4303 {
4304 UNREFERENCED_PARAMETER_(pdev);
4305
4306 DbgBreakIf(!lm_fl_reset_is_inprogress(pdev) && (neigh->hdr.status != STATE_STATUS_UPLOAD_DONE));
4307 d_list_remove_entry(&neigh->hdr.state_blk->neigh_list, &neigh->hdr.link);
4308 }
4309
4310 /* Description:
4311 * free lm tcp state resources
4312 * Assumptions:
4313 * lm_tcp_del_tcp_state() already called */
4314 void lm_tcp_free_tcp_resc(
4315 struct _lm_device_t *pdev,
4316 lm_tcp_state_t *tcp)
4317 {
4318 lm_tcp_con_t *tcp_con;
4319 d_list_t released_list_of_gen_bufs;
4320 u8_t reset_in_progress = lm_reset_is_inprogress(pdev);
4321 u32_t num_isles = 0;
4322 u32_t num_bytes_in_isles = 0;
4323 u32_t num_gen_bufs_in_isles = 0;
4324
4325 DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_free_tcp_resc tcp=%p\n", tcp);
4326 DbgBreakIf(!(pdev && tcp));
4327 DbgBreakIf(!reset_in_progress && tcp->hdr.status >= STATE_STATUS_OFFLOAD_PENDING &&
4328 tcp->hdr.status < STATE_STATUS_UPLOAD_DONE);
4329 DbgBreakIf(tcp->cid); /* i.e lm_tcp_del_tcp_state wasn't called */
4330
4331 tcp_con = tcp->rx_con;
4332 if (tcp_con) {
4333 /* need to return the generic buffers of the isle list to the pool */
4334 d_list_init(&released_list_of_gen_bufs, NULL, NULL, 0);
4335 num_isles = d_list_entry_cnt(&tcp_con->u.rx.gen_info.isles_list);
4336 num_bytes_in_isles = tcp_con->u.rx.gen_info.isle_nbytes;
4337 lm_tcp_rx_clear_isles(pdev, tcp, &released_list_of_gen_bufs);
4338 num_gen_bufs_in_isles = d_list_entry_cnt(&released_list_of_gen_bufs);
4339 if(!d_list_is_empty(&tcp_con->u.rx.gen_info.dpc_peninsula_list)) {
4340 if (!reset_in_progress) {
4341 DbgBreak();
4342 }
4343 d_list_add_tail(&released_list_of_gen_bufs,&tcp_con->u.rx.gen_info.dpc_peninsula_list);
4344 d_list_init(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list, NULL, NULL, 0);
4345 }
4346 if (!d_list_is_empty(&tcp_con->u.rx.gen_info.peninsula_list)) {
4347 d_list_add_tail(&released_list_of_gen_bufs,&tcp_con->u.rx.gen_info.peninsula_list);
4348 d_list_init(&tcp->rx_con->u.rx.gen_info.peninsula_list, NULL, NULL, 0);
4349 if (!reset_in_progress) {
4350 /* we can only have data in the peninsula if we didn't go via the upload flow (i.e. offload failure of some sort...)*/
4351 DbgBreakIf(tcp->hdr.status == STATE_STATUS_UPLOAD_DONE);
4352 if (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE) {
4353 pdev->toe_info.stats.total_bytes_lost_on_upload += tcp_con->u.rx.gen_info.peninsula_nbytes;
4354 }
4355 }
4356 }
4357
4358 if (!d_list_is_empty(&released_list_of_gen_bufs)) {
4359 mm_tcp_return_list_of_gen_bufs(pdev, &released_list_of_gen_bufs, 0, NON_EXISTENT_SB_IDX);
4360 if (!reset_in_progress && num_isles) {
4361 s32_t delta = -(s32_t)num_gen_bufs_in_isles;
4362 MM_ACQUIRE_ISLES_CONTROL_LOCK(pdev);
4363 lm_tcp_update_isles_cnts(pdev, -(s32_t)num_isles, delta);
4364 MM_RELEASE_ISLES_CONTROL_LOCK(pdev);
4365 }
4366 }
4367 }
4368
4369 } /* lm_tcp_free_tcp_resc */
4370
4371 /* Description:
4372 * update chip internal memory and hw with given offload params
4373 * Assumptions:
4374 * - lm_tcp_init was already called
4375 * Returns:
4376 * SUCCESS or any failure */
4377 lm_status_t
4378 lm_tcp_set_ofld_params(
4379 lm_device_t *pdev,
4380 lm_state_block_t *state_blk,
4381 l4_ofld_params_t *params)
4382 {
4383 l4_ofld_params_t *curr_params = &pdev->ofld_info.l4_params;
4384
4385 UNREFERENCED_PARAMETER_(state_blk);
4386
4387 DbgMessage(pdev, VERBOSE, "###lm_tcp_set_ofld_params\n");
4388
4389 /* we assume all timers periods can't be 0 */
4390 DbgBreakIf(!(params->delayed_ack_ticks &&
4391 params->nce_stale_ticks &&
4392 params->push_ticks &&
4393 params->sws_prevention_ticks &&
4394 params->ticks_per_second));
4395
4396     /* <MichalK> Here we override the ofld info. This in theory affects iscsi as well, however, since ftsk
4397 * does not really use timers, and passes '0' for ka / rt in delegate/cached params its ok that
4398 * we're overriding the parameters here. The correct solution is to maintain this per cli-idx,
4399 * but that will require major changes in l4 context initialization and not worth the effort.
4400 */
4401 *curr_params = *params;
4402
4403 /* update internal memory/hw for each storm both with
4404 * toe/rdma/iscsi common params and with toe private params (where applicable) */
4405
4406 _lm_set_ofld_params_xstorm_common(pdev, curr_params);
4407
4408 _lm_set_ofld_params_tstorm_common(pdev, curr_params);
4409
4410 _lm_set_ofld_params_tstorm_toe(pdev, curr_params);
4411
4412 _lm_set_ofld_params_ustorm_toe(pdev, curr_params);
4413
4414 _lm_set_ofld_params_xstorm_toe(pdev, curr_params);
4415
4416     /* GilR 6/7/2006 - TBD - usage of params->starting_ip_id is not clear. currently we ignore it */
4417
4418 return LM_STATUS_SUCCESS;
4419 } /* lm_tcp_set_ofld_params */
4420
4421
4422 /** Description
4423 * indicates that a rst request was received. Called from several
4424 * functions. Could also be called as a result of a delayed rst.
4425 * Assumptions:
4426 */
4427 void lm_tcp_indicate_rst_received(
4428 struct _lm_device_t * pdev,
4429 lm_tcp_state_t * tcp
4430 )
4431 {
4432 lm_tcp_con_t *rx_con, *tx_con;
4433 u8_t ip_version;
4434 MM_INIT_TCP_LOCK_HANDLE();
4435
4436 //DbgMessage(pdev, WARNl4rx , "##lm_tcp_indicate_rst_received cid=%d\n", tcp->cid);
4437
4438 /* Update the Reset Received statistic*/
4439 ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
4440 LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].in_reset);
4441
4442 rx_con = tcp->rx_con;
4443 tx_con = tcp->tx_con;
4444
4445 DbgBreakIf( ! (pdev && tcp) );
4446 /* The state may only be NORMAL or UPLOAD_PENDING */
4447 DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
4448 (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
4449
4450 /* Get the global TOE lock */
4451 MM_ACQUIRE_TOE_LOCK(pdev);
4452
4453 /* Change the state status if needed: NORMAL->ABORTED */
4454 if ( tcp->hdr.status == STATE_STATUS_NORMAL ) {
4455 tcp->hdr.status = STATE_STATUS_ABORTED;
4456 }
4457
4458 /* Release the global TOE lock */
4459 MM_RELEASE_TOE_LOCK(pdev);
4460 /*********************** Tx **********************/
4461 /* Take Tx lock */
4462 mm_acquire_tcp_lock(pdev, tx_con);
4463
4464 /* Implies POST Tx blocked */
4465 DbgBreakIf(tx_con->flags & TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED);
4466 tx_con->flags |= TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED;
4467
4468 /* Abort Tx buffers */
4469 lm_tcp_abort_bufs(pdev, tcp, tx_con, LM_STATUS_CONNECTION_RESET);
4470
4471 /* Clear delayed RST flag */
4472 tx_con->u.tx.flags &= ~ TCP_CON_RST_IND_NOT_SAFE;
4473
4474 /* Release Tx lock */
4475 mm_release_tcp_lock(pdev, tx_con);
4476 /*********************** Rx **********************/
4477 /* Take Rx lock */
4478 mm_acquire_tcp_lock(pdev, rx_con);
4479
4480 /* Clear delayed FIN and RST */
4481 rx_con->u.rx.flags &= ~ (TCP_CON_RST_IND_PENDING | TCP_CON_FIN_IND_PENDING);
4482
4483 /* Implies POST Rx blocked */
4484 DbgBreakIf(rx_con->flags & TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED);
4485 rx_con->flags |= TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED;
4486
4487 /* Abort Rx buffers */
4488 lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_CONNECTION_RESET);
4489
4490 /* Release Rx lock */
4491 mm_release_tcp_lock(pdev, rx_con);
4492
4493 /* Indicate the Remote Abortive Disconnect to the Client */
4494 mm_tcp_indicate_rst_received(pdev, tcp);
4495 }
4496
4497 void lm_tcp_searcher_ramrod_complete(
4498 IN struct _lm_device_t * pdev,
4499 IN lm_tcp_state_t * tcp
4500 )
4501 {
4502 lm_tcp_slow_path_request_t * request = tcp->sp_request;
4503
4504 DbgMessage(pdev, VERBOSEl4, "## lm_tcp_searcher_ramrod_comp\n");
4505
4506 DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4507 DbgBreakIf(request->type != SP_REQUEST_TERMINATE_OFFLOAD);
4508
4509 tcp->sp_request = NULL;
4510 request->type = SP_REQUEST_TERMINATE1_OFFLOAD;
4511
4512
4513 MM_ACQUIRE_TOE_LOCK(pdev);
4514 /* remove 4tuple from searcher */
4515 DbgBreakIf(!tcp->in_searcher);
4516 lm_searcher_mirror_hash_remove(pdev, tcp->cid);
4517 tcp->in_searcher = 0;
4518 DbgBreakIf(!(tcp->sp_flags & SP_TCP_SRC_REQ_POSTED));
4519 tcp->sp_flags |= SP_TCP_SRC_REQ_COMP;
4520 lm_tcp_post_slow_path_request(pdev, tcp, request);
4521 MM_RELEASE_TOE_LOCK(pdev);
4522 }
4523
4524 void lm_tcp_terminate_ramrod_complete(
4525 IN struct _lm_device_t * pdev,
4526 IN lm_tcp_state_t * tcp)
4527 {
4528 lm_tcp_slow_path_request_t * request = tcp->sp_request;
4529 MM_ACQUIRE_TOE_LOCK(pdev);
4530 tcp->sp_request = NULL;
4531 request->type = SP_REQUEST_QUERY;
4532 /* Clear the flags */
4533 DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
4534
4535 DbgBreakIf(!(tcp->sp_flags & SP_TCP_TRM_REQ_POSTED));
4536 tcp->sp_flags |= SP_TCP_TRM_REQ_COMP;
4537
4538     /* Part of the fast-terminate flow is to zeroize the timers context: turn off the number of active timers */
4539 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
4540 RESET_FLAGS(((struct toe_context *)tcp->ctx_virt)->timers_context.flags, __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS);
4541 }
4542
4543 lm_tcp_post_slow_path_request(pdev, tcp, request);
4544
4545 MM_RELEASE_TOE_LOCK(pdev);
4546 }
4547
4548 static void lm_tcp_rx_terminate_ramrod_complete(
4549 IN struct _lm_device_t * pdev,
4550 IN lm_tcp_state_t * tcp)
4551 {
4552 lm_tcp_con_t * rx_con = tcp->rx_con;
4553 MM_INIT_TCP_LOCK_HANDLE();
4554
4555 DbgMessage(pdev, VERBOSEl4rx, "## lm_tcp_terminate_ramrod_comp_rx\n");
4556
4557 DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4558
4559 mm_acquire_tcp_lock(pdev, rx_con);
4560 DbgBreakIf( mm_tcp_indicating_bufs(rx_con) );
4561 DbgBreakIf(rx_con->flags & TCP_TRM_REQ_COMPLETED);
4562 rx_con->flags |= TCP_TRM_REQ_COMPLETED;
4563 mm_release_tcp_lock(pdev, rx_con);
4564 }
4565
4566 static void lm_tcp_tx_terminate_ramrod_complete(
4567 IN struct _lm_device_t * pdev,
4568 IN lm_tcp_state_t * tcp)
4569 {
4570 lm_tcp_con_t * tx_con = tcp->tx_con;
4571 MM_INIT_TCP_LOCK_HANDLE();
4572
4573 DbgMessage(pdev, VERBOSEl4tx, "## lm_tcp_terminate_ramrod_comp_tx\n");
4574
4575 DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4576
4577 mm_acquire_tcp_lock(pdev, tx_con);
4578 DbgBreakIf(tx_con->flags & TCP_TRM_REQ_COMPLETED);
4579 tx_con->flags |= TCP_TRM_REQ_COMPLETED;
4580 mm_release_tcp_lock(pdev, tx_con);
4581
4582 }
4583
4584 /** Description
4585 * indicates that a fin request was received. Called from several
4586 * functions. Could also be called as a result of a delayed fin
4587 * Assumptions: called without any lock taken
4588 */
4589 static void lm_tcp_indicate_fin_received(
4590 struct _lm_device_t * pdev,
4591 lm_tcp_state_t * tcp
4592 )
4593 {
4594 lm_tcp_con_t * rx_con;
4595 u8_t ip_version;
4596 MM_INIT_TCP_LOCK_HANDLE();
4597
4598 DbgMessage(pdev, INFORMl4rx , "##lm_tcp_indicate_fin_received cid=%d\n", tcp->cid);
4599 DbgBreakIf( ! ( pdev && tcp ) );
4600
4601 ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
4602 LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].in_fin);
4603
4604 rx_con = tcp->rx_con;
4605
4606 mm_acquire_tcp_lock(pdev, rx_con);
4607
4608 rx_con->u.rx.flags &= ~TCP_CON_FIN_IND_PENDING;
4609
4610 /* Mark the connection as POST_BLOCKED due to Remote FIN Received */
4611 DbgBreakIf(rx_con->flags & TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED);
4612 rx_con->flags |= TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED;
4613 /* Abort pending Rx buffers */
4614 lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_SUCCESS);
4615
4616 mm_release_tcp_lock(pdev, rx_con);
4617
4618 /* Indicate the Remote FIN up to the client */
4619 mm_tcp_indicate_fin_received(pdev, tcp);
4620 }
4621
4622 void lm_tcp_process_retrieve_indication_cqe(
4623 struct _lm_device_t * pdev,
4624 lm_tcp_state_t * tcp,
4625 l4_upload_reason_t upload_reason)
4626 {
4627 u32_t rx_flags = 0;
4628 u32_t tx_flags = 0;
4629 DbgMessage(pdev, INFORMl4, "###lm_tcp_process_retrieve_indication_cqe cid=%d upload_reason=%d\n", tcp->cid, upload_reason);
4630
4631 /* assert that this CQE is allowed */
4632     /* we could receive this cqe after a RST / UPL, in which case we will not notify about it. */
4633 SET_FLAGS(rx_flags, TCP_RX_COMP_BLOCKED | TCP_UPLOAD_REQUESTED);
4634 SET_FLAGS(tx_flags, TCP_TX_COMP_BLOCKED);
4635
4636 /* we do need to notify about it even if it's after a FIN... */
4637 RESET_FLAGS(rx_flags, TCP_REMOTE_FIN_RECEIVED);
4638 RESET_FLAGS(tx_flags, TCP_FIN_REQ_COMPLETED);
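    /* Net effect of the masks built above: the retrieve indication is forwarded to the client
     * only if Rx is not completion-blocked for a reason other than a remote FIN, no upload has
     * already been requested, and Tx is not completion-blocked for a reason other than a
     * completed local FIN. */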
4639
4640 if (!GET_FLAGS(tcp->rx_con->flags, rx_flags) && !GET_FLAGS(tcp->tx_con->flags,tx_flags)) {
4641 SET_FLAGS(tcp->rx_con->flags, TCP_UPLOAD_REQUESTED);
4642 DbgMessage(pdev, INFORMl4, "###Indicating UP: cid=%d upload_reason=%d\n", tcp->cid, upload_reason);
4643 mm_tcp_indicate_retrieve_indication(pdev, tcp, upload_reason);
4644 }
4645 }
4646
4647 /* Assumption: called without any lock taken */
4648 static void lm_tcp_rx_fin_received_complete(
4649 struct _lm_device_t * pdev,
4650 lm_tcp_state_t * tcp,
4651 u8_t upload
4652 )
4653 {
4654 lm_tcp_con_t * rx_con;
4655 u8_t indicate = 1;
4656 u8_t is_empty_peninsula;
4657 MM_INIT_TCP_LOCK_HANDLE();
4658
4659 DbgMessage(pdev, INFORMl4rx, "###lm_tcp_rx_fin_received_complete cid=%d\n", tcp->cid);
4660 DbgBreakIf( ! (pdev && tcp) );
4661 DbgBreakIf( tcp->hdr.status != STATE_STATUS_NORMAL && tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4662
4663 rx_con = tcp->rx_con;
4664
4665 mm_acquire_tcp_lock(pdev, rx_con);
4666
4667 /* break if we received a fin on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
4668 DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list) );
4669
4670 /* Mark the connection as 'COMP_BLOCKED' and 'DB BLOCKED' */
4671 DbgBreakIf(rx_con->flags & TCP_REMOTE_FIN_RECEIVED);
4672 rx_con->flags |= TCP_REMOTE_FIN_RECEIVED;
4673 is_empty_peninsula = (rx_con->u.rx.gen_info.peninsula_nbytes > 0 ? 0 : 1);
4674 if (!is_empty_peninsula || mm_tcp_indicating_bufs(rx_con) ) {
4675 DbgMessage(pdev, INFORMl4, "lm_tcp_process_fin_received_cqe - postponing fin indication cid=%d\n", tcp->cid);
4676 rx_con->u.rx.flags |= TCP_CON_FIN_IND_PENDING;
4677 indicate = 0;
4678 }
4679
4680 tcp->tcp_state_calc.fin_reception_time = mm_get_current_time(pdev);
4681 if (tcp->tcp_state_calc.fin_reception_time == tcp->tcp_state_calc.fin_request_time) {
4682 tcp->tcp_state_calc.fin_request_time -= 1;
4683 }
4684
4685 mm_release_tcp_lock(pdev, rx_con);
4686
4687 if (indicate)
4688 {
4689 lm_tcp_indicate_fin_received(pdev, tcp);
4690 } else if(upload && !is_empty_peninsula)
4691 {
4692         /* we did not indicate the received fin, AND we got an upload request from FW, AND the peninsula is not empty,
4693            i.e. we _may_ be waiting for RQ buffers to be posted before we indicate the fin.
4694            Thus, we _may_ need to request an upload: */
4695
4696 /* imitate as if FW has sent an upload request CQE: */
4697 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
4698 pdev->toe_info.stats.total_fin_upld_requested++;
4699 }
4700 }
4701
4702
4703 static void lm_tcp_comp_empty_ramrod_request(
4704 IN struct _lm_device_t * pdev,
4705 IN lm_tcp_state_t * tcp)
4706 {
4707 lm_tcp_slow_path_request_t * sp_req = tcp->sp_request;
4708
4709 MM_ACQUIRE_TOE_LOCK(pdev);
4710
4711 DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
4712 sp_req->status = LM_STATUS_SUCCESS;
4713 tcp->sp_request = NULL;
4714 mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
4715
4716 MM_RELEASE_TOE_LOCK(pdev);
4717 }
4718
4719 static void lm_tcp_rx_empty_ramrod_complete(
4720 IN struct _lm_device_t * pdev,
4721 IN lm_tcp_state_t * tcp,
4722 IN u32_t sp_type)
4723 {
4724 u8_t indicate = 0;
4725
4726 DbgBreakIf(!tcp);
4727
4728 DbgMessage(pdev, INFORMl4rx | INFORMl4sp,
4729 "###lm_tcp_process_empty_slow_path_rcqe cid=%d, request->type=%d\n",
4730 tcp->cid, sp_type);
4731
4732 switch (sp_type) {
4733 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
4734 case SP_REQUEST_PENDING_TX_RST:
4735 break; /* relevant to scqe only */
4736 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
4737 if ( tcp->rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING ) {
4738 /* process it */
4739 MM_ACQUIRE_TOE_LOCK(pdev);
4740
4741 /* Mark Rx ready for RST indication - before it was marked as 'delayed' */
4742 tcp->sp_flags |= REMOTE_RST_INDICATED_RX;
4743
4744 if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
4745 indicate = 1;
4746 }
4747
4748 /* Release global TOE lock */
4749 MM_RELEASE_TOE_LOCK(pdev);
4750 if (indicate) {
4751 lm_tcp_indicate_rst_received(pdev, tcp);
4752 } /* o/w we haven't seen the TX yet... */
4753 }
4754 else if ( tcp->rx_con->u.rx.flags & TCP_CON_FIN_IND_PENDING ) {
4755 /* process it */
4756 lm_tcp_indicate_fin_received(pdev, tcp);
4757 }
4758 break;
4759 default:
4760 {
4761 DbgMessage(pdev, FATAL,
4762 "'empty ramrod' opcode in cqe doesn't fit with sp_request->type %d\n",
4763 sp_type);
4764 DbgBreak();
4765 }
4766 }
4767 }
4768
4769 static void lm_tcp_tx_empty_ramrod_complete(
4770 IN struct _lm_device_t * pdev,
4771 IN lm_tcp_state_t * tcp,
4772 IN u32_t sp_type)
4773 {
4774 u8_t indicate = 0;
4775 MM_INIT_TCP_LOCK_HANDLE();
4776
4777 DbgBreakIf(!tcp);
4778
4779 DbgMessage(pdev, INFORMl4tx | INFORMl4sp,
4780 "###lm_tcp_process_empty_slow_path_scqe cid=%d, request->type=%d\n",
4781 tcp->cid, sp_type);
4782
4783 switch (sp_type) {
4784 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
4785 /* process it */
4786 mm_acquire_tcp_lock(pdev, tcp->tx_con);
4787 lm_tcp_abort_bufs(pdev,tcp,tcp->tx_con,LM_STATUS_ABORTED);
4788 mm_release_tcp_lock(pdev, tcp->tx_con);
4789 break;
4790 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
4791 break; /* rcqe only */
4792 case SP_REQUEST_PENDING_TX_RST:
4793 /* safe to abort buffers at this stage - we know none are pending on pbf */
4794 if (tcp->tx_con->u.tx.flags & TCP_CON_RST_IND_NOT_SAFE ) {
4795 /* process it */
4796 MM_ACQUIRE_TOE_LOCK(pdev);
4797
4798             /* Mark Tx ready for RST indication - before it was marked as 'delayed' */
4799 tcp->sp_flags |= REMOTE_RST_INDICATED_TX;
4800
4801 if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
4802 indicate = 1;
4803 }
4804
4805 mm_acquire_tcp_lock(pdev, tcp->tx_con);
4806 tcp->tx_con->u.tx.flags &= ~TCP_CON_RST_IND_NOT_SAFE;
4807 mm_release_tcp_lock(pdev, tcp->tx_con);
4808
4809 /* Release global TOE lock */
4810 MM_RELEASE_TOE_LOCK(pdev);
4811 if (indicate) {
4812 lm_tcp_indicate_rst_received(pdev, tcp);
4813 } /* o/w we haven't seen the RX yet... */
4814 }
4815 break;
4816 default:
4817 {
4818 DbgMessage(pdev, FATAL,
4819 "'empty ramrod' opcode in cqe doesn't fit with sp_request->type %d\n",
4820 sp_type);
4821 DbgBreak();
4822 }
4823 }
4824 }
4825
4826 static void lm_tcp_comp_abortive_disconnect_request(
4827 struct _lm_device_t * pdev,
4828 lm_tcp_state_t * tcp,
4829 lm_tcp_slow_path_request_t * request
4830 )
4831 {
4832 lm_tcp_con_t *rx_con, *tx_con;
4833 u8_t delayed_rst = 0;
4834 u8_t ip_version;
4835 u8_t complete_sp_request = TRUE;
4836 MM_INIT_TCP_LOCK_HANDLE();
4837
4838 DbgBreakIf( ! (pdev && tcp && request) );
4839
4840 /* Update the statistics */
4841 ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
4842 LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].out_resets);
4843
4844 rx_con = tcp->rx_con;
4845 tx_con = tcp->tx_con;
4846
4847 /* Get global TOE lock */
4848 MM_ACQUIRE_TOE_LOCK(pdev);
4849
4850 /* The state may only be NORMAL or ABORTED (due to remote RST) */
4851 DbgBreakIf( ( tcp->hdr.status != STATE_STATUS_NORMAL ) && ( tcp->hdr.status != STATE_STATUS_ABORTED ) );
4852 /* the FW will always post a RST packet no matter if
4853 remote RST was already received, therefore, the
4854 completion status of the request is always SUCCESS */
4855 request->status = LM_STATUS_SUCCESS;
4856
4857 tcp->hdr.status = STATE_STATUS_ABORTED;
4858
4859 tcp->tcp_state_calc.con_rst_flag = TRUE;
4860
4861 /* Release global TOE lock */
4862 MM_RELEASE_TOE_LOCK(pdev);
4863
4864 /***************** Tx ********************/
4865 /* Get Tx lock */
4866 mm_acquire_tcp_lock(pdev, tx_con);
4867
4868 /* Clear delayed RST flag */
4869 tx_con->u.tx.flags &= ~ TCP_CON_RST_IND_NOT_SAFE;
4870 /* safe to abort buffers anyway, even if we have a non-safe tx abort, since this means that a ramrod has been sent so queues are clear */
4871 lm_tcp_abort_bufs(pdev,tcp,tx_con, LM_STATUS_ABORTED);
4872
4873 /* Release Tx lock */
4874 mm_release_tcp_lock(pdev, tx_con);
4875
4876 /***************** Rx ********************/
4877 /* Get Rx lock */
4878 mm_acquire_tcp_lock(pdev, rx_con);
4879
4880     /* 'POST/IND BLOCKED' in the request. Even if a post was in the middle, it must be done by now */
4881 if (mm_tcp_indicating_bufs(rx_con)) {
4882 if (pdev->params.l4_support_pending_sp_req_complete) {
4883 DbgBreakIf(DBG_BREAK_ON(ABORTIVE_DISCONNECT_DURING_IND));
4884 complete_sp_request = FALSE;
4885 tcp->sp_request_pending_completion = TRUE;
4886 tcp->pending_abortive_disconnect++;
4887 mm_atomic_inc(&pdev->toe_info.stats.total_aborive_disconnect_during_completion);
4888 DbgMessage(pdev, INFORMl4sp, "Abortive disconnect completion during indication(%d)\n", tcp->cid);
4889 } else {
4890 DbgBreak();
4891 }
4892 }
4893
4894 if ( rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING ) {
4895 delayed_rst = 1;
4896 }
4897
4898 /* Clear delayed RST and FIN flags */
4899 rx_con->u.rx.flags &= ~ (TCP_CON_RST_IND_PENDING | TCP_CON_FIN_IND_PENDING);
4900
4901 lm_tcp_abort_bufs(pdev,tcp, rx_con, LM_STATUS_ABORTED);
4902
4903 /* Release Rx lock */
4904 mm_release_tcp_lock(pdev, rx_con);
4905 /*****************************************/
4906
4907 if ( delayed_rst ) {
4908 /* GilR 10/15/2006 - TBD - since anyway we complete the request
4909 with status SUCCESS, we do not need to indicate a remote RST
4910 that was delayed. therefore the following call to
4911 mm_tcp_indicate_rst_received is canceled */
4912 //mm_tcp_indicate_rst_received(pdev, tcp);
4913 }
4914
4915 if (complete_sp_request) {
4916 /* Get global TOE lock */
4917 MM_ACQUIRE_TOE_LOCK(pdev);
4918
4919 DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
4920
4921 tcp->sp_request = NULL;
4922
4923 mm_tcp_comp_slow_path_request(pdev, tcp, request);
4924
4925 /* Release global TOE lock */
4926 MM_RELEASE_TOE_LOCK(pdev);
4927 }
4928 }
4929
4930 static void lm_tcp_rx_rst_received_complete (
4931 struct _lm_device_t * pdev,
4932 lm_tcp_state_t * tcp
4933 )
4934 {
4935 lm_tcp_con_t * rx_con;
4936 u8_t indicate = 0;
4937 MM_INIT_TCP_LOCK_HANDLE();
4938
4939 DbgMessage(pdev, INFORMl4rx , "###lm_tcp_process_rst_received_rcqe cid=%d\n", tcp->cid);
4940 DbgBreakIf( ! (pdev && tcp) );
4941 /* The state may only be NORMAL or UPLOAD_PENDING */
4942 DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
4943 (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
4944
4945 rx_con = tcp->rx_con;
4946
4947 /* Get global TOE lock */
4948 MM_ACQUIRE_TOE_LOCK(pdev);
4949
4950 /* Take the Rx lock */
4951 mm_acquire_tcp_lock(pdev, rx_con);
4952
4953 /* break if we received a rst on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
4954 DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list) );
4955
4956
4957 /* This will imply RX_COMP_LOCKED and RX_DB_BLOCKED */
4958 DbgBreakIf(rx_con->flags & TCP_REMOTE_RST_RECEIVED);
4959 rx_con->flags |= TCP_REMOTE_RST_RECEIVED;
4960
4961 /* Clear pending FIN */
4962 rx_con->u.rx.flags &= ~ TCP_CON_FIN_IND_PENDING;
4963
4964 /* Check if all received data has been completed towards the Client */
4965 if (rx_con->u.rx.gen_info.peninsula_nbytes || mm_tcp_indicating_bufs(rx_con) ) {
4966 DbgMessage(pdev, INFORMl4rx , "lm_tcp_process_rst_received_cqe - postponing rst indication cid=%d\n", tcp->cid);
4967 rx_con->u.rx.flags |= TCP_CON_RST_IND_PENDING;
4968 } else {
4969 /* Mark Rx ready for RST indication */
4970 tcp->sp_flags |= REMOTE_RST_INDICATED_RX;
4971 }
4972
4973 /* Release the Rx lock */
4974 mm_release_tcp_lock(pdev, rx_con);
4975
4976 if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
4977 indicate = 1;
4978 tcp->tcp_state_calc.con_rst_flag = TRUE;
4979 }
4980
4981 /* Release global TOE lock */
4982 MM_RELEASE_TOE_LOCK(pdev);
4983
4984 /* Indicate the RST to the Client if it was the second completion */
4985 if ( indicate ) {
4986 lm_tcp_indicate_rst_received(pdev,tcp);
4987 }
4988 }
4989
4990 static void lm_tcp_tx_rst_received_complete (
4991 struct _lm_device_t * pdev,
4992 lm_tcp_state_t * tcp
4993 )
4994 {
4995 lm_tcp_con_t * tx_con;
4996 lm_status_t lm_status;
4997 u8_t indicate = 0;
4998 u8_t send_empty_ramrod = 0;
4999 u8_t upload_on_fail = 0;
5000
5001 MM_INIT_TCP_LOCK_HANDLE();
5002
5003 DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_rst_received_complete cid=%d\n", tcp->cid);
5004 DbgBreakIf( ! (pdev && tcp) );
5005 /* The state may only be NORMAL or UPLOAD_PENDING */
5006 DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
5007 (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
5008
5009 tx_con = tcp->tx_con;
5010
5011 /* Get global TOE lock */
5012 MM_ACQUIRE_TOE_LOCK(pdev);
5013
5014 /* Take the Tx lock */
5015 mm_acquire_tcp_lock(pdev, tx_con);
5016
5017 /* This will imply TX_COMP_LOCKED and TX_DB_BLOCKED */
5018 DbgBreakIf(tx_con->flags & TCP_REMOTE_RST_RECEIVED);
5019 tx_con->flags |= TCP_REMOTE_RST_RECEIVED;
5020
5021     /* There is a potential race between receiving a reset and aborting buffers; once reset is received from the CSTORM it doesn't mean that
5022 * the pbf isn't trying to transmit any other buffers, to make sure that it flushes remaining buffers we need to pass a ramrod - any ramrod,
5023 * if the active_tb_list is not empty, if the tx post is blocked already, it means its too late, rst / fin / trm / inv were posted, so we don't
5024 * abort the buffers - they will be aborted later on... to make sure buffers aren't aborted we turn on the TCP_CON_RST_IND_NOT_SAFE flag. they'll
5025 * be aborted in terminate later on. we won't send the indication as well, we'll send it when completing terminate / empty ramrod later on.
5026 */
5027 /* Check if all received data has been completed towards the Client + terminate ramrod has not been posted yet */
5028 if ( s_list_entry_cnt(&tx_con->active_tb_list) > 0 ) {
5029 DbgMessage(pdev, INFORMl4rx, "TX lm_tcp_process_rst_received_cqe - postponing rst indication cid=%d sending empty ramrod\n", tcp->cid);
5030 tx_con->u.tx.flags |= TCP_CON_RST_IND_NOT_SAFE;
5031 /* send the empty ramrod only if we're not blocked already.
5032 * TCP_TX_POST_BLOCKED includes FIN_REQ_POSTED case in which we should send the empty ramrod,
5033          * and REMOTE_RST_RECEIVED_ALL_RX_INDICATED / TCP_POST_BLOCKED, which shouldn't be set when reaching this point,
5034 * so we'll check all other the relevant flags.
5035 * here we determine whether to send the ramrod according to the lm flags, it is possible that the ramrod will be dropped later
5036          * in the mm_tcp_post_empty_slow_path_request() due to an upload request pending in the um */
5037 if (!(tx_con->flags & (TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED))) {
5038 send_empty_ramrod = TRUE;
5039 }
5040 } else {
5041 /* Mark Tx ready for RST indication */
5042 tcp->sp_flags |= REMOTE_RST_INDICATED_TX;
5043 }
5044
5045 /* Release the Tx lock */
5046 mm_release_tcp_lock(pdev, tx_con);
5047
5048 if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
5049 indicate = 1;
5050 tcp->tcp_state_calc.con_rst_flag = TRUE;
5051 } else if ( tcp->sp_flags & REMOTE_RST_INDICATED_RX ) {
5052 upload_on_fail = 1; /* RX is done, the only reason that TX isn't is because it has buffers to abort, if we can't postpone tx, indicate anyway. */
5053 tcp->tcp_state_calc.con_rst_flag = TRUE;
5054 }
5055
5056 /* Indicate the RST to the Client if it was the second completion */
5057 if ( indicate ) {
5058 /* Release global TOE lock */
5059 MM_RELEASE_TOE_LOCK(pdev);
5060
5061 lm_tcp_indicate_rst_received(pdev,tcp);
5062 } else if (send_empty_ramrod) {
5063 /* Send empty ramrod, only when it is complete we can complete the reset i.e. tx reset received.
5064          * it is possible that the ramrod will be dropped due to an upload request pending in the um */
5065 DbgMessage(pdev, INFORMl4tx, "Sending Empty Ramrod TX\n");
5066 lm_status = mm_tcp_post_empty_slow_path_request(pdev, tcp, SP_REQUEST_PENDING_TX_RST);
5067
5068 /* Release global TOE lock */
5069 MM_RELEASE_TOE_LOCK(pdev);
5070
5071 if ((lm_status != LM_STATUS_PENDING) && (lm_status != LM_STATUS_UPLOAD_IN_PROGRESS)) { /* we expect the posting of an empty ramrod to be pending... */
5072 /* This is a bit of a problem here...we don't want to risk the pbf accessing released data, so instead
5073 * we risk the application turning an error, we delay the abort of buffers till the terminate stage.
5074 * we don't remove the RST_IND_PENDING... we'll look at that before aborting buffers... */
5075 if (upload_on_fail) {
5076 DbgMessage(pdev, WARNl4sp, "Couldn't send empty ramrod on TX when we needed\n");
5077
5078 /* instead of indicating the rst, which is NOT possible at this stage, ask for connection upload */
5079 mm_tcp_indicate_retrieve_indication(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5080 pdev->toe_info.stats.total_rst_upld_requested++;
5081 }
5082 }
5083 }
5084 else
5085 {
5086 /* Release global TOE lock */
5087 MM_RELEASE_TOE_LOCK(pdev);
5088 }
5089 }
5090
5091
5092 static void lm_tcp_rx_abortive_disconnect_ramrod_complete (
5093 struct _lm_device_t * pdev,
5094 lm_tcp_state_t * tcp)
5095 {
5096 lm_tcp_con_t * rx_con;
5097 MM_INIT_TCP_LOCK_HANDLE();
5098
5099 DbgMessage(pdev, INFORMl4rx, "###lm_tcp_process_abortive_disconnect_request_rcqe cid=%d\n", tcp->cid);
5100 DbgBreakIf( ! (pdev && tcp) );
5101
5102 rx_con = tcp->rx_con;
5103
5104 /* Take the Rx lock */
5105 mm_acquire_tcp_lock(pdev, rx_con);
5106
5107 /* break if we received a rst on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
5108 DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list) &&
5109 (((lm_tcp_gen_buf_t *)(d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.peninsula_list)))->placed_bytes == 0));
5110
5111 /* This implies COMP_BLOCKED */
5112 rx_con->flags |= TCP_RST_REQ_COMPLETED;
5113
5114     /* Release the Rx lock */
5115 mm_release_tcp_lock(pdev, rx_con);
5116 }
5117
5118 static void lm_tcp_tx_abortive_disconnect_ramrod_complete (
5119 struct _lm_device_t * pdev,
5120 lm_tcp_state_t * tcp)
5121 {
5122 lm_tcp_con_t * tx_con;
5123 MM_INIT_TCP_LOCK_HANDLE();
5124
5125 DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_abortive_disconnect_request_complete cid=%d\n", tcp->cid);
5126 DbgBreakIf( ! (pdev && tcp) );
5127
5128 tx_con = tcp->tx_con;
5129
5130 /* Take the Tx lock */
5131 mm_acquire_tcp_lock(pdev, tx_con);
5132
5133 /* This implies COMP_BLOCKED */
5134 tx_con->flags |= TCP_RST_REQ_COMPLETED;
5135
5136 /* Release the Tx lock */
5137 mm_release_tcp_lock(pdev, tx_con);
5138 }
5139
5140
5141
5142 static void lm_tcp_comp_invalidate_request(
5143 struct _lm_device_t * pdev,
5144 lm_tcp_state_t * tcp,
5145 lm_tcp_slow_path_request_t * request)
5146 {
5147 DbgMessage(pdev, INFORMl4sp, "### Completing invalidate request cid=%d\n", tcp->cid);
5148
5149 MM_ACQUIRE_TOE_LOCK(pdev);
5150
5151 DbgBreakIf(!pdev || !tcp);
5152 DbgBreakIf(tcp->hdr.status != STATE_STATUS_NORMAL && tcp->hdr.status != STATE_STATUS_ABORTED);
5153
5154 tcp->hdr.status = STATE_STATUS_INVALIDATED;
5155
5156 tcp->sp_request = NULL;
5157
5158 request->status = LM_STATUS_SUCCESS;
5159
5160 DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
5161
5162 mm_tcp_comp_slow_path_request(pdev, tcp, request);
5163
5164 MM_RELEASE_TOE_LOCK(pdev);
5165 }
5166
5167
5168 static void lm_tcp_tx_invalidate_ramrod_complete (
5169 struct _lm_device_t * pdev,
5170 lm_tcp_state_t * tcp)
5171 {
5172 lm_tcp_con_t * tx_con;
5173 MM_INIT_TCP_LOCK_HANDLE();
5174
5175 DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_invalidate_request_complete cid=%d\n", tcp->cid);
5176
5177 DbgBreakIf( ! (pdev && tcp) );
5178
5179 tx_con = tcp->tx_con;
5180
5181 /* Take the Tx lock */
5182 mm_acquire_tcp_lock(pdev, tx_con);
5183
5184 /* This implies COMP_BLOCKED */
5185 DbgBreakIf(tx_con->flags & TCP_INV_REQ_COMPLETED);
5186 tx_con->flags |= TCP_INV_REQ_COMPLETED;
5187
5188 /* Release the Tx lock */
5189 mm_release_tcp_lock(pdev, tx_con);
5190 }
5191
5192
5193 static void lm_tcp_rx_invalidate_ramrod_complete (
5194 struct _lm_device_t * pdev,
5195 lm_tcp_state_t * tcp)
5196 {
5197 lm_tcp_con_t * rx_con;
5198 MM_INIT_TCP_LOCK_HANDLE();
5199
5200 DbgMessage(pdev, INFORMl4rx, "###lm_tcp_process_invalidate_request_rcqe cid=%d\n", tcp->cid);
5201 DbgBreakIf( ! (pdev && tcp) );
5202
5203 rx_con = tcp->rx_con;
5204
5205
5206 /* Take the Rx lock */
5207 mm_acquire_tcp_lock(pdev, rx_con);
5208 /* 'POST/IND BLOCKED' in the request.
5209        Even if a post was in the middle, it must be done by now
5210 */
5211 DbgBreakIf( mm_tcp_indicating_bufs(rx_con) );
5212
5213 /* break if we received an invalidate on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
5214 DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list) &&
5215 (((lm_tcp_gen_buf_t *)(d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.peninsula_list)))->placed_bytes == 0));
5216
5217 /* This implies COMP_BLOCKED */
5218 DbgBreakIf(rx_con->flags & TCP_INV_REQ_COMPLETED);
5219 rx_con->flags |= TCP_INV_REQ_COMPLETED;
5220
5221 /* Release the Rx lock */
5222 mm_release_tcp_lock(pdev, rx_con);
5223 }
5224
5225
5226 static void lm_tcp_get_delegated(
5227 IN struct _lm_device_t * pdev,
5228 IN lm_tcp_state_t * tcp,
5229 IN void * ctx_p /* context with updated data */
5230 )
5231 {
5232 struct xstorm_toe_tcp_ag_context_section * xag_tcp = NULL;
5233 struct tstorm_tcp_st_context_section * tst_tcp = NULL;
5234 struct xstorm_tcp_context_section * xst_tcp = NULL;
5235 struct tstorm_toe_tcp_ag_context_section * tag_tcp = NULL;
5236
5237 struct ustorm_toe_st_context * ust_toe = NULL;
5238 struct cstorm_toe_st_context * cst_toe = NULL;
5239 struct xstorm_toe_ag_context * xag_toe = NULL;
5240 struct xstorm_toe_context_section * xst_toe = NULL;
5241
5242 u32_t send_wnd;
5243 u8_t sanity_check;
5244
5245 ASSERT_STATIC(sizeof(struct xstorm_toe_tcp_ag_context_section) == sizeof(struct xstorm_tcp_tcp_ag_context_section));
5246 ASSERT_STATIC(sizeof(struct tstorm_toe_tcp_ag_context_section) == sizeof(struct tstorm_tcp_tcp_ag_context_section));
5247
5248 sanity_check = FALSE;
5249
5250 /* Set shortcuts... and take care of driver delegated params. */
5251 if (tcp->ulp_type == TOE_CONNECTION_TYPE)
5252 {
5253 xst_tcp = &((struct toe_context *)ctx_p)->xstorm_st_context.context.common.tcp;
5254 xag_tcp = &((struct toe_context *)ctx_p)->xstorm_ag_context.tcp;
5255 tst_tcp = &((struct toe_context *)ctx_p)->tstorm_st_context.context.tcp;
5256 tag_tcp = &((struct toe_context *)ctx_p)->tstorm_ag_context.tcp;
5257
5258 xst_toe = &((struct toe_context *)ctx_p)->xstorm_st_context.context.toe;
5259 xag_toe = &((struct toe_context *)ctx_p)->xstorm_ag_context;
5260 cst_toe = &((struct toe_context *)ctx_p)->cstorm_st_context.context;
5261 ust_toe = &((struct toe_context *)ctx_p)->ustorm_st_context.context;
5262
5263 if (S32_SUB(tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge, tcp->rx_con->db_data.rx->rcv_win_right_edge) < 0) {
5264 /* due to window decrease issues... */
5265 tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->rx_con->db_data.rx->rcv_win_right_edge;
5266 }
5267
5268         /* RcvWnd = WndRightEdge - RcvNext */
5269 /* recv_win_seq is determined by the driver, and therefore is the most up-to-date value,
5270 * we also have to add any pending indicated bytes to this value, and this is because we don't
5271          * add them immediately, only when the buffer is returned, to help limit our GRQ pool. */
5272 tcp->tcp_delegated.recv_win_seq = tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge +
5273 tcp->rx_con->u.rx.gen_info.pending_indicated_bytes;
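        /* Purely illustrative example (hypothetical values): if the driver's receive window right
         * edge is sequence 0x00011000 and 0x200 indicated bytes are still pending return, the
         * recv_win_seq reported on upload would be 0x00011200. */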
5274
5275 if (!lm_reset_is_inprogress(pdev))
5276 {
5277 sanity_check = TRUE;
5278 }
5279
5280 }
5281 else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
5282 {
5283 xst_tcp = &((struct iscsi_context *)ctx_p)->xstorm_st_context.common.tcp;
5284 xag_tcp = (struct xstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)ctx_p)->xstorm_ag_context.tcp;
5285 tst_tcp = &((struct iscsi_context *)ctx_p)->tstorm_st_context.tcp;
5286 tag_tcp = (struct tstorm_toe_tcp_ag_context_section *)&((struct toe_context *)ctx_p)->tstorm_ag_context.tcp;
5287
5288         /* RcvWnd = WndRightEdge - RcvNext */
5289 tcp->tcp_delegated.recv_win_seq = tag_tcp->wnd_right_edge - tst_tcp->rcv_nxt;
5290 }
5291 else
5292 {
5293 DbgBreakMsg("lm_tcp_get_delegated: Unsupported protocol type \n") ;
5294 return;
5295 }
5296
5297 /* Sanity Checks: (block below)
5298 * the purpose for sanity checks below, under debug only is to find a problem in FW delegated params before
5299 * we send them to OS in which case it may assert later on, or worse after several offloads.
5300 * Perform sanity checks only if chip isn't under reset... In case of error recovery for example, these delegated
5301 * params may be rubbish, it's ok since in the same case we'll also send a LM_STATUS_FAILURE in the upload completion.
5302 */
5303 if (sanity_check)
5304 {
5305
5306 /* context sanity checks */
5307 #if !defined(_VBD_CMD_)
5308 /* check that DMA write towards host is done */
5309 DbgBreakIf(((struct toe_context *)ctx_p)->ustorm_ag_context.__state == 0);
5310 DbgBreakIf(((struct toe_context *)ctx_p)->tstorm_ag_context.__state == 0);
5311 DbgBreakIf(((struct toe_context *)ctx_p)->xstorm_ag_context.__state == 0);
5312 /* needs to be: t <= x <= u <= drv */
5313 /* driver window right edge >= ust.prev_rcv_win_right_edge >= xag.local_adv_wnd >= tag.wnd_right_edge (cyclic)*/
5314 // apply in w2k3
5315 // DbgBreakIf(S32_SUB(xag_tcp->local_adv_wnd, tag_tcp->wnd_right_edge) < 0);
5316 // DbgBreakIf(S32_SUB(ust_toe->prev_rcv_win_right_edge, xag_tcp->local_adv_wnd) < 0);
5317 // DbgBreakIf(S32_SUB(tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge, ust_toe->prev_rcv_win_right_edge) < 0);
5318 /* xag.snd_nxt <= xst.snd_max */
5319 DbgBreakIf(S32_SUB(xag_tcp->snd_nxt, xst_tcp->snd_max) > 0);
5320 /* xag.snd_una <= tag.snd_una <= tag.snd_max <= xst.snd_max */
5321 DbgBreakIf(S32_SUB(xag_tcp->snd_una, tag_tcp->snd_una) != 0);
5322 DbgBreakIf(S32_SUB(tag_tcp->snd_una, tag_tcp->snd_max) > 0);
5323 // TBD: the assert is not valid, discuess with FW regarding a change. DbgBreakIf(S32_SUB(tag_tcp->snd_max, xst_tcp->snd_max) > 0);
5324 /* xag.cmp_bd_start_seq <= c.cmp_bd_start_seq <= tag.snd_una */
5325 DbgBreakIf(S32_SUB(xag_toe->cmp_bd_start_seq, tag_tcp->snd_una) > 0);
5326 /* tst.rcv_nxt >= xag.ack_to_far_end */
5327 DbgBreakIf(S32_SUB(tst_tcp->rcv_nxt, xag_tcp->ack_to_far_end) != 0);
5328 /* tst.rcv_nxt >= tst.prev_seg_seq */
5329 //DbgBreakIf(S32_SUB(tst_tcp->rcv_nxt, tst_tcp->prev_seg_seq) < 0);
5330 /* xag.cmp_bd_cons <= cst.bd_cons <= xst.tx_bd_cons <= xst.bd_prod <= Driver bd prod (16 bit cyclic) */
5331 DbgBreakIf(S16_SUB(xag_toe->cmp_bd_cons, cst_toe->bd_cons) > 0);
5332 DbgBreakIf(S16_SUB(xst_toe->tx_bd_cons, xst_toe->bd_prod) > 0);
5333 DbgBreakIf(S16_SUB(xst_toe->bd_prod, tcp->tx_con->db_data.tx->bds_prod) > 0);
5334 DbgBreakIf(S32_SUB(tag_tcp->snd_una, xag_tcp->snd_nxt) > 0);
5335 /* timestamp: */
5336 /* tst.timestamp_exists == xst.ts_enable -- ? can't find fields in fw*/
5337
5338 /* tst.timestamp_recent >= xag.ts_to_echo (cyclic) */
5339 DbgBreakIf(S32_SUB(tst_tcp->timestamp_recent, xag_tcp->ts_to_echo) < 0);
5340
5341 /* fin: ?? can't find fields in fw */
5342 /* if (xst.fin_sent_flag) then bds should contain bd with fin // driver flag 'sent-fin' */
5343 /* if (tag.fin_sent_flag) then xst.fin_sent_flag */
5344
5345
5346 /* check that rcv nxt has the expected value compared to bytes that were completed on rx application buffers and generic buffers */
5347 /* rx_bytes_recv = tcp->rx_con->bytes_comp_cnt +
5348 tcp->rx_con->u.rx.gen_info.bytes_indicated_accepted +
5349 (tcp->sp_request->ret_data.tcp_upload_data.frag_list ? tcp->sp_request->ret_data.tcp_upload_data.frag_list->size : 0) -
5350 tcp->rx_con->bytes_push_skip_cnt -
5351 if (tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED)
5352 {
5353 DbgBreakIf(((u32_t)(tcp->tcp_delegated.recv_next + (u32_t)rx_bytes_recv + 1) != tst_tcp->rcv_nxt));
5354 } else
5355 {
5356 DbgBreakIf(((u32_t)(tcp->tcp_delegated.recv_next + (u32_t)rx_bytes_recv) != tst_tcp->rcv_nxt));
5357 }
5358 */
5359 /* check that cstrom rel seq is equal to tstorm snd una */
5360 DbgBreakIf(((struct toe_context *)ctx_p)->cstorm_ag_context.rel_seq != tag_tcp->snd_una);
5361
5362 /* check that snd una has the expected value compared to bytes that were completed on tx application buffers */
5363 DbgBreakIf((u32_t)(tcp->tcp_delegated.send_una + (u32_t)tcp->tx_con->bytes_comp_cnt + (u32_t)tcp->tx_con->bytes_trm_aborted_cnt - (u32_t)tcp->tx_con->bytes_aborted_cnt) != tag_tcp->snd_una);
5364 #endif
5365
5366 }
5367
5368 /* Set the updated delegated parameters */
5369 tcp->tcp_delegated.recv_next = tst_tcp->rcv_nxt;
5370
5371 tcp->tcp_delegated.send_una = tag_tcp->snd_una;
5372 tcp->tcp_delegated.send_next = xag_tcp->snd_nxt;
5373 tcp->tcp_delegated.send_max = xst_tcp->snd_max;
5374 /* recent_seg_wnd is the value received in the last packet from the other side. This means this value is scaled,
5375 * therefore we need to get the absolute value by 'unscaling' it */
5376 tcp->tcp_delegated.send_win = (tst_tcp->recent_seg_wnd << tcp->tcp_const.snd_seg_scale)
5377 + tcp->tcp_delegated.send_una;
5378 send_wnd = tst_tcp->recent_seg_wnd << tcp->tcp_const.snd_seg_scale;
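    /* Illustrative example (hypothetical values): with recent_seg_wnd = 0x2000 and
     * snd_seg_scale = 3, the absolute send window is 0x2000 << 3 = 0x10000 bytes, and
     * send_win is that value added to send_una. */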
5379
5380 /* Does not come from chip! Driver uses what the chip returned for SndWnd,
5381        and takes the maximum between that, all past query results for this parameter,
5382 and 2 * MSS.
5383 */
5384 if ( tcp->tcp_delegated.max_send_win < tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) {
5385 tcp->tcp_delegated.max_send_win = tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una;
5386 }
5387
5388 tcp->tcp_delegated.send_wl1 = tst_tcp->prev_seg_seq;
5389 tcp->tcp_delegated.send_cwin = tst_tcp->cwnd + tcp->tcp_delegated.send_una;
5390 tcp->tcp_delegated.ss_thresh = tst_tcp->ss_thresh;
5391
5392 tcp->tcp_delegated.sm_rtt = (tst_tcp->flags1 & TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT)
5393 >> TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT;
5394 tcp->tcp_delegated.sm_delta = (tst_tcp->flags2 & TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION)
5395 >> TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT;
5396 /* convert ms to ticks. */
5397 //16/09/2008 NirV: Assert removed, return upon fw fix
5398 //DbgBreakIf(tcp->tcp_delegated.sm_rtt > (35*TIMERS_TICKS_PER_SEC));
5399 //DbgBreakIf(tcp->tcp_delegated.sm_delta > (35*TIMERS_TICKS_PER_SEC));
5400
5401 tcp->tcp_delegated.sm_rtt =
5402 lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second)*8;
5403 tcp->tcp_delegated.sm_delta =
5404 lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second)*4;
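    /* Note (assumption, not stated in the code): the trailing *8 for sm_rtt and *4 for sm_delta
     * presumably undo the conventional TCP scaled representation in which SRTT is kept in 1/8
     * units and the RTT variation in 1/4 units (as in RFC 6298), so the OS receives unscaled
     * tick values. */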
5405
5406 tcp->tcp_delegated.ts_recent = tst_tcp->timestamp_recent;
5407 /* convert ms to ticks. */
5408 tcp->tcp_delegated.ts_recent_age =
5409 lm_time_resolution(pdev, tst_tcp->timestamp_recent_time, TSEMI_CLK1_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
5410
5411 tcp->tcp_delegated.tstamp = xst_tcp->ts_time_diff;
5412 /* convert ms to ticks. */
5413 tcp->tcp_delegated.total_rt =
5414 lm_time_resolution(pdev, tst_tcp->retransmit_start_time, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
5415
5416 tcp->tcp_delegated.dup_ack_count = tst_tcp->dup_ack_count;
5417 tcp->tcp_delegated.snd_wnd_probe_count = tst_tcp->persist_probe_count;
5418
5419 if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max && (send_wnd > 0)) { /* KA is running (?) */
5420 if ( (tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE)) {
5421
5422 tcp->tcp_delegated.u.keep_alive.probe_cnt = tst_tcp->ka_probe_count;
5423
5424 /* convert ms to ticks. */
5425 tcp->tcp_delegated.u.keep_alive.timeout_delta =
5426 lm_time_resolution(pdev, xag_tcp->ka_timer, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
5427
5428 /* ka timeout may be negative in cases that it expired and timer was armed for other purposes. In this case - we write 0 to the
5429 * timeout delta - OS will treat this as if timer has just expired */
5430             /* bugbug: for some reason we get a 28-bit value from FW (the timer block's bus width is 28 bits - ariel), so a value such as 0xffffff9 is actually negative...
5431              * so instead of checking for negative - we just check if it's larger than 0x8000000 */
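            /* For example, 0xffffff9 read as a 28-bit two's-complement value is -7, which is why
             * any value above 0x8000000 (the 28-bit sign bit) is treated here as an already
             * expired timer. */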
5432 if ((tcp->tcp_delegated.u.keep_alive.timeout_delta != 0xffffffff) &&
5433 (tcp->tcp_delegated.u.keep_alive.timeout_delta > 0x8000000)) {
5434 tcp->tcp_delegated.u.keep_alive.timeout_delta = 0;
5435 }
5436 } else { //ka disabled
5437 tcp->tcp_delegated.u.keep_alive.probe_cnt = 0;
5438 tcp->tcp_delegated.u.keep_alive.timeout_delta = 0xffffffff;
5439 }
5440 } else {
5441 tcp->tcp_delegated.u.retransmit.num_retx = tst_tcp->retransmit_count;
5442         //TBD: Ariel, why does it come from the same place as TotalRT?
5443 /* TODO: we need to convert retx_ms to clock ticks in VBD instead of
5444 * doing this conversion in NDIS (same as Teton) */
5445
5446 /* rto_timer may be negative in cases that it expired and timer was armed for other purposes. In this case - we write 0 to the
5447 * retx_ms - OS will treat this as if timer has just expired and immediately retransmit. */
5448         /* bugbug: for some reason we get a 28-bit value from FW, so a value such as 0xffffff9 is actually negative... so instead of checking for
5449          * negative - we just check if it's larger than 0x8000000 */
5450 if ((xag_tcp->rto_timer != 0xffffffff) && (xag_tcp->rto_timer > 0x8000000)) {
5451 tcp->tcp_delegated.u.retransmit.retx_ms = 0;
5452 } else {
5453 tcp->tcp_delegated.u.retransmit.retx_ms = xag_tcp->rto_timer;
5454 }
5455 }
5456
5457 /* Calculate the TCP connection state */
5458 tcp->tcp_delegated.con_state = lm_tcp_calc_state(pdev, tcp,
5459 xst_tcp->tcp_params & XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG ? 1 : 0);
5460 pdev->toe_info.stats.con_state_on_upload[tcp->tcp_delegated.con_state]++;
5461 }
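
/* Illustrative sketch (not driver code): the FW timer block is 28 bits wide,
 * so an expired ("negative") timer reads back as a large 28-bit value. The
 * keep-alive and RTO checks above treat anything larger than 0x8000000
 * (bit 27 set) as negative, except for the special 0xffffffff "timer not
 * armed" marker. A minimal helper capturing that rule could look like this
 * (hypothetical name, kept under #if 0 so it is never compiled):
 */
#if 0
static __inline u8_t _toe_fw_timer_is_negative(u32_t timer_val)
{
    /* 0xffffffff marks a timer that is not currently armed */
    if (timer_val == 0xffffffff) {
        return 0;
    }
    /* bit 27 is effectively the sign bit of the 28-bit timer bus */
    return (timer_val > 0x8000000) ? 1 : 0;
}
#endif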
5462
5463
5464 void lm_init_sp_req_type(
5465 struct _lm_device_t * pdev,
5466 lm_tcp_state_t * tcp,
5467 lm_tcp_slow_path_request_t * lm_req,
5468 void * req_input_data)
5469 {
5470
5471 UNREFERENCED_PARAMETER_(pdev);
5472
5473 switch(lm_req->type) {
5474 case SP_REQUEST_INITIATE_OFFLOAD:
5475 case SP_REQUEST_TERMINATE_OFFLOAD:
5476 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
5477 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
5478 case SP_REQUEST_PENDING_TX_RST:
5479 case SP_REQUEST_ABORTIVE_DISCONNECT:
5480 case SP_REQUEST_INVALIDATE:
5481 break;
5482 case SP_REQUEST_UPDATE_TCP:
5483 case SP_REQUEST_UPDATE_PATH:
5484 case SP_REQUEST_UPDATE_NEIGH:
5485 case SP_REQUEST_UPDATE_PATH_RELINK:
5486 lm_req->sent_data.tcp_update_data.data = req_input_data;
5487 break;
5488 case SP_REQUEST_QUERY:
5489 DbgBreakMsg("GilR - NOT IMPLEMENTED!\n");
5490 break;
5491 default:
5492 DbgBreakMsg("Illegal slow path request type!\n");
5493 }
5494
5495 /* initialize common section of the sp request */
5496 lm_req->sp_req_common.req_post_func = (void *)lm_tcp_post_slow_path_request;
5497 lm_req->sp_req_common.req_post_ctx = tcp;
5498 }
5499
5500
5501
5502 static void _lm_tcp_comp_upload_tcp_request (
5503 IN struct _lm_device_t * pdev,
5504 IN lm_tcp_state_t * tcp
5505 )
5506 {
5507 lm_tcp_con_t * rx_con = tcp->rx_con;
5508 lm_tcp_con_t * tx_con = tcp->tx_con;
5509 u8_t has_fin = 0;
5510 u8_t has_rst = 0;
5511 lm_tcp_slow_path_request_t * sp_req = tcp->sp_request;
5512 lm_path_state_t * path = NULL;
5513 lm_status_t lm_status = LM_STATUS_SUCCESS;
5514 #if 0 // TODO: add WINDOW_DEC validation check in w2k3, implement upon os type identification in the lm
5515 #if (DBG && !defined(_VBD_CMD_) && !defined(__USER_MODE_DEBUG))
5516 u32_t expect_rwin;
5517 #endif
5518 #endif
5519 MM_INIT_TCP_LOCK_HANDLE();
5520
5521 /* status will be changed only after upload completion returns from the client */
5522
5523 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
5524 /* Abort Tx buffers and pending graceful disconnect request if any */
5525 mm_acquire_tcp_lock(pdev, tx_con);
5526 lm_tcp_abort_bufs(pdev, tcp, tx_con, (tx_con->flags & TCP_CON_RST_IND_NOT_SAFE)? LM_STATUS_CONNECTION_RESET : LM_STATUS_UPLOAD_IN_PROGRESS);
5527
5528 /* Remember pending RST if any */
5529 has_rst |= (tx_con->u.tx.flags & TCP_CON_RST_IND_NOT_SAFE) ? 1 : 0;
5530
5531 /* Clear pending RST */
5532 tx_con->u.tx.flags &= ~(TCP_CON_RST_IND_NOT_SAFE);
5533
5534 mm_release_tcp_lock(pdev, tx_con);
5535
5536 /* Rx abortive part... */
5537 mm_acquire_tcp_lock(pdev, rx_con);
5538 /* Abort pending buffers */
5539 lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_UPLOAD_IN_PROGRESS);
5540
5541 /* Remember pending FIN if any */
5542 has_fin = rx_con->u.rx.flags & TCP_CON_FIN_IND_PENDING ? 1 : 0;
5543
5544 /* Remember pending RST if any */
5545 has_rst |= (rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING) ? 1 : 0;
5546
5547 /* Clear pending FIN and RST */
5548 rx_con->u.rx.flags &= ~(TCP_CON_FIN_IND_PENDING | TCP_CON_RST_IND_PENDING);
5549
5550 /* Get generic data that hasn't been indicated so far */
5551 lm_status = lm_tcp_rx_get_buffered_data_from_terminate(pdev, tcp,
5552 &(tcp->sp_request->ret_data.tcp_upload_data.frag_list),
5553 &(tcp->sp_request->ret_data.tcp_upload_data.ret_buf_ctx)
5554 );
5555 mm_release_tcp_lock(pdev, rx_con);
5556
5557 /* check if we have a delayed fin */
5558 /* assumption: if we have a delayed-fin, it means we have buffered data*/
5559         /* OS can't handle a fin indication followed by buffered data */
5560 /* DbgBreakIf(has_fin && !sp_req->ret_data.tcp_upload_data.frag_list); */
5561 /* DbgBreakIf(has_rst && !sp_req->ret_data.tcp_upload_data.frag_list); */
5562
5563 /* check if we have a delayed rst (rst is sp so no locks) */
5564 if ( has_rst ) {
5565 mm_tcp_indicate_rst_received(pdev, tcp);
5566 }
5567 }
5568
5569 /* Indication part */
5570 MM_ACQUIRE_TOE_LOCK(pdev);
5571
5572 DbgBreakIf(!(tcp->sp_flags & SP_TCP_QRY_REQ_POSTED));
5573 tcp->sp_flags |= SP_TCP_QRY_REQ_COMP;
5574
5575 /* Update delegated parameters */
5576 lm_tcp_get_delegated(pdev, tcp, &tcp->sp_req_data.virt_addr->toe_ctx);
5577
5578 tcp->sp_request = NULL;
5579 sp_req->status = lm_status;
5580
5581 /* Indicate SP request completion up to the client */
5582 /* Set the request type to TERMINATE_OFFLOAD as it was set by UM during the post */
5583 sp_req->type = SP_REQUEST_TERMINATE_OFFLOAD;
5584
5585 DbgBreakIf(tcp->path->num_dependents == 0);
5586 tcp->path->num_dependents--;
5587
5588 // update stats counters if TOE
5589 if (TOE_CONNECTION_TYPE == tcp->ulp_type )
5590 {
5591 if( IP_VERSION_IPV4 == tcp->path->path_const.ip_version )
5592 {
5593 --pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
5594 }
5595 else if( IP_VERSION_IPV6 == tcp->path->path_const.ip_version )
5596 {
5597 --pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
5598 }
5599 }
5600
5601 if (tcp->path->hdr.status == STATE_STATUS_UPLOAD_PENDING &&
5602 tcp->path->num_dependents == 0) {
5603         /* last pending-upload-path dependent... */
5604 path = tcp->path;
5605 }
5606 tcp->path = NULL;
5607
5608 #if 0 // TODO: add WINDOW_DEC validation check in w2k3, implement upon os type identification in the lm
5609 if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
5610 #if (DBG && !defined(_VBD_CMD_) && !defined(__USER_MODE_DEBUG))
5611 expect_rwin = (u32_t) S32_SUB(
5612 tcp->tcp_delegated.recv_win_seq,
5613 tcp->tcp_delegated.recv_next);
5614 /* These asserts are not valid for WSD connections. */
5615 if(sp_req->ret_data.tcp_upload_data.frag_list)
5616 {
5617 expect_rwin += (u32_t)sp_req->ret_data.tcp_upload_data.frag_list->size;
5618 }
5619
5620 /* If we received a fin / rst we may be down by one on the initial_rcv_wnd... */
5621 if((tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED) ||
5622 (tcp->rx_con->flags & TCP_REMOTE_RST_RECEIVED))
5623 {
5624 DbgBreakIf(
5625 (expect_rwin != tcp->tcp_cached.initial_rcv_wnd) &&
5626 (expect_rwin != tcp->tcp_cached.initial_rcv_wnd - 1));
5627 }
5628 else
5629 {
5630 DbgBreakIf(expect_rwin != tcp->tcp_cached.initial_rcv_wnd);
5631 }
5632 #endif
5633 }
5634 #endif
5635
5636 mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
5637
5638 if (path) {
5639 DbgMessage(pdev, INFORMl4sp, "_lm_tcp_comp_upload_request: last tcp dependent of pending path %p\n", path);
5640 _lm_tcp_comp_upload_path_request(pdev, path);
5641 }
5642
5643 MM_RELEASE_TOE_LOCK(pdev);
5644
5645
5646 }
5647
5648 lm_tcp_state_t * lm_tcp_get_next_path_dependent(
5649 struct _lm_device_t *pdev,
5650 void *path_state,
5651 lm_tcp_state_t * tcp_state)
5652 {
5653 if (tcp_state == NULL) {
5654 tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
5655 } else {
5656 tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5657 }
5658
5659 while(tcp_state) {
5660 /* Update the tcp state only if it is a dependent and is not being offloaded,
5661 * invalidated, or uploaded. */
5662 if (tcp_state->path == (lm_path_state_t*)path_state) {
5663 return tcp_state;
5664 }
5665 tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5666 }
5667 return NULL;
5668
5669 }
5670
5671
5672 lm_tcp_state_t * lm_tcp_get_next_neigh_dependent(
5673 struct _lm_device_t *pdev,
5674 void * neigh_state,
5675 lm_tcp_state_t * tcp_state)
5676 {
5677 if (tcp_state == NULL) {
5678 tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
5679 } else {
5680 tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5681 }
5682
5683 while(tcp_state) {
5684 /* Update the tcp state only if it is a dependent and is not being offloaded,
5685 * invalidated, or uploaded. */
5686 if (tcp_state->path && (tcp_state->path->neigh == (lm_neigh_state_t*)neigh_state)) {
5687 return tcp_state;
5688 }
5689 tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5690 }
5691 return NULL;
5692 }
5693
5694
5695 void lm_tcp_update_ramrod_complete(lm_device_t * pdev, lm_tcp_state_t * tcp)
5696 {
5697 lm_tcp_slow_path_request_t *sp_req;
5698 MM_INIT_TCP_LOCK_HANDLE();
5699
5700 DbgMessage(pdev, INFORMl4sp, "###lm_tcp_update_ramrod_complete cid=%d \n", tcp->cid);
5701
5702 MM_ACQUIRE_TOE_LOCK(pdev);
5703
5704 /* assert state status is NORMAL */
5705 DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
5706 (tcp->hdr.status != STATE_STATUS_ABORTED));
5707 DbgBreakIf(tcp->sp_request == NULL);
5708 DbgBreakIf((tcp->sp_request->type != SP_REQUEST_UPDATE_NEIGH) &&
5709 (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH) &&
5710 (tcp->sp_request->type != SP_REQUEST_UPDATE_TCP) &&
5711 (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH_RELINK));
5712
5713 sp_req = tcp->sp_request;
5714 DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
5715 sp_req->status = LM_STATUS_SUCCESS;
5716 tcp->sp_request = NULL;
5717
5718 /* Take the Rx lock */
5719 mm_acquire_tcp_lock(pdev, tcp->rx_con);
5720 if ((sp_req->type == SP_REQUEST_UPDATE_TCP) && (GET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES)))
5721 {
5722 lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp->rx_con->dpc_info.dpc_fw_wnd_after_dec, TCP_RX_POST_SWS_SET);
5723 }
5724 /* Release the Rx lock */
5725 mm_release_tcp_lock(pdev, tcp->rx_con);
5726
5727 mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
5728
5729 MM_RELEASE_TOE_LOCK(pdev);
5730 }
5731
5732
5733 void lm_tcp_query_ramrod_complete(
5734 IN struct _lm_device_t * pdev,
5735 IN lm_tcp_state_t * tcp
5736 )
5737 {
5738 DbgMessage(pdev, VERBOSEl4, "## lm_tcp_query_ramrod_comp\n");
5739 DbgBreakIf(! tcp->sp_request );
5740 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_QUERY);
5741
5742 if (tcp->hdr.status == STATE_STATUS_UPLOAD_PENDING) {
5743 _lm_tcp_comp_upload_tcp_request(pdev, tcp);
5744 } else {
5745 DbgBreakMsg("Vladz: Not implemented yet!\n");
5746 }
5747 }
5748
5749 /* TOE lock should be taken by the caller */
5750 void lm_tcp_internal_query(
5751 IN struct _lm_device_t * pdev)
5752 {
5753 lm_tcp_state_t *tcp_state;
5754 u32_t status_arr[STATE_STATUS_ERR+1] = {0};
5755 u32_t status, num_tcps, i;
5756
5757 DbgMessage(pdev, FATAL, "## lm_tcp_debug_query START version %d.%d.%d\n",
5758 LM_DRIVER_MAJOR_VER, LM_DRIVER_MINOR_VER, LM_DRIVER_FIX_NUM);
5759
5760 num_tcps = d_list_entry_cnt(&pdev->toe_info.state_blk.tcp_list);
5761 tcp_state = (lm_tcp_state_t *)d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
5762 i = 0;
5763 while (tcp_state) {
5764 status = tcp_state->hdr.status;
5765 status_arr[status]++;
5766
5767 /* check state's status */
5768 if(status != STATE_STATUS_NORMAL) {
5769 DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has status=%d (!= normal)\n",
5770 tcp_state, tcp_state->cid, status);
5771 }
5772
5773         /* verify there is no pending slow path request */
5774 if(tcp_state->sp_request) {
5775             DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has slow path request of type %d, not completed by FW (sp comp flags=0x%x)\n",
5776 tcp_state, tcp_state->cid, tcp_state->sp_request->type, tcp_state->sp_flags);
5777 }
5778
5779         /* verify there are no bytes pending completion */
5780 if(tcp_state->tx_con->bytes_post_cnt != tcp_state->tx_con->bytes_comp_cnt) {
5781 DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has TX pending bytes (%d). (con->flags=0x%x)\n",
5782 tcp_state, tcp_state->cid,
5783 S64_SUB(tcp_state->tx_con->bytes_post_cnt, tcp_state->tx_con->bytes_comp_cnt),
5784 tcp_state->tx_con->flags);
5785 }
5786 if(tcp_state->rx_con->bytes_post_cnt != tcp_state->rx_con->bytes_comp_cnt) {
5787 DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has RX pending bytes (%d). (con->flags=0x%x)\n",
5788 tcp_state, tcp_state->cid,
5789 S64_SUB(tcp_state->rx_con->bytes_post_cnt, tcp_state->rx_con->bytes_comp_cnt),
5790 tcp_state->rx_con->flags);
5791 }
5792
5793 tcp_state = (lm_tcp_state_t *)d_list_next_entry((d_list_entry_t*)tcp_state);
5794 }
5795
5796 /* print statistics */
5797 DbgMessage(pdev, FATAL, "# num offloaded connections=%d\n", num_tcps);
5798 for (i = 0; i < STATE_STATUS_ERR+1; i++) {
5799 if (status_arr[i]) {
5800 DbgMessage(pdev, FATAL, "# num connections in status %d=%d\n", i, status_arr[i]);
5801 }
5802 }
5803
5804 DbgMessage(pdev, FATAL, "## lm_tcp_debug_query END\n");
5805 }
5806
5807
5808 void lm_tcp_upld_close_received_complete(
5809 struct _lm_device_t * pdev,
5810 lm_tcp_state_t * tcp,
5811 l4_upload_reason_t upload_reason)
5812 {
5813 DbgMessage(pdev, INFORMl4sp , "###lm_tcp_drv_upl_received_complete cid=%d \n", tcp->cid);
5814
5815 MM_ACQUIRE_TOE_LOCK(pdev);
5816
5817 tcp->tcp_state_calc.con_upld_close_flag = TRUE;
5818
5819 MM_RELEASE_TOE_LOCK(pdev);
5820
5821 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, upload_reason);
5822 pdev->toe_info.stats.total_close_upld_requested++;
5823 }
5824
5825
5826 /** Description
5827 * completes the slow-path part of a connection
5828 */
5829 void lm_tcp_tx_complete_tcp_sp(
5830 IN struct _lm_device_t * pdev,
5831 IN lm_tcp_state_t * tcp,
5832 IN lm_tcp_con_t * con)
5833 {
5834 u8_t complete_ramrod;
5835 u32_t sp_type,sp_flags,flags,snapshot_flags;
5836 lm_tcp_slow_path_request_t * request = NULL;
5837
5838 snapshot_flags = con->dpc_info.snapshot_flags;
5839 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RESET_RECV) {
5840 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RESET_RECV;
5841 lm_tcp_tx_rst_received_complete(pdev, con->tcp_state);
5842 }
5843 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RAMROD_CMP) {
5844 /* clean the dpc_info: we're done with it */
5845 con->dpc_info.snapshot_flags = 0;
5846
5847         /* all ramrods on SCQ also complete on RCQ */
5848 complete_ramrod = FALSE;
5849 /* Get global TOE lock */
5850 MM_ACQUIRE_TOE_LOCK(pdev);
5851
5852         /* save the type under the lock because the next ramrod will change this type */
5853 sp_type = tcp->sp_request->type;
5854 MM_RELEASE_TOE_LOCK(pdev);
5855
5856 switch(sp_type) {
5857 case SP_REQUEST_ABORTIVE_DISCONNECT:
5858 lm_tcp_tx_abortive_disconnect_ramrod_complete(pdev, tcp);
5859 break;
5860 case SP_REQUEST_INVALIDATE:
5861 lm_tcp_tx_invalidate_ramrod_complete(pdev, tcp);
5862 break;
5863 case SP_REQUEST_TERMINATE1_OFFLOAD:
5864 lm_tcp_tx_terminate_ramrod_complete(pdev, tcp);
5865 break;
5866 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
5867 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
5868 case SP_REQUEST_PENDING_TX_RST:
5869 lm_tcp_tx_empty_ramrod_complete(pdev, tcp, sp_type);
5870 break;
5871 default:
5872 DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
5873 DbgBreak();
5874 }
5875 /* Get global TOE lock */
5876 MM_ACQUIRE_TOE_LOCK(pdev);
5877
5878 /* save the type under the lock because the next ramrod will change this type */
5879 DbgBreakIf(sp_type != tcp->sp_request->type);
5880
5881 tcp->sp_flags |= SP_REQUEST_COMPLETED_TX;
5882
5883         /* If it's a second completion, post the query ramrod */
5884 if ( tcp->sp_flags & SP_REQUEST_COMPLETED_RX ) {
5885 complete_ramrod = TRUE;
5886 tcp->sp_flags &= ~ ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX );
5887 }
5888 sp_flags = tcp->sp_flags;
5889 flags = tcp->tx_con->flags;
5890 MM_RELEASE_TOE_LOCK(pdev);
5891 if (complete_ramrod) {
5892 request = tcp->sp_request;
5893 DbgBreakIf(request == NULL);
5894 switch(sp_type) {
5895 case SP_REQUEST_ABORTIVE_DISCONNECT:
5896 DbgBreakIf(request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
5897 lm_tcp_comp_abortive_disconnect_request(pdev, tcp, request);
5898 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RESET_SEND, tcp->ulp_type, tcp->cid);
5899 break;
5900 case SP_REQUEST_INVALIDATE:
5901 DbgBreakIf(request->type != SP_REQUEST_INVALIDATE);
5902 lm_tcp_comp_invalidate_request(pdev, tcp, request);
5903 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INVALIDATE, tcp->ulp_type, tcp->cid);
5904 break;
5905 case SP_REQUEST_TERMINATE1_OFFLOAD:
5906 DbgBreakIf(request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
5907 lm_tcp_terminate_ramrod_complete(pdev, tcp);
5908 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_TERMINATE, tcp->ulp_type, tcp->cid);
5909 break;
5910 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
5911 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
5912 case SP_REQUEST_PENDING_TX_RST:
5913 lm_tcp_comp_empty_ramrod_request(pdev, tcp);
5914 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_EMPTY_RAMROD, tcp->ulp_type, tcp->cid);
5915 break;
5916 default:
5917 DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
5918 DbgBreak();
5919 }
5920 }
5921 }
5922 }
5923
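/* Illustrative note: the disconnect/invalidate/terminate/empty ramrods above
 * complete once on the Tx (SCQ) side and once on the Rx (RCQ) side. Each side
 * sets its own SP_REQUEST_COMPLETED_* flag under the TOE lock, and only the
 * side that finds the other flag already set (the "second completion") clears
 * both flags and completes the request and the SQ entry. A minimal sketch of
 * that rendezvous, as seen from the Tx side (simplified from the code above,
 * kept under #if 0 so it is never compiled):
 */
#if 0
    MM_ACQUIRE_TOE_LOCK(pdev);
    tcp->sp_flags |= SP_REQUEST_COMPLETED_TX;          /* _RX on the Rx path */
    complete_ramrod = FALSE;
    if (tcp->sp_flags & SP_REQUEST_COMPLETED_RX) {     /* second completion? */
        complete_ramrod = TRUE;
        tcp->sp_flags &= ~(SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX);
    }
    MM_RELEASE_TOE_LOCK(pdev);
#endif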
5924 /** Description
5925 * completes the slow-path part of a connection
5926 * completes ramrods if ramrod is completed.
5927  *  function logic: every stage turns off its flag; if at the end of the check the flags are zero
5928  *  it means there is nothing left to do and we can return. Usually we will rarely have more than
5929  *  one or two flags set, so it is not worth checking all the cases up front (too many if/jumps)
5930 */
5931 void lm_tcp_rx_complete_tcp_sp(
5932 IN struct _lm_device_t * pdev,
5933 IN lm_tcp_state_t * tcp,
5934 IN lm_tcp_con_t * con
5935 )
5936 {
5937 u8_t complete_ramrod;
5938 u32_t sp_type,sp_flags,flags,snapshot_flags;
5939 lm_tcp_slow_path_request_t * request = NULL;
5940 u32_t cid;
5941 u8_t ulp_type;
5942
5943 /* handle fin recv */
5944 snapshot_flags = con->dpc_info.snapshot_flags;
5945 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_FIN_RECV) {
5946 lm_tcp_rx_fin_received_complete(pdev, tcp, 0);
5947 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_FIN_RECV;
5948 }
5949 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_FIN_RECV_UPL) {
5950 lm_tcp_rx_fin_received_complete(pdev, tcp, 1);
5951 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_FIN_RECV_UPL;
5952 }
5953
5954 DbgMessage(pdev, INFORMl4rx, "lm_tcp_rx_complete_tcp_sp tcp=%p cid=%d \n", tcp, tcp->cid);
5955 /* reset recv needs to be checked first */
5956 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RESET_RECV) {
5957 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RESET_RECV;
5958 lm_tcp_rx_rst_received_complete(pdev, tcp);
5959 }
5960
5961 /* check if we have some sort of retrieve indication. we sort of check twice */
5962 /* Rx completions (from ustorm) will not arrive after the following indications,
5963      * therefore we can assume they were received before.
5964      * We can't assume the same for ramrods */
5965 if (con->dpc_info.snapshot_flags & (LM_TCP_DPC_URG | LM_TCP_DPC_RT_TO | LM_TCP_DPC_KA_TO | LM_TCP_DPC_DBT_RE | LM_TCP_DPC_OPT_ERR | LM_TCP_DPC_UPLD_CLOSE)) {
5966 con->dpc_info.snapshot_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
5967 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_URG) {
5968 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_URG;
5969 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_URG);
5970 }
5971
5972 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RT_TO) {
5973 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RT_TO;
5974 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_RETRANSMIT_TIMEOUT);
5975 }
5976
5977 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_KA_TO) {
5978 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_KA_TO;
5979 lm_tcp_upld_close_received_complete(pdev, tcp, L4_UPLOAD_REASON_KEEP_ALIVE_TIMEOUT);
5980 }
5981
5982 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_DBT_RE) {
5983 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_DBT_RE;
5984 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5985 pdev->toe_info.stats.total_dbt_upld_requested++;
5986 }
5987
5988 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_OPT_ERR) {
5989 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_OPT_ERR;
5990 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5991 pdev->toe_info.stats.total_opt_upld_requested++;
5992 }
5993
5994 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_UPLD_CLOSE) {
5995 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_UPLD_CLOSE;
5996 lm_tcp_upld_close_received_complete(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5997 }
5998 } else if (con->dpc_info.snapshot_flags & LM_TCP_DPC_TOO_BIG_ISLE) {
5999 con->dpc_info.snapshot_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
6000 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
6001 pdev->toe_info.stats.total_big_isle_upld_requesed++;
6002 } else if (con->dpc_info.snapshot_flags & LM_TCP_DPC_TOO_MANY_ISLES) {
6003 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_TOO_MANY_ISLES;
6004 lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
6005 pdev->toe_info.stats.total_many_isles_upld_requesed++;
6006 }
6007
6008
6009 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RAMROD_CMP) {
6010 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RAMROD_CMP;
6011 DbgBreakIf(con->dpc_info.snapshot_flags != 0);
6012 /* Keep these before completing as the completion calls themselves can cause tcp state to be
6013 * deleted... */
6014 cid = tcp->cid;
6015 ulp_type = tcp->ulp_type;
6016 switch (tcp->sp_request->type) {
6017 case SP_REQUEST_UPDATE_NEIGH:
6018 case SP_REQUEST_UPDATE_PATH:
6019 case SP_REQUEST_UPDATE_TCP:
6020 case SP_REQUEST_UPDATE_PATH_RELINK:
6021 lm_tcp_update_ramrod_complete(pdev, tcp);
6022 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_UPDATE, ulp_type, cid);
6023 return;
6024 case SP_REQUEST_QUERY:
6025 lm_tcp_query_ramrod_complete(pdev, tcp); /* this may delete tcp !! */
6026 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_QUERY, ulp_type, cid);
6027 return;
6028 case SP_REQUEST_TERMINATE_OFFLOAD:
6029 lm_tcp_searcher_ramrod_complete(pdev, tcp);
6030 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_SEARCHER_DELETE, ulp_type, cid);
6031 return;
6032 case SP_REQUEST_INITIATE_OFFLOAD:
6033 /* Completion of initiate offload request can reach this point only if there was a license error, */
6034             /* otherwise it's being completed earlier, during the 'process' stage */
6035 lm_tcp_comp_initiate_offload_request(pdev, tcp, TOE_INITIATE_OFFLOAD_RAMROD_DATA_LICENSE_FAILURE);
6036             lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD, ulp_type, cid);
6037 return;
6038 }
6039
6040 /* The rest of the ramrods on RCQ also complete on SCQ */
6041 complete_ramrod = FALSE;
6042 MM_ACQUIRE_TOE_LOCK(pdev);
6043
6044         /* save the type under the lock because the next ramrod will change this type */
6045 sp_type = tcp->sp_request->type;
6046 MM_RELEASE_TOE_LOCK(pdev);
6047
6048 switch(sp_type) {
6049 case SP_REQUEST_ABORTIVE_DISCONNECT:
6050 lm_tcp_rx_abortive_disconnect_ramrod_complete(pdev, tcp);
6051 break;
6052 case SP_REQUEST_INVALIDATE:
6053 lm_tcp_rx_invalidate_ramrod_complete(pdev, tcp);
6054 break;
6055 case SP_REQUEST_TERMINATE1_OFFLOAD:
6056 lm_tcp_rx_terminate_ramrod_complete(pdev, tcp);
6057 break;
6058 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
6059 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
6060 case SP_REQUEST_PENDING_TX_RST:
6061 lm_tcp_rx_empty_ramrod_complete(pdev,tcp, sp_type);
6062 break;
6063 default:
6064 DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
6065 DbgBreak();
6066 }
6067 /* Get global TOE lock */
6068 MM_ACQUIRE_TOE_LOCK(pdev);
6069
6070 DbgBreakIf(sp_type != tcp->sp_request->type);
6071
6072 tcp->sp_flags |= SP_REQUEST_COMPLETED_RX;
6073
6074         /* If it's a second completion, post the query ramrod */
6075 if ( tcp->sp_flags & SP_REQUEST_COMPLETED_TX ) {
6076 complete_ramrod = TRUE;
6077 tcp->sp_flags &= ~ ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX );
6078 }
6079 sp_flags = tcp->sp_flags;
6080 flags = tcp->rx_con->flags;
6081 MM_RELEASE_TOE_LOCK(pdev);
6082 if (complete_ramrod) {
6083 request = tcp->sp_request;
6084 DbgBreakIf(request == NULL);
6085 switch(sp_type) {
6086 case SP_REQUEST_ABORTIVE_DISCONNECT:
6087 DbgBreakIf(request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
6088 lm_tcp_comp_abortive_disconnect_request(pdev, tcp, request);
6089 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RESET_SEND, tcp->ulp_type, tcp->cid);
6090 break;
6091 case SP_REQUEST_INVALIDATE:
6092 DbgBreakIf(request->type != SP_REQUEST_INVALIDATE);
6093 lm_tcp_comp_invalidate_request(pdev, tcp, request);
6094 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INVALIDATE, tcp->ulp_type, tcp->cid);
6095 break;
6096 case SP_REQUEST_TERMINATE1_OFFLOAD:
6097 DbgBreakIf(request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
6098 lm_tcp_terminate_ramrod_complete(pdev, tcp);
6099 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_TERMINATE, tcp->ulp_type, tcp->cid);
6100 break;
6101 case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
6102 case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
6103 case SP_REQUEST_PENDING_TX_RST:
6104 lm_tcp_comp_empty_ramrod_request(pdev, tcp);
6105 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_EMPTY_RAMROD, tcp->ulp_type, tcp->cid);
6106 break;
6107 default:
6108 DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
6109 DbgBreak();
6110 }
6111 }
6112 }
6113 }
6114
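/* Illustrative sketch (hypothetical handler, not driver code) of the
 * snapshot-flag pattern described above lm_tcp_rx_complete_tcp_sp: each stage
 * clears its own flag before handling it, so once the snapshot reaches zero
 * there is nothing left to do. In practice at most one or two flags are set
 * per DPC, which is why the stages are simply checked in order:
 */
#if 0
static void _toe_dpc_flags_sketch(lm_device_t *pdev, lm_tcp_con_t *con)
{
    if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RESET_RECV) {
        con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RESET_RECV;
        /* ... handle the reset ... */
    }
    if (con->dpc_info.snapshot_flags == 0) {
        return; /* common case - nothing else was posted for this DPC */
    }
    /* ... the remaining, rarer flags are handled the same way ... */
}
#endif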
6115 #define MSL 4 /* 4 seconds */
6116
6117 l4_tcp_con_state_t lm_tcp_calc_state (
6118 lm_device_t * pdev,
6119 lm_tcp_state_t * tcp,
6120 u8_t fin_was_sent
6121 )
6122 {
6123 enum {
6124 NO_CLOSE = 0,
6125 ACTIVE_CLOSE,
6126 PASSIVE_CLOSE,
6127 PASSIVE_BY_ACTIVE_CLOSE
6128 } closing_type;
6129
6130 u32_t snd_max = tcp->tcp_delegated.send_max;
6131 u32_t snd_una = tcp->tcp_delegated.send_una;
6132 u8_t con_rst = tcp->tcp_state_calc.con_rst_flag;
6133 u8_t con_upld_close = tcp->tcp_state_calc.con_upld_close_flag;
6134 u64_t fin_completed_time = tcp->tcp_state_calc.fin_completed_time;
6135 u64_t fin_reception_time = tcp->tcp_state_calc.fin_reception_time;
6136 u64_t fin_request_time = tcp->tcp_state_calc.fin_request_time;
6137 u64_t time_wait_state_entering_time = fin_completed_time > fin_reception_time ?
6138 fin_completed_time : fin_reception_time;
6139 l4_tcp_con_state_t tcp_state;
6140
6141 /* Set closing type */
6142 closing_type = NO_CLOSE;
6143 if ( fin_reception_time == 0 ) {
6144 if ( fin_request_time > 0 ) {
6145 closing_type = ACTIVE_CLOSE;
6146 }
6147 } else if ( ( fin_reception_time < fin_request_time ) || (fin_request_time == 0) ) {
6148 closing_type = PASSIVE_CLOSE;
6149 } else if ( ( fin_reception_time >= fin_request_time ) && (fin_request_time > 0) ){
6150 closing_type = PASSIVE_BY_ACTIVE_CLOSE;
6151 }
6152
6153 if ((con_rst) || (con_upld_close)) {
6154 tcp_state = L4_TCP_CON_STATE_CLOSED;
6155 } else if ( closing_type == NO_CLOSE ) {
6156 tcp_state = L4_TCP_CON_STATE_ESTABLISHED;
6157 } else if ( ( closing_type == ACTIVE_CLOSE ) && fin_was_sent ) {
6158 if ( snd_una == snd_max ){
6159 tcp_state = L4_TCP_CON_STATE_FIN_WAIT2;
6160 } else {
6161 tcp_state = L4_TCP_CON_STATE_FIN_WAIT1;
6162 }
6163 } else if ( ( closing_type == PASSIVE_BY_ACTIVE_CLOSE ) && (! fin_was_sent ) ) {
6164 tcp_state = L4_TCP_CON_STATE_CLOSE_WAIT;
6165 } else if (closing_type == PASSIVE_BY_ACTIVE_CLOSE ) {
6166 if (snd_una == snd_max) {
6167 if ( mm_get_current_time(pdev) - time_wait_state_entering_time > 2*pdev->ofld_info.l4_params.ticks_per_second *MSL ) {
6168 tcp_state = L4_TCP_CON_STATE_CLOSED;
6169 } else {
6170 tcp_state = L4_TCP_CON_STATE_TIME_WAIT;
6171 }
6172 } else {
6173 tcp_state = L4_TCP_CON_STATE_CLOSING;
6174 }
6175 } else if (closing_type == PASSIVE_CLOSE ) {
6176 if ( ! fin_was_sent ) {
6177 tcp_state = L4_TCP_CON_STATE_CLOSE_WAIT;
6178 } else if ( snd_una == snd_max ) {
6179 tcp_state = L4_TCP_CON_STATE_CLOSED;
6180 } else {
6181 tcp_state = L4_TCP_CON_STATE_LAST_ACK;
6182 }
6183 } else {
6184 tcp_state = L4_TCP_CON_STATE_ESTABLISHED;
6185 }
6186
6187 return tcp_state;
6188 }
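
/* Worked example (illustrative): with MSL == 4 seconds and, say,
 * l4_params.ticks_per_second == 1000, a connection whose FIN exchange finished
 * more than 2 * 1000 * 4 == 8000 ticks (2*MSL == 8 seconds) ago is reported as
 * CLOSED rather than TIME_WAIT by the check above. */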
6189
6190 void lm_tcp_clear_grqs(lm_device_t * pdev)
6191 {
6192 lm_tcp_grq_t * grq;
6193 // lm_tcp_gen_buf_t * gen_buf;
6194 u8_t idx;
6195
6196 DbgBreakIf(!(pdev->params.ofld_cap & LM_OFFLOAD_CHIMNEY));
6197
6198 /* shutdown bug - BSOD only if shutdown is not in progress */
6199 if (!lm_reset_is_inprogress(pdev)){
6200 DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.tcp_list));
6201 DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.path_list));
6202 DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.neigh_list));
6203 }
6204
6205 /* we need to go over all the buffers in the GRQs and return them to the pool. We also need
6206      * to clear the consumer of the grq in the FW to make sure this grq isn't treated in the xon test. */
6207     /* This function is called after all work-items have finished, and the driver
6208      * state is no longer running, therefore there is no risk in accessing the grqs without
6209 * a lock */
6210
6211 if (IS_PFDEV(pdev)) {
6212 DbgBreakIf(USTORM_TOE_GRQ_CONS_PTR_LO_SIZE != 4);
6213 DbgBreakIf(USTORM_TOE_GRQ_CONS_PTR_HI_SIZE != 4);
6214 }
6215
6216 LM_TOE_FOREACH_RSS_IDX(pdev, idx)
6217 {
6218 grq = &pdev->toe_info.grqs[idx];
6219 MM_ACQUIRE_TOE_GRQ_LOCK(pdev, idx);
6220 grq->grq_compensate_on_alloc = FALSE;
6221 MM_RELEASE_TOE_GRQ_LOCK(pdev, idx);
6222 }
6223
6224 LM_TOE_FOREACH_RSS_IDX(pdev, idx)
6225 {
6226 if (IS_PFDEV(pdev)) {
6227 /* nullify consumer pointer of all inactive GRQs (required by FW) (will override with active ones) */
6228 LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,idx), PORT_ID(pdev)), 0, BAR_USTRORM_INTMEM);
6229 LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,idx), PORT_ID(pdev)), 0, BAR_USTRORM_INTMEM);
6230 }
6231
6232 grq = &pdev->toe_info.grqs[idx];
6233 if (!d_list_is_empty(&grq->aux_gen_list)) {
6234 mm_tcp_return_list_of_gen_bufs(pdev, &grq->aux_gen_list, 0, NON_EXISTENT_SB_IDX);
6235 d_list_clear(&grq->aux_gen_list);
6236 }
6237 if (!d_list_is_empty(&grq->active_gen_list)) {
6238 mm_tcp_return_list_of_gen_bufs(pdev, &grq->active_gen_list, 0, NON_EXISTENT_SB_IDX);
6239 d_list_clear(&grq->active_gen_list);
6240 lm_bd_chain_reset(pdev, &grq->bd_chain);
6241 }
6242 }
6243 }
6244
6245 /**
6246 * @Description: Update TOE RSS. The origin of this call is when getting
6247  *              an OS RSS update. It's actually called via the L2 interface and not
6248 * L4. However, the ramrods are separate for L4 + L2 due to the
6249 * assumptions by the different protocols of what the data is
6250 * in the indirection table.
6251 *
6252 * @Assumptions: Called BEFORE calling L2
6253 * enable-rss!!
6254 *
6255 * @param pdev
6256 * @param chain_indirection_table - table of TOE RCQ chain values
6257 * @param table_size - size of table above
6258  * @param enable - whether this enables or disables rss; if disabling, the
6259  *        table will all point to the same entry
6260 *
6261  * @return lm_status_t - PENDING if the completion will arrive asynchronously
6262 * - SUCCESS if no ramrod is sent (for example table didn't change)
6263 * - FAILURE o/w
6264 */
6265 lm_status_t lm_tcp_update_rss(struct _lm_device_t * pdev, u8_t * chain_indirection_table,
6266 u32_t table_size, u8_t enable)
6267 {
6268 struct toe_rss_update_ramrod_data *data = pdev->toe_info.rss_update_data;
6269 lm_status_t lm_status = LM_STATUS_SUCCESS;
6270 u8_t value = 0;
6271 u8_t send_ramrod = 0;
6272 u8_t rss_idx = 0;
6273 u16_t bitmap = 0;
6274 u8_t i,j;
6275
6276 /* If data is NULL (allocation failed...) we don't want to fail this operation for L2 */
6277 if (pdev->params.l4_enable_rss == L4_RSS_DISABLED || data == NULL)
6278 {
6279 return LM_STATUS_SUCCESS;
6280 }
6281
6282 DbgBreakIf(pdev->params.l4_enable_rss != L4_RSS_DYNAMIC);
6283
6284 if (enable)
6285 {
6286 if (pdev->params.l4_grq_page_cnt > 2)
6287 {
6288 LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx)
6289 {
6290 pdev->toe_info.grqs[rss_idx].high_bds_threshold = 2 * 512;
6291 }
6292 }
6293 }
6294 else
6295 {
6296 pdev->toe_info.grqs[LM_TOE_BASE_RSS_ID(pdev)].high_bds_threshold = 0;
6297 }
6298
6299
6300 for (j = 0; j < TOE_INDIRECTION_TABLE_SIZE/table_size; j++)
6301 {
6302 for (i = 0; i < table_size; i++)
6303 {
6304 value = LM_TOE_FW_RSS_ID(pdev,chain_indirection_table[i]);
6305
6306 if (pdev->toe_info.indirection_table[(j*table_size)+i] != value) {
6307 pdev->toe_info.indirection_table[(j*table_size)+i] = value;
6308 send_ramrod = TRUE;
6309 }
6310 }
6311 }
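
    /* Example (illustrative): if TOE_INDIRECTION_TABLE_SIZE is 128 and the OS
     * hands us a table of table_size == 8, the 8 entries are replicated 16
     * times to fill the TOE indirection table; send_ramrod is set only if at
     * least one entry actually changed. */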
6312
6313 /* send update ramrod */
6314 if (send_ramrod)
6315 {
6316 pdev->params.update_comp_cnt = 0;
6317 pdev->params.update_suspend_cnt = 0;
6318 pdev->params.update_toe_comp_cnt = 0; /* We need a separate one for TOE to determine when to update sq credit */
6319
6320 /* 2 global update counters :
6321      * update_comp_cnt - Set initially to the number of expected completions, decremented every time an update completion is processed.
6322      *                   The processing for all chains is suspended until this counter gets to 0.
6323      * update_suspend_cnt - Set initially to the number of potentially suspended chains. Decremented when each chain resumes processing. The ramrod completion
6324 * is indicated back only when this counter gets to 0.
6325 *
6326 * The update ramrod is 1 pending so we can access the completion and suspend counters here and below without grabbing a lock
6327 */
6328
6329 /* Update once for Eth... */
6330 pdev->params.update_comp_cnt++;
6331 pdev->params.update_suspend_cnt++;
6332
6333
6334         /* TODO: Enhancement, send only on the chains that take part, and the ones removed... */
6335 LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx)
6336 {
6337 bitmap |= (1<<LM_TOE_FW_RSS_ID(pdev,rss_idx));
6338 }
6339
6340 mm_memcpy(data->indirection_table, pdev->toe_info.indirection_table, sizeof(data->indirection_table));
6341 data->toe_rss_bitmap = bitmap;
6342
6343 pdev->params.update_comp_cnt += pdev->params.l4_rss_chain_cnt;
6344 pdev->params.update_suspend_cnt += pdev->params.l4_rss_chain_cnt;
6345 pdev->params.update_toe_comp_cnt = pdev->params.l4_rss_chain_cnt; /* TOE only! */
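
        /* Example (illustrative): with l4_rss_chain_cnt == 4, update_comp_cnt
         * and update_suspend_cnt each end up as 1 (Eth) + 4 == 5, while
         * update_toe_comp_cnt is 4 since only the TOE chains are counted for
         * the SQ credit update. */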
6346
6347 lm_status = lm_command_post(pdev,
6348 LM_TOE_FW_RSS_ID(pdev, LM_TOE_BASE_RSS_ID(pdev)),
6349 RAMROD_OPCODE_TOE_RSS_UPDATE,
6350 CMD_PRIORITY_MEDIUM,
6351 TOE_CONNECTION_TYPE,
6352 pdev->toe_info.rss_update_data_phys.as_u64);
6353
6354 if (lm_status == LM_STATUS_SUCCESS)
6355 {
6356 lm_status = LM_STATUS_PENDING;
6357 }
6358 }
6359
6360 return lm_status;
6361 }
6362
6363
6364 /** Description
6365 * function is called whenever the UM allocates more generic buffers
6366 */
6367 void lm_tcp_rx_gen_bufs_alloc_cb(lm_device_t * pdev)
6368 {
6369 u8_t i;
6370
6371 LM_TOE_FOREACH_RSS_IDX(pdev, i)
6372 {
6373
6374 lm_tcp_grq_t *grq = &pdev->toe_info.grqs[i];
6375 MM_ACQUIRE_TOE_GRQ_LOCK(pdev, i);
6376 if (grq->grq_compensate_on_alloc) {
6377 /* fill GRQ */
6378 if (lm_tcp_rx_fill_grq(pdev, i, NULL, FILL_GRQ_LOW_THRESHOLD)) {
6379 DbgMessage(pdev, INFORMl4rx, "lm_toe_service_rx_intr: Updating GRQ producer\n");
6380 /* notify the fw of the prod of the GRQ */
6381 LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,i), PORT_ID(pdev)),
6382 lm_bd_chain_prod_idx(&pdev->toe_info.grqs[i].bd_chain), BAR_USTRORM_INTMEM);
6383 }
6384 }
6385 MM_RELEASE_TOE_GRQ_LOCK(pdev, i);
6386 }
6387 }
6388
6389 void lm_tcp_update_isles_cnts(struct _lm_device_t * pdev, s16_t number_of_isles, s32_t number_of_gen_bufs)
6390 {
6391 lm_toe_isles_t *archipelago = &pdev->toe_info.archipelago;
6392
6393 pdev->toe_info.archipelago.number_of_isles += number_of_isles;
6394 pdev->toe_info.archipelago.gen_bufs_in_isles += number_of_gen_bufs;
6395 if (archipelago->number_of_isles > archipelago->max_number_of_isles) {
6396 archipelago->max_number_of_isles = archipelago->number_of_isles;
6397 }
6398
6399 if (archipelago->gen_bufs_in_isles > archipelago->max_gen_bufs_in_isles) {
6400 archipelago->max_gen_bufs_in_isles = archipelago->gen_bufs_in_isles;
6401 }
6402 if (pdev->params.l4_max_gen_bufs_in_archipelago
6403 && (archipelago->gen_bufs_in_isles > (s32_t)pdev->params.l4_max_gen_bufs_in_archipelago)) {
6404 if (pdev->params.l4_limit_isles & L4_LI_NOTIFY) {
6405 DbgBreak();
6406 }
6407 if (pdev->params.l4_limit_isles & L4_LI_MAX_GEN_BUFS_IN_ARCHIPELAGO) {
6408 pdev->toe_info.archipelago.l4_decrease_archipelago = TRUE;
6409 }
6410 } else if (pdev->toe_info.archipelago.l4_decrease_archipelago) {
6411 if (archipelago->gen_bufs_in_isles <= (s32_t)pdev->params.l4_valid_gen_bufs_in_archipelago) {
6412 pdev->toe_info.archipelago.l4_decrease_archipelago = FALSE;
6413 }
6414 }
6415
6416 }
6417
6418 void lm_tcp_init_num_of_blocks_per_connection(
6419 struct _lm_device_t *pdev,
6420 u8_t num)
6421 {
6422 pdev->params.l4_num_of_blocks_per_connection = num;
6423 }
6424
6425 u8_t lm_tcp_get_num_of_blocks_per_connection(
6426 struct _lm_device_t *pdev)
6427 {
6428 return pdev->params.l4_num_of_blocks_per_connection;
6429 }
6430
6431 lm_neigh_state_t * lm_tcp_get_next_neigh(
6432 struct _lm_device_t *pdev,
6433 lm_neigh_state_t * neigh_state)
6434 {
6435 if (neigh_state == NULL) {
6436 neigh_state = (lm_neigh_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.neigh_list);
6437 } else {
6438 neigh_state = (lm_neigh_state_t *) d_list_next_entry(&neigh_state->hdr.link);
6439 }
6440 return neigh_state;
6441 }
6442
6443 lm_path_state_t * lm_tcp_get_next_path(
6444 struct _lm_device_t *pdev,
6445 lm_neigh_state_t * neigh_state,
6446 lm_path_state_t * path_state)
6447 {
6448 if (path_state == NULL) {
6449 path_state = (lm_path_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.path_list);
6450 } else {
6451 path_state = (lm_path_state_t *) d_list_next_entry(&path_state->hdr.link);
6452 }
6453
6454 if (neigh_state != NULL) {
6455 while(path_state) {
6456 if (path_state->neigh == neigh_state) {
6457 return path_state;
6458 }
6459 path_state = (lm_path_state_t *) d_list_next_entry(&path_state->hdr.link);
6460 }
6461 }
6462 return path_state;
6463 }
6464
6465 lm_tcp_state_t * lm_tcp_get_next_tcp(
6466 struct _lm_device_t *pdev,
6467 lm_tcp_state_t * tcp_state)
6468 {
6469 if (tcp_state == NULL) {
6470 tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
6471 } else {
6472 tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
6473 }
6474 return tcp_state;
6475 }
6476
6477 u8_t lm_tcp_get_src_ip_cam_byte(
6478 IN struct _lm_device_t * pdev,
6479 IN lm_path_state_t * path)
6480 {
6481 u8_t src_ip_byte;
6482
6483 DbgBreakIf(!(pdev && path));
6484
6485 if (path->path_const.ip_version == IP_VERSION_IPV4) {
6486 src_ip_byte = path->path_const.u.ipv4.src_ip & 0x000000FF;
6487 } else {
6488 src_ip_byte = path->path_const.u.ipv6.src_ip[0] & 0x000000FF;
6489 }
6490 return src_ip_byte;
6491 }
6492
6493 lm_tcp_state_t* lm_tcp_find_offloaded_tcp_tuple(struct _lm_device_t * pdev, u8_t src_ip_byte, u8_t src_tcp_b, u8_t dst_tcp_b, lm_tcp_state_t * prev_tcp)
6494 {
6495 lm_tcp_state_t *connection_found = NULL;
6496 lm_tcp_state_t *current_tcp = NULL;
6497
6498 while ((current_tcp = lm_tcp_get_next_tcp(pdev, prev_tcp))) {
6499 u8_t c_src_tcp_b;
6500 u8_t c_dst_tcp_b;
6501 prev_tcp = current_tcp;
6502 c_src_tcp_b = current_tcp->tcp_const.src_port & 0x00FF;
6503 c_dst_tcp_b = current_tcp->tcp_const.dst_port & 0x00FF;
6504 if ((c_src_tcp_b == src_tcp_b) && (c_dst_tcp_b == dst_tcp_b)) {
6505 if ((current_tcp->path == NULL) || (lm_tcp_get_src_ip_cam_byte(pdev,current_tcp->path) == src_ip_byte)) {
6506 connection_found = current_tcp;
6507 break;
6508 }
6509 }
6510 }
6511
6512 return connection_found;
6513 }
6514
6515 u8_t * lm_tcp_get_pattern(struct _lm_device_t * pdev,
6516 lm_tcp_state_t * tcp,
6517 u8_t pattern_idx,
6518 u32_t offset,
6519 u32_t * pattern_size)
6520 {
6521 offset = tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] + offset;
6522 offset = offset % pdev->toe_info.integrity_info.pattern_size;
6523 if (*pattern_size > (pdev->toe_info.integrity_info.pattern_buf_size - pdev->toe_info.integrity_info.pattern_size)) {
6524 *pattern_size = pdev->toe_info.integrity_info.pattern_buf_size - pdev->toe_info.integrity_info.pattern_size;
6525 }
6526 return (pdev->toe_info.integrity_info.pattern_buf + offset);
6527 }
6528
6529 void lm_tcp_set_pattern_offset(struct _lm_device_t * pdev,
6530 lm_tcp_state_t * tcp,
6531 u8_t pattern_idx,
6532 u32_t offset)
6533 {
6534 tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] += offset;
6535 tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] =
6536 tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] % pdev->toe_info.integrity_info.pattern_size;
6537
6538 return;
6539 }
6540
6541 u32_t lm_tcp_find_pattern_offset(struct _lm_device_t * pdev, u8_t * sub_buf, u32_t sub_buf_size)
6542 {
6543 u32_t i,j;
6544 for (j = 0; j < pdev->toe_info.integrity_info.pattern_size; j++) {
6545 for (i = 0; i < sub_buf_size; i++) {
6546 if (sub_buf[i] != pdev->toe_info.integrity_info.pattern_buf[j+i]) {
6547 break;
6548 }
6549 }
6550 if (i == sub_buf_size) {
6551 return j;
6552 }
6553 }
6554 return 0xFFFFFFFF;
6555 }
6556