1
2 #include "lm5710.h"
3 #include "bd_chain.h"
4 #include "command.h"
5 #include "context.h"
6 #include "lm_l4fp.h"
7 #include "lm_l4sp.h"
8 #include "mm_l4if.h"
9 #include "mm.h"
10
11 /* The maximum counter value for the consumed count; if it exceeds this value we post it to firmware.
12  * FW holds 32 bits for this counter, therefore 100MB is OK (see L4 VBD spec). */
13 #define MAX_GRQ_COUNTER 0x6400000
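/* 0x6400000 = 104,857,600 = 100 * 2^20 bytes, i.e. the 100MB threshold referred to above. */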
14 #define IS_OOO_CQE(__cmd) ((__cmd == CMP_OPCODE_TOE_GNI) \
15 || (__cmd == CMP_OPCODE_TOE_GAIR) \
16 || (__cmd == CMP_OPCODE_TOE_GAIL) \
17 || (__cmd == CMP_OPCODE_TOE_GRI) \
18 || (__cmd == CMP_OPCODE_TOE_GJ) \
19 || (__cmd == CMP_OPCODE_TOE_DGI))
20
21 typedef struct toe_rx_bd toe_rx_bd_t;
22
23 static u16_t lm_squeeze_rx_buffer_list(
24 struct _lm_device_t * pdev,
25 lm_tcp_state_t * tcp,
26 u16_t adjust_number,
27 lm_tcp_gen_buf_t ** unwanted_gen_buf
28 );
29
30 static lm_status_t _lm_tcp_rx_post_buf(
31 struct _lm_device_t *pdev,
32 lm_tcp_state_t *tcp,
33 lm_tcp_buffer_t *tcp_buf,
34 lm_frag_list_t *frag_list
35 );
36
37 static void lm_tcp_incr_consumed_gen(
38 struct _lm_device_t * pdev,
39 lm_tcp_state_t * tcp,
40 u32_t nbytes
41 );
42
43 static void lm_tcp_return_gen_bufs(
44 struct _lm_device_t * pdev,
45 lm_tcp_state_t * tcp,
46 lm_tcp_gen_buf_t * gen_buf,
47 u32_t flags,
48 u8_t grq_idx
49 );
50
51 static void lm_tcp_return_list_of_gen_bufs(
52 struct _lm_device_t * pdev,
53 lm_tcp_state_t * tcp,
54 d_list_t * gen_buf_list,
55 u32_t flags,
56 u8_t grq_idx
57 );
58
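/** Description
 *  Returns a free isle object from the given GRQ's isles pool: an entry whose list links
 *  (next/prev) are both NULL, i.e. one that is not currently linked into any isle list.
 */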
59 static lm_isle_t * _lm_tcp_isle_get_free_list(
60 struct _lm_device_t * pdev,
61 u8_t grq_idx)
62 {
63 lm_isle_t * free_list = NULL;
64 lm_isle_t * isles_pool = pdev->toe_info.grqs[grq_idx].isles_pool;
65 u32_t isle_pool_idx;
66 u32_t isle_pool_size = pdev->params.l4_isles_pool_size;
67 DbgBreakIf(!isles_pool);
68 for (isle_pool_idx = 0; isle_pool_idx < isle_pool_size; isle_pool_idx++) {
69 if ((isles_pool[isle_pool_idx].isle_link.next == NULL) && (isles_pool[isle_pool_idx].isle_link.prev == NULL)) {
70 free_list = isles_pool + isle_pool_idx;
71 break;
72 }
73 }
74 DbgBreakIf(!free_list);
75 return free_list;
76 }
77
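/** Description
 *  Returns isle number 'num_isle' (1-based) of the connection. Uses the cached
 *  current_isle when the requested number matches, otherwise walks the isles_list
 *  from its head and updates the cached current_isle / current_isle_number.
 */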
78 static lm_isle_t * _lm_tcp_isle_find(
79 struct _lm_device_t * pdev,
80 lm_tcp_state_t * tcp,
81 u8_t num_isle)
82 {
83 lm_isle_t * isle = NULL;
84 lm_tcp_con_rx_gen_info_t * gen_info;
85 u8_t isle_cnt, isle_idx;
86
87 DbgBreakIf(!(tcp && tcp->rx_con));
88 gen_info = &tcp->rx_con->u.rx.gen_info;
89 isle_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list);
90 DbgBreakIf(!isle_cnt);
91 DbgBreakIf(num_isle > isle_cnt);
92 if (num_isle == gen_info->current_isle_number) {
93 isle = gen_info->current_isle;
94 } else {
95 isle = (lm_isle_t*)gen_info->isles_list.head;
96 for (isle_idx = 1; isle_idx < num_isle; isle_idx++) {
97 isle = (lm_isle_t*)d_list_next_entry(&isle->isle_link);
98 }
99 gen_info->current_isle_number = num_isle;
100 gen_info->current_isle = isle;
101 }
102 return isle;
103 }
104
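/** Description
 *  Unlinks isle number 'num_isle' from the connection's isles_list, moves its generic
 *  buffers onto 'gen_buf_list' and returns the number of bytes that were held in the isle.
 *  The first isle is embedded in gen_info rather than taken from the pool, so removing
 *  isle 1 is special-cased: the content of the next isle is moved into the embedded
 *  first isle and that pool entry is released instead.
 */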
105 static u32_t _lm_tcp_isle_remove(
106 struct _lm_device_t * pdev,
107 lm_tcp_state_t * tcp,
108 u8_t grq_idx,
109 u8_t num_isle,
110 d_list_t * gen_buf_list)
111 {
112 u32_t nbytes = 0;
113 lm_isle_t * new_current_isle = NULL;
114 lm_isle_t * isle = NULL;
115 lm_tcp_con_rx_gen_info_t * gen_info;
116 u8_t isles_cnt;
117 u8_t new_current_isle_num;
118
119 DbgBreakIf(!(tcp && tcp->rx_con));
120 gen_info = &tcp->rx_con->u.rx.gen_info;
121 isles_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list);
122 DbgBreakIf(!(num_isle && (num_isle <= isles_cnt)));
123 isle = _lm_tcp_isle_find(pdev,tcp,num_isle);
124
125 // DbgBreakIf((isles_cnt > 1) && (num_isle == 1));
126 if (isle->isle_link.next != NULL) {
127 new_current_isle = (lm_isle_t*)isle->isle_link.next;
128 new_current_isle_num = num_isle;
129 } else if (isle->isle_link.prev != NULL) {
130 new_current_isle = (lm_isle_t*)isle->isle_link.prev;
131 new_current_isle_num = num_isle - 1;
132 } else {
133 new_current_isle = NULL;
134 new_current_isle_num = 0;
135 }
136
137 #if defined(_NTDDK_)
138 #pragma prefast (push)
139 #pragma prefast (disable:6011)
140 #endif //_NTDDK_
141 d_list_remove_entry(&gen_info->isles_list, &isle->isle_link);
142 #if defined(_NTDDK_)
143 #pragma prefast (pop)
144 #endif //_NTDDK_
145
146 nbytes = isle->isle_nbytes;
147 d_list_add_tail(gen_buf_list, &isle->isle_gen_bufs_list_head);
148 d_list_init(&isle->isle_gen_bufs_list_head, NULL, NULL, 0);
149 if (new_current_isle_num) {
150 if (num_isle == 1) {
151 #if defined(_NTDDK_)
152 #pragma prefast (push)
153 #pragma prefast (disable:28182)
154 #endif //_NTDDK_
155 d_list_remove_entry(&gen_info->isles_list, &new_current_isle->isle_link);
156 #if defined(_NTDDK_)
157 #pragma prefast (pop)
158 #endif //_NTDDK_
159 d_list_add_tail(&isle->isle_gen_bufs_list_head, &new_current_isle->isle_gen_bufs_list_head);
160 d_list_push_head(&gen_info->isles_list, &isle->isle_link);
161 isle->isle_nbytes = new_current_isle->isle_nbytes;
162 #ifdef DEBUG_OOO_CQE
163 isle->dedicated_cid = new_current_isle->dedicated_cid;
164 isle->recent_ooo_combined_cqe = new_current_isle->recent_ooo_combined_cqe;
165 #endif
166 isle = new_current_isle;
167 new_current_isle = &gen_info->first_isle;
168 }
169 mm_mem_zero(&isle->isle_gen_bufs_list_head, sizeof(lm_isle_t) - sizeof(d_list_entry_t));
170 isle->isle_link.next = isle->isle_link.prev = NULL;
171 }
172 gen_info->current_isle = new_current_isle;
173 gen_info->current_isle_number = new_current_isle_num;
174 return nbytes;
175 }
176
177 u32_t lm_tcp_rx_peninsula_to_rq(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t max_num_bytes_to_copy, u8_t sb_idx);
178
179 /* TODO: remove this temporary solution for solaris / linux compilation conflict, linux needs the
180 * first option, solaris the latter */
181 #if defined(__LINUX)
182 #define TOE_RX_INIT_ZERO {{0}}
183 #else
184 #define TOE_RX_INIT_ZERO {0}
185 #endif
186
187 #define TOE_RX_DOORBELL(pdev,cid) do{\
188 struct doorbell db = TOE_RX_INIT_ZERO;\
189 db.header.data |= ((TOE_CONNECTION_TYPE << DOORBELL_HDR_T_CONN_TYPE_SHIFT) |\
190 (DOORBELL_HDR_T_RX << DOORBELL_HDR_T_RX_SHIFT));\
191 DOORBELL((pdev), (cid), *((u32_t *)&db));\
192 } while(0)
193
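/** Description
 *  Posts the accumulated db_more_bds/db_more_bytes to the doorbell data and rings the
 *  RX doorbell, unless doorbells are currently blocked for this connection. The posted
 *  byte/buffer counters are updated in either case and the accumulators are reset.
 */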
194 static __inline void lm_tcp_rx_write_db(
195 lm_device_t *pdev,
196 lm_tcp_state_t *tcp
197 )
198 {
199 lm_tcp_con_t *rx_con = tcp->rx_con;
200 volatile struct toe_rx_db_data *db_data = rx_con->db_data.rx;
201
202 if (!(rx_con->flags & TCP_RX_DB_BLOCKED)) {
203 db_data->bds_prod += rx_con->db_more_bds; /* nbds should be written before nbytes (FW assumption) */
204 db_data->bytes_prod += rx_con->db_more_bytes;
205
206 DbgMessage(pdev, INFORMl4rx,
207 "_lm_tcp_rx_write_db: cid=%d, (nbytes+=%d, nbds+=%d)\n",
208 tcp->cid, rx_con->db_more_bytes, rx_con->db_more_bds);
209 TOE_RX_DOORBELL(pdev, tcp->cid);
210 }
211
212 /* assert if the new addition will make the cyclic counter post_cnt smaller than comp_cnt */
213 DbgBreakIf(S64_SUB(rx_con->bytes_post_cnt + rx_con->db_more_bytes, rx_con->bytes_comp_cnt) < 0);
214 rx_con->bytes_post_cnt += rx_con->db_more_bytes;
215 rx_con->buffer_post_cnt += rx_con->db_more_bufs;
216 rx_con->db_more_bytes = rx_con->db_more_bds = rx_con->db_more_bufs = 0;
217 rx_con->fp_db_cnt++;
218 }
219
220 /** Description
221  * This function is used to increase the window-size. The window is increased in 3 cases:
222  * 1. RQ-placed bytes
223  * 2. GRQ-indicated successfully (short/long loop, doesn't matter)
224  * 3. Window-update from NDIS (initial rcv window increased)
225  * Note: this function also takes into account dwa (the delayed window algorithm) and updates the
226  *    data structures accordingly. However, not all window-updates are part of the dwa algorithm,
227  *    specifically (3), therefore we need to know if the update is dwa-aware or not.
228 */
229 void lm_tcp_rx_post_sws (
230 lm_device_t * pdev,
231 lm_tcp_state_t * tcp,
232 lm_tcp_con_t * rx_con,
233 u32_t nbytes,
234 u8_t op
235 )
236 {
237 volatile struct toe_rx_db_data *db_data = rx_con->db_data.rx;
238 s32_t diff_to_fw;
239
240 switch (op)
241 {
242 case TCP_RX_POST_SWS_INC:
243 /*DbgMessage(pdev, FATAL, "lm_tcp_rx_post_sws() INC: OLD drv_rcv_win_right_edge=%d, nbytes=%d, NEW drv_rcv_win_right_edge=%d FW right_edge=%d \n", rx_con->u.rx.sws_info.drv_rcv_win_right_edge, nbytes, rx_con->u.rx.sws_info.drv_rcv_win_right_edge + nbytes, db_data->rcv_win_right_edge);*/
244 if (rx_con->u.rx.sws_info.extra_bytes > nbytes) {
245 rx_con->u.rx.sws_info.extra_bytes -= nbytes;
246 nbytes = 0;
247 } else {
248 nbytes -= rx_con->u.rx.sws_info.extra_bytes;
249 rx_con->u.rx.sws_info.extra_bytes = 0;
250 rx_con->u.rx.sws_info.drv_rcv_win_right_edge += nbytes;
251 if (rx_con->u.rx.sws_info.drv_rcv_win_right_edge >= db_data->rcv_win_right_edge) {
252 RESET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES);
253 }
254 }
255 break;
256 case TCP_RX_POST_SWS_DEC:
257 if (rx_con->u.rx.sws_info.extra_bytes) {
258 rx_con->u.rx.sws_info.extra_bytes += nbytes;
259 nbytes = 0;
260 }
261 /*DbgMessage(pdev, FATAL, "lm_tcp_rx_post_sws() DEC: OLD drv_rcv_win_right_edge=%d, nbytes=%d, NEW drv_rcv_win_right_edge=%d\n", rx_con->u.rx.sws_info.drv_rcv_win_right_edge, nbytes, rx_con->u.rx.sws_info.drv_rcv_win_right_edge - nbytes);*/
262 rx_con->u.rx.sws_info.drv_rcv_win_right_edge -= nbytes;
263 SET_FLAGS(db_data->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES);
264 break;
265 case TCP_RX_POST_SWS_SET:
266 /*DbgMessage(pdev, FATAL, "lm_tcp_rx_post_sws() SET: nbytes=%d\n", nbytes);*/
267 db_data->rcv_win_right_edge = nbytes;
268         rx_con->u.rx.sws_info.extra_bytes = 0;
269 break;
270 default:
271 DbgBreakMsg("lm_tcp_rx_post_sws: Invalid operation\n");
272 return;
273 }
274
275 /* note that diff_to_fw could be negative due to possibility of window-decrease in LH */
276 diff_to_fw = S32_SUB(rx_con->u.rx.sws_info.drv_rcv_win_right_edge, db_data->rcv_win_right_edge);
277
278 /* If this update isn't dwa_aware, it's good to go... */
279
280 //DbgMessage(pdev, WARNl4, "###lm_tcp_rx_post_sws cid=%d num_bytes=%d diff_to_fw=%d \n", tcp->cid, nbytes, diff_to_fw );
281     /* we give the window only if diff_to_fw is at least an mss (or at least half the initial receive window), which also means it can't be negative... */
282 if ( ((diff_to_fw >= (s32_t)rx_con->u.rx.sws_info.mss) ||
283 (diff_to_fw >= (((s32_t)tcp->tcp_cached.initial_rcv_wnd) / 2)))) {
284 if (rx_con->u.rx.sws_info.timer_on) {
285 /* Vladz TBD: Cancel the timer */
286 rx_con->u.rx.sws_info.timer_on = 0;
287 }
288
289 /* Ring the Advertise Window doorbell here */
290 if (!(tcp->rx_con->flags & TCP_RX_DB_BLOCKED) && !(tcp->rx_con->flags & TCP_RX_POST_BLOCKED)) {
291 db_data->rcv_win_right_edge = rx_con->u.rx.sws_info.drv_rcv_win_right_edge;
292 DbgMessage(pdev, INFORMl4rx,
293 "_lm_tcp_adv_wnd_write_db: cid=%d, nbytes=%d\n",
294 tcp->cid, diff_to_fw);
295 TOE_RX_DOORBELL(pdev, tcp->cid);
296 }
297 } else {
298 if ( ! rx_con->u.rx.sws_info.timer_on ) {
299 /* Vladz TBD: schedule the timer here */
300 rx_con->u.rx.sws_info.timer_on = 1;
301 }
302 }
303 }
304
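/** Description
 *  Produces the next BD on the RX bd-chain and fills it from the given frag
 *  (address split into hi/lo, flags, size and the fw debug byte counter).
 */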
305 static __inline toe_rx_bd_t * _lm_tcp_rx_set_bd (
306 IN lm_frag_t * frag,
307 IN u16_t flags,
308 IN lm_bd_chain_t * rx_chain,
309 IN u32_t dbg_bytes_prod /* Used for synchronizing between fw and driver rq-available-bytes
310 * This is used only as a debug variable for asserting in the fw. */
311 )
312 {
313 struct toe_rx_bd * rx_bd;
314
315 /* hw limit: each bd can point to a buffer with max size of 64KB */
316 DbgBreakIf(frag->size > TCP_MAX_SGE_SIZE || frag->size == 0);
317 rx_bd = (struct toe_rx_bd *)lm_toe_bd_chain_produce_bd(rx_chain);
318 rx_bd->addr_hi = frag->addr.as_u32.high;
319 rx_bd->addr_lo = frag->addr.as_u32.low;
320 rx_bd->flags = flags;
321 rx_bd->size = (u16_t)frag->size;
322 rx_bd->dbg_bytes_prod = dbg_bytes_prod;
323 DbgMessage(NULL, VERBOSEl4rx, "Setting Rx BD flags=0x%x, bd_addr=0x%p, size=%d\n", rx_bd->flags, rx_bd, frag->size);
324 return rx_bd;
325 }
326
327
328 /** Description
329  *  function completes nbytes on a single tcp buffer and completes the buffer itself if it
330  *  has been fully completed.
331 * Assumptions:
332 * fp-lock is taken.
333 * It is only called from lm_tcp_rx_post_buf!!!
334 */
335 static void lm_tcp_complete_tcp_buf(
336 lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_con_t * con, lm_tcp_buffer_t * tcp_buf, u32_t completed_bytes)
337 {
338 s_list_t completed_bufs;
339 s_list_entry_t * entry;
340
341 DbgBreakIf(completed_bytes > tcp_buf->more_to_comp);
342 tcp_buf->more_to_comp -= completed_bytes;
343 con->app_buf_bytes_acc_comp += completed_bytes;
344
345 if(tcp_buf->more_to_comp == 0 && GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_POST_END)) {
346 tcp_buf->app_buf_xferred = con->app_buf_bytes_acc_comp;
347 DbgBreakIf(tcp_buf->app_buf_xferred > tcp_buf->app_buf_size); /* this may be partial completion */
348 con->app_buf_bytes_acc_comp = 0;
349 if (GET_FLAGS(con->flags, TCP_POST_COMPLETE_SPLIT)) {
350 RESET_FLAGS(con->flags, TCP_POST_COMPLETE_SPLIT);
351 }
352 } else {
353 tcp_buf->app_buf_xferred = 0;
354 }
355
356 if (tcp_buf->more_to_comp == 0) {
357 /* should have nothing in the active tb list except this buffer, if we're completing this buffer,
358 * it means that we had something in the peninsula, this means that at the end of the DPC there was
359 * nothing in the active-tb-list, and between DPCs all posted buffers 'occupied' bytes from the peninsula
360 * and were completed to the client. This means that there can be no RQ completions during the DPC that
361 * will try to access the active tb list w/o a lock
362 */
363 DbgBreakIf(s_list_entry_cnt(&con->active_tb_list) != 1);
364 lm_bd_chain_bds_consumed(&con->bd_chain, tcp_buf->bd_used);
365
366 con->buffer_completed_cnt ++;
367 DbgMessage(pdev, VERBOSEl4fp,
368 "cid=%d, completing tcp buf towards mm from post-flow, actual_completed_bytes=%d\n",
369 tcp->cid, tcp_buf->size);
370 entry = s_list_pop_head(&con->active_tb_list);
371 DbgBreakIf(con->rq_nbytes < tcp_buf->size);
372 con->rq_nbytes -= tcp_buf->size;
373 s_list_init(&completed_bufs, entry, entry, 1);
374 con->rq_completion_calls++;
375 mm_tcp_complete_bufs(pdev, tcp, con, &completed_bufs, LM_STATUS_SUCCESS);
376 }
377 }
378
379
380
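/** Description
 *  Handles a CMP/REL completion from the firmware: completes 'completed_bytes' on the
 *  buffers at the head of the rx active_tb_list (with or without push). Takes the fp rx
 *  lock unless the connection is in deferred processing.
 */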
381 void lm_tcp_rx_cmp_process(
382 struct _lm_device_t * pdev,
383 lm_tcp_state_t * tcp,
384 u32_t completed_bytes,
385 u8_t push
386 )
387 {
388 lm_tcp_con_t *rx_con;
389 u32_t actual_bytes_completed;
390 MM_INIT_TCP_LOCK_HANDLE();
391
392 DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_cmp_process, completed_bytes=%d, push=%d cid=%d\n", completed_bytes, push, tcp->cid);
393 DbgBreakIf(!(completed_bytes || push)); /* otherwise there is no point for this function to be called */
394
395 rx_con = tcp->rx_con;
396 DbgBreakIf(! rx_con);
397
398 if (!(rx_con->flags & TCP_DEFERRED_PROCESSING)) {
399 mm_acquire_tcp_lock(pdev, rx_con);
400 }
401 DbgBreakIf(rx_con->flags & TCP_RX_COMP_BLOCKED);
402
403 /* RQ completions can't arrive while we have something in the peninsula (peninsula must either be completed or copied
404      * to the app-buffer beforehand). An RQ_SKP within the dpc will always take care of previous RQs waiting to be copied to. */
405 DbgBreakIf(!d_list_is_empty(&rx_con->u.rx.gen_info.peninsula_list));
406 DbgBreakIf(!d_list_is_empty(&rx_con->u.rx.gen_info.dpc_peninsula_list));
407
408 actual_bytes_completed = lm_tcp_complete_nbytes(pdev, tcp, rx_con, completed_bytes , push);
409
410 rx_con->bytes_comp_cnt += actual_bytes_completed;
411 DbgBreakIf(S64_SUB(rx_con->bytes_post_cnt, rx_con->bytes_comp_cnt) < 0);
412 DbgMessage(pdev, VERBOSEl4rx, "lm_tcp_rx_comp, after comp: pending=%d, active_bufs=%d\n",
413 S64_SUB(rx_con->bytes_post_cnt, rx_con->bytes_comp_cnt),
414 s_list_entry_cnt(&rx_con->active_tb_list));
415
416 if ( completed_bytes ) {
417 /* Vladz: TBD
418 lm_neigh_update_nic_reachability_time(tcp->path->neigh) */
419 }
420 if (!(rx_con->flags & TCP_DEFERRED_PROCESSING)) {
421 mm_release_tcp_lock(pdev, rx_con);
422 }
423 } /* lm_tcp_rx_cmp_process */
424
425
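/** Description
 *  Handles an RQ_SKP completion: 'bytes_skipped' bytes of posted RQ buffers are skipped
 *  by the firmware because the driver is responsible for filling them from the peninsula.
 *  Bytes that were already copied (skp_bytes_copied) are completed immediately, the rest
 *  are copied now from the dpc-peninsula to the RQ buffers.
 */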
426 void lm_tcp_rx_skp_process(
427 struct _lm_device_t * pdev,
428 lm_tcp_state_t * tcp,
429 u32_t bytes_skipped,
430 u8_t sb_idx
431 )
432 {
433 lm_tcp_con_t *rx_con;
434 u32_t comp_bytes;
435 MM_INIT_TCP_LOCK_HANDLE();
436
437 DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_skp_process, bytes_skipped=%d, cid=%d\n", bytes_skipped, tcp->cid);
438
439 if (bytes_skipped == 0) {
440         /* nothing to do here - occurs in a special fw case, where there is GRQ->RQ processing with no GRQ and no RQ;
441          * this will usually happen at the beginning of the connection or in other special cases */
442 return;
443 }
444
445 rx_con = tcp->rx_con;
446 DbgBreakIf(! rx_con);
447
448 if (!GET_FLAGS(rx_con->flags, TCP_DEFERRED_PROCESSING)) {
449 mm_acquire_tcp_lock(pdev, rx_con);
450 }
451 DbgBreakIf(GET_FLAGS(rx_con->flags, TCP_RX_COMP_BLOCKED));
452
453 comp_bytes = min(bytes_skipped, tcp->rx_con->u.rx.skp_bytes_copied);
454 if (comp_bytes) {
455 tcp->rx_con->bytes_comp_cnt += comp_bytes;
456 /* complete nbytes on buffers (dpc-flow ) */
457 lm_tcp_complete_nbytes(pdev, tcp, tcp->rx_con, comp_bytes, /* push=*/ 0);
458 bytes_skipped -= comp_bytes;
459 tcp->rx_con->u.rx.skp_bytes_copied -= comp_bytes;
460 }
461
462 /* We know for sure, that all the application buffers we are about to access have already been posted
463 * before the dpc, and therefore are valid in the active_tb_list.
464 * TBA Michals: bypass FW
465 */
466 if (bytes_skipped) {
467 DbgBreakIf(!d_list_is_empty(&rx_con->u.rx.gen_info.peninsula_list));
468 DbgBreakIfAll(d_list_is_empty(&rx_con->u.rx.gen_info.dpc_peninsula_list));
469 DbgBreakIf(((lm_tcp_gen_buf_t *)d_list_peek_head(&rx_con->u.rx.gen_info.dpc_peninsula_list))->placed_bytes == 0);
470 rx_con->u.rx.gen_info.bytes_copied_cnt_in_process += lm_tcp_rx_peninsula_to_rq(pdev, tcp, bytes_skipped,sb_idx);
471 }
472
473 if (!GET_FLAGS(rx_con->flags, TCP_DEFERRED_PROCESSING)) {
474 mm_release_tcp_lock(pdev, rx_con);
475 }
476 } /* lm_tcp_rx_skp_process */
477
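/** Description
 *  Removes 'num_of_isles' isles starting at isle 'num_isle' (DGI completion) and returns
 *  their generic buffers to the pool. Also clears the too-big-isle / too-many-isles
 *  dpc flags since the offending isles are gone.
 */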
478 void lm_tcp_rx_delete_isle(
479 struct _lm_device_t * pdev,
480 lm_tcp_state_t * tcp,
481 u8_t sb_idx,
482 u8_t num_isle,
483 u32_t num_of_isles)
484 {
485 lm_tcp_con_t * rx_con = tcp->rx_con;
486 lm_tcp_con_rx_gen_info_t * gen_info;
487 d_list_t removed_list;
488 u32_t isle_nbytes;
489
490
491
492 DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_delete_isle cid=%d isle=%d\n", tcp->cid, num_isle);
493 gen_info = &rx_con->u.rx.gen_info;
494 d_list_init(&removed_list, NULL, NULL, 0);
495
496 while (num_of_isles) {
497 isle_nbytes = _lm_tcp_isle_remove(pdev, tcp, sb_idx, num_isle + (num_of_isles - 1), &removed_list);
498 pdev->toe_info.grqs[sb_idx].number_of_isles_delta--;
499 DbgBreakIf(isle_nbytes > gen_info->isle_nbytes);
500 gen_info->isle_nbytes -= isle_nbytes;
501 num_of_isles--;
502 }
503
504 pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta -= (s32_t)d_list_entry_cnt(&removed_list);
505 if (!d_list_is_empty(&removed_list)) {
506 lm_tcp_return_list_of_gen_bufs(pdev,tcp ,&removed_list, MM_TCP_RGB_COLLECT_GEN_BUFS, sb_idx);
507 tcp->rx_con->droped_non_empty_isles++;
508 } else {
509 DbgBreak();
510 tcp->rx_con->droped_empty_isles++;
511 }
512 rx_con->dpc_info.dpc_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
513 return;
514 }
515
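/** Description
 *  Returns TRUE if the RCQ of the given RSS chain has new completions to process,
 *  i.e. the hw consumer index differs from the driver's chain consumer index.
 */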
516 u8_t lm_toe_is_rx_completion(lm_device_t *pdev, u8_t drv_toe_rss_id)
517 {
518 u8_t result = FALSE;
519 lm_tcp_rcq_t *rcq = NULL;
520
521 DbgBreakIf(!(pdev && ARRSIZE(pdev->toe_info.rcqs) > drv_toe_rss_id));
522
523 rcq = &pdev->toe_info.rcqs[drv_toe_rss_id];
524
525 if ( rcq->hw_con_idx_ptr &&
526 *rcq->hw_con_idx_ptr != lm_bd_chain_cons_idx(&rcq->bd_chain) )
527 {
528 result = TRUE;
529 }
530 DbgMessage(pdev, INFORMl4int, "lm_toe_is_rx_completion(): result is:%s\n", result? "TRUE" : "FALSE");
531
532 return result;
533 }
534
535 /** Description
536 * checks if the processing of a certain RCQ is suspended
537 */
538 u8_t lm_toe_is_rcq_suspended(lm_device_t *pdev, u8_t drv_toe_rss_id)
539 {
540 u8_t result = FALSE;
541 lm_tcp_rcq_t *rcq = NULL;
542
543 if (drv_toe_rss_id < MAX_L4_RX_CHAIN)
544 {
545 rcq = &pdev->toe_info.rcqs[drv_toe_rss_id];
546 if (rcq->suspend_processing) {
547 result = TRUE;
548 }
549 }
550 DbgMessage(pdev, INFORMl4int, "lm_toe_is_rcq_suspended(): sb_idx:%d, result is:%s\n", drv_toe_rss_id, result?"TRUE":"FALSE");
551 return result;
552 }
553
554
555 /** Description
556 * Increment consumed generic counter for a connection.
557  *  To avoid rollover in the FW, if the counter exceeds a maximum threshold the driver should
558  *  not wait for application buffers and should post a 'receive window update' doorbell immediately.
559  *  The FW holds 32 bits for this counter, therefore a threshold of 100MB is OK.
560 */
561 static void lm_tcp_incr_consumed_gen(
562 struct _lm_device_t * pdev,
563 lm_tcp_state_t * tcp,
564 u32_t nbytes
565 )
566 {
567 volatile struct toe_rx_db_data *db_data = tcp->rx_con->db_data.rx;
568
569 db_data->consumed_grq_bytes += nbytes;
570
571     /* there's no need to increase the consumed_cnt in two stages (one in the driver and one for FW db_data),
572      * we can always directly increase FW db_data. We only need to decide whether to give a doorbell. Basically
573      * we have two cases where doorbells are given: (1) a buffer is posted and bypasses fw (2) an indication succeeded, in which case
574      * the window will also be increased. However, the window isn't always increased: not if it's smaller than MSS. So, if we
575      * increase the consumed count by something smaller than mss - we'll give the doorbell here... */
576
577 if (nbytes < tcp->rx_con->u.rx.sws_info.mss) {
578 if (!(tcp->rx_con->flags & TCP_RX_DB_BLOCKED)) {
579 TOE_RX_DOORBELL(pdev, tcp->cid);
580 }
581 }
582 }
583
584 /** Description
585 * Copies as many bytes as possible from the peninsula to the single tcp buffer received
586 * updates the peninsula.
587 * This function can be called from two flows:
588 * 1. Post of a buffer
589 * 2. Completion of a dpc.
590 * We need to know which flow it is called from to know which peninsula list to use:
591 * dpc_peninsula_list / peninsula_list.
592 * Post ALWAYS uses the peninsula_list, since it doesn't know about the dpc_peninsula
593 * Completion ALWAYS uses the dpc_peninsula_list, and in this case peninsula_list MUST be empty
594 * this is because there can be buffers in the active_tb_list ONLY if peninsula_list is empty.
595 *
596 * first_buf_offset refers to the peninsula we're dealing with, at the end of the dpc the dpc_peninsula
597 * is copied to the peninsula, therefore first_buf_offset will still be valid. copying from post means that
598  *  there is something in the peninsula which means there's nothing in the active_tb_list ==> won't be a copy from
599  *  dpc. Copying from dpc means there's something in the active-tb-list ==> nothing in the peninsula ==> won't be called
600 * from post, mutual exclusion exists between the post/dpc of copying, therefore we can have only one first_buffer_offset
601 * all other accesses (indication) are done under a lock.
602 * param: dpc - indicates if this is called from the dpc or not (post)
603 * Assumptions:
604 * tcp_buf->more_to_comp is initialized
605 * tcp_buf->size is initialized
606 * num_bufs_complete is initialized by caller (could differ from zero)
607 * Returns
608 * the actual number of bytes copied
609  *     num_bufs_complete is the number of buffers that were completely copied (consumed) and can be
610  *     returned to the pool.
611 */
612 static u32_t lm_tcp_rx_peninsula_to_rq_copy(
613 lm_device_t * pdev,
614 lm_tcp_state_t * tcp,
615 lm_tcp_buffer_t * tcp_buf,
616 d_list_t * return_list,
617 u32_t max_num_bytes_to_copy,
618 u8_t dpc)
619 {
620 lm_tcp_gen_buf_t * curr_gen_buf;
621 lm_tcp_con_rx_gen_info_t * gen_info;
622 d_list_t * peninsula;
623 u32_t tcp_offset;
624 u32_t ncopy;
625 u32_t bytes_left;
626 u32_t bytes_copied = 0;
627
628 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_peninsula_to_rq_copy tcp_buf = 0x%x cid=%d\n", *((u32_t *)&tcp_buf), tcp->cid);
629
630 gen_info = &tcp->rx_con->u.rx.gen_info;
631
632 if (dpc) {
633 peninsula = &gen_info->dpc_peninsula_list;
634 } else {
635 peninsula = &gen_info->peninsula_list;
636 }
637
638 curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(peninsula);
639 tcp_offset = tcp_buf->size - tcp_buf->more_to_comp;
640     bytes_left = min(tcp_buf->more_to_comp, max_num_bytes_to_copy); /* copy to buffer only what's allowed...*/
641
642 /* start copying as much as possible from peninsula to tcp buffer */
643 while (bytes_left && curr_gen_buf && curr_gen_buf->placed_bytes) {
644 ncopy = curr_gen_buf->placed_bytes - gen_info->first_buf_offset;
645 if (ncopy > bytes_left) {
646 ncopy = bytes_left;
647 }
648 if (mm_tcp_copy_to_tcp_buf(pdev, tcp, tcp_buf,
649 curr_gen_buf->buf_virt + gen_info->first_buf_offset, /* start of data in generic buffer */
650 tcp_offset, ncopy) != ncopy)
651 {
652 gen_info->copy_gen_buf_dmae_cnt++;
653
654             /* If this is a generic buffer that has the free_when_done flag on, it means it's non-cached memory and not physical
655              * memory -> so, we can't try and dmae to it... not likely to happen... */
656 if (!GET_FLAGS(curr_gen_buf->flags, GEN_FLAG_FREE_WHEN_DONE)) {
657 if (mm_tcp_rx_peninsula_to_rq_copy_dmae(pdev,
658 tcp,
659 curr_gen_buf->buf_phys,
660 gen_info->first_buf_offset, /* start of data in generic buffer */
661 tcp_buf,
662 tcp_offset,
663 ncopy) != ncopy)
664 {
665 DbgBreakMsg("Unable To Copy");
666 gen_info->copy_gen_buf_fail_cnt++;
667
668 break;
669 }
670 } else {
671 DbgBreakMsg("Unable To Copy");
672 gen_info->copy_gen_buf_fail_cnt++;
673
674 break;
675 }
676 }
677
678 /* update peninsula */
679 bytes_copied += ncopy;
680
681 gen_info->first_buf_offset += (u16_t)ncopy;
682
683 /* done with the generic buffer? - return it to the pool */
684 if (curr_gen_buf->placed_bytes == gen_info->first_buf_offset) {
685 curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_pop_head(peninsula);
686 d_list_push_tail(return_list, &curr_gen_buf->link);
687 gen_info->first_buf_offset = 0;
688 gen_info->num_buffers_copied_grq++;
689 curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(peninsula);
690 }
691
692 /* update tcp buf stuff */
693 bytes_left -= ncopy;
694 tcp_offset += ncopy;
695 }
696
697 if (dpc) {
698 gen_info->dpc_peninsula_nbytes -= bytes_copied;
699 } else {
700 gen_info->peninsula_nbytes -= bytes_copied;
701 }
702
703 /* return the number of bytes actually copied */
704 return bytes_copied;
705 }
706
707 /** Description
708 * function copies data from the peninsula to tcp buffers already placed in the
709 * active_tb_list. The function completes the buffers if a tcp buffer from active_tb_list
710 * was partially/fully filled. This case simulates a call to lm_tcp_rx_comp
711 * (i.e. a completion received from firmware)
712 * Assumptions:
713 */
714 u32_t lm_tcp_rx_peninsula_to_rq(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t max_num_bytes_to_copy, u8_t sb_idx)
715 {
716 lm_tcp_buffer_t * curr_tcp_buf;
717 lm_tcp_con_rx_gen_info_t * gen_info;
718 d_list_t return_list;
719 u32_t copied_bytes = 0, currently_copied = 0;
720
721 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_peninsula_to_rq cid=%d\n", tcp->cid);
722
723 gen_info = &tcp->rx_con->u.rx.gen_info;
724
725 DbgBreakIf(gen_info->peninsula_blocked == TRUE); /* terminate was already called - no copying should be done */
726
727 /* Copy data from dpc_peninsula to tcp buffer[s] */
728 d_list_init(&return_list, NULL, NULL, 0);
729
730 curr_tcp_buf = lm_tcp_next_entry_dpc_active_list(tcp->rx_con);
731
732 /* TBA Michals: FW Bypass First check if we can copy to bypass buffers */
733
734 /* Copy the number of bytes received in SKP */
735 while (max_num_bytes_to_copy && gen_info->dpc_peninsula_nbytes && curr_tcp_buf) {
736 currently_copied = lm_tcp_rx_peninsula_to_rq_copy(pdev, tcp, curr_tcp_buf, &return_list, max_num_bytes_to_copy, TRUE);
737 curr_tcp_buf = (lm_tcp_buffer_t *)s_list_next_entry(&curr_tcp_buf->link);
738 DbgBreakIf(max_num_bytes_to_copy < currently_copied);
739 max_num_bytes_to_copy -= currently_copied;
740 copied_bytes += currently_copied;
741 }
742
743 if (!d_list_is_empty(&return_list)) {
744
745 lm_tcp_return_list_of_gen_bufs(pdev,tcp , &return_list,
746 (sb_idx != NON_EXISTENT_SB_IDX) ? MM_TCP_RGB_COLLECT_GEN_BUFS : 0, sb_idx);
747 }
748
749 /* If we've copied to a buffer in the active_tb_list we need to complete it since fw knows
750 * the driver has the bytes and the driver will take care of copying them and completing them.
751 * this path simulates a call to lm_tcp_rx_comp (buffers taken from active_tb_list) */
752 /* Note that pending bytes here could reach a negative value if a partial
753 * application buffer was posted and the doorbell hasn't been given yet, however,
754 * once the doorbell is given for the application buffer the pending bytes will reach a non-negative
755 * value (>=0) */
756 tcp->rx_con->bytes_comp_cnt += copied_bytes;
757 /* complete nbytes on buffers (dpc-flow ) */
758 lm_tcp_complete_nbytes(pdev, tcp, tcp->rx_con, copied_bytes, /* push=*/ 0);
759
760 DbgMessage(pdev, VERBOSEl4rx, "lm_tcp_rx_peninsula_to_rq copied %d bytes cid=%d\n", copied_bytes, tcp->cid);
761 return copied_bytes;
762 }
763
764 /** Description
765 * determines whether or not we can indicate.
766 * Rules:
767 * - Indication is not blocked
768  *   - we are not in the middle of completing a split-buffer
769 * we can only indicate after an entire buffer has been completed/copied to.
770 * we determine this by the app_buf_bytes_acc_comp. This is to avoid the
771 * following data integrity race:
772 * application buffer: app_start, app_end
773 * app_start is posted, peninsula copied to app_start, app_start completed to
774 * fw then the rest is indicated. fw receives app_end, fw thinks peninsula was
775 * copied to buffer, application buffer misses data...
776 * - our active_tb_list is empty... we HAVE to make sure to
777 * always indicate after we've fully utilized our RQ
778 * buffers...
779 */
780 static __inline u8_t _lm_tcp_ok_to_indicate(lm_tcp_con_t * rx_con)
781 {
782 return (!(rx_con->flags & TCP_RX_IND_BLOCKED) && (rx_con->app_buf_bytes_acc_comp == 0) &&
783 (s_list_is_empty(&rx_con->active_tb_list)));
784 }
785
786 /** Description
787 * GA: add a buffer to the peninsula - nbytes represents the number of bytes in the previous buffer.
788 * GR: release a buffer from the peninsula - nbytes represents the number of bytes in the current buffer.
789 * Assumption:
790 * GR can only be called on a buffer that had been added using GA before
791 */
792 void lm_tcp_rx_gen_peninsula_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t nbytes, lm_tcp_gen_buf_t * gen_buf)
793 {
794 lm_tcp_con_t * rx_con = tcp->rx_con;
795 lm_tcp_con_rx_gen_info_t * gen_info;
796 lm_tcp_gen_buf_t * last_gen_buf;
797
798 DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_peninsula_process, nbytes=%d, cid=%d add=%s\n", nbytes, tcp->cid,
799 (gen_buf)? "TRUE" : "FALSE");
800
801 DbgBreakIf(rx_con->flags & TCP_RX_COMP_BLOCKED);
802
803 gen_info = &rx_con->u.rx.gen_info;
804
805 /* update the previous buffer OR current buffer if this is a release operation. This function is always called
806 * from within a DPC and updates the dpc_peninsula */
807 if (nbytes) {
808 gen_info->dpc_peninsula_nbytes += nbytes;
809 last_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_tail(&gen_info->dpc_peninsula_list);
810 DbgBreakIfAll(last_gen_buf == NULL);
811 DbgBreakIfAll(last_gen_buf->placed_bytes != 0);
812 DbgBreakIfAll(nbytes > LM_TCP_GEN_BUF_SIZE(pdev));
813 last_gen_buf->placed_bytes = (u16_t)nbytes;
814 }
815
816 if (gen_buf /* add */) {
817 DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
818 DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
819
820 d_list_push_tail(&gen_info->dpc_peninsula_list, &gen_buf->link);
821 }
822
823 }
824
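/** Description
 *  GNI: creates a new isle for out-of-order data and makes it the current isle.
 *  The first isle of a connection is embedded in gen_info->first_isle; additional
 *  isles are taken from the per-GRQ isles pool. The new isle is linked into
 *  isles_list at position 'isle_num' and starts with the single buffer 'gen_buf'.
 */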
825 void lm_tcp_rx_gen_isle_create(lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_gen_buf_t * gen_buf, u8_t sb_idx, u8_t isle_num)
826 {
827 lm_isle_t * current_isle = NULL;
828 lm_isle_t * next_isle = NULL;
829 lm_tcp_con_t * rx_con = tcp->rx_con;
830 lm_tcp_con_rx_gen_info_t * gen_info;
831 u8_t isles_cnt;
832 d_list_entry_t * isle_entry_prev = NULL;
833 d_list_entry_t * isle_entry_next = NULL;
834
835 gen_info = &rx_con->u.rx.gen_info;
836 isles_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list);
837 if (isles_cnt) {
838 DbgBreakIf(isles_cnt == T_TCP_MAX_ISLES_PER_CONNECTION_TOE);
839 current_isle = _lm_tcp_isle_get_free_list(pdev, sb_idx);
840 DbgBreakIf(!current_isle);
841 #ifdef DEBUG_OOO_CQE
842 DbgBreakIf(current_isle->dedicated_cid != 0);
843 current_isle->dedicated_cid = tcp->cid;
844 #endif
845 } else {
846 current_isle = &gen_info->first_isle;
847 }
848
849     d_list_push_head(&current_isle->isle_gen_bufs_list_head, &gen_buf->link);
850 current_isle->isle_nbytes = 0;
851 if (isle_num == 1) {
852 if (current_isle != &gen_info->first_isle) {
853 *current_isle = gen_info->first_isle;
854 d_list_init(&gen_info->first_isle.isle_gen_bufs_list_head, NULL, NULL, 0);
855 d_list_push_head(&gen_info->first_isle.isle_gen_bufs_list_head, &gen_buf->link);
856 gen_info->first_isle.isle_nbytes = 0;
857 isle_entry_prev = &gen_info->first_isle.isle_link;
858 isle_entry_next = gen_info->first_isle.isle_link.next;
859 }
860 } else if (isle_num <= isles_cnt) {
861 next_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
862 isle_entry_prev = next_isle->isle_link.prev;
863 isle_entry_next = &next_isle->isle_link;
864 } else if (isle_num == (isles_cnt + 1)) {
865 isle_entry_next = NULL;
866 isle_entry_prev = gen_info->isles_list.tail;
867 } else {
868 DbgBreak();
869 }
870
871     d_list_insert_entry(&gen_info->isles_list, isle_entry_prev, isle_entry_next, &current_isle->isle_link);
872 if (isle_num == 1) {
873 current_isle = &gen_info->first_isle;
874 }
875 #ifdef DEBUG_OOO_CQE
876 SET_DEBUG_OOO_INFO(current_isle, CMP_OPCODE_TOE_GNI, 0);
877 #endif
878 gen_info->current_isle = current_isle;
879 gen_info->current_isle_number = isle_num;
880 pdev->toe_info.grqs[sb_idx].number_of_isles_delta++;
881 if (isles_cnt == gen_info->max_number_of_isles) {
882 gen_info->max_number_of_isles++;
883 }
884 }
885
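/** Description
 *  GAIR/GRI: updates the right (tail) side of isle 'isle_num'. A non-zero nbytes
 *  releases the last buffer of the isle (records its placed bytes); a non-NULL
 *  gen_buf appends a new buffer to the isle's tail. Also checks the configured
 *  limit on generic buffers per isle and flags the dpc if it is exceeded.
 */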
886 void lm_tcp_rx_gen_isle_right_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t nbytes, lm_tcp_gen_buf_t * gen_buf, u8_t sb_idx, u8_t isle_num)
887 {
888 lm_tcp_con_t * rx_con = tcp->rx_con;
889 lm_tcp_con_rx_gen_info_t * gen_info;
890 lm_tcp_gen_buf_t * last_gen_buf;
891 lm_isle_t * requested_isle;
892
893 DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_isle_process nbytes = %d cid=%d\n", nbytes, tcp->cid);
894
895 gen_info = &rx_con->u.rx.gen_info;
896 requested_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
897 DbgBreakIf(!requested_isle);
898
899 /* update the previous buffer */
900 last_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_tail(&requested_isle->isle_gen_bufs_list_head);
901 DbgBreakIf(last_gen_buf == NULL);
902 if (nbytes) {
903 gen_info->isle_nbytes += nbytes;
904 requested_isle->isle_nbytes += nbytes;
905 DbgBreakIf(last_gen_buf->placed_bytes != 0);
906 DbgBreakIf(nbytes > 0xffff);
907 last_gen_buf->placed_bytes = (u16_t)nbytes;
908 } else {
909 DbgBreakIf(gen_buf == NULL);
910 DbgBreakIf(last_gen_buf->placed_bytes == 0);
911 }
912
913 if (gen_buf) {
914 DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
915 DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
916
917 d_list_push_tail(&requested_isle->isle_gen_bufs_list_head, &gen_buf->link);
918 pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta++;
919 if (pdev->params.l4_max_gen_bufs_in_isle
920 && (d_list_entry_cnt(&requested_isle->isle_gen_bufs_list_head) > pdev->params.l4_max_gen_bufs_in_isle)) {
921 if (pdev->params.l4_limit_isles & L4_LI_NOTIFY) {
922 DbgBreak();
923 }
924 if (pdev->params.l4_limit_isles & L4_LI_MAX_GEN_BUFS_IN_ISLE) {
925 rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_TOO_BIG_ISLE;
926 }
927 }
928 #ifdef DEBUG_OOO_CQE
929 SET_DEBUG_OOO_INFO(requested_isle, CMP_OPCODE_TOE_GAIR, nbytes);
930 } else {
931 SET_DEBUG_OOO_INFO(requested_isle, CMP_OPCODE_TOE_GRI, nbytes);
932 #endif
933 }
934 }
935
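/** Description
 *  GAIL: updates the left (head) side of isle 'isle_num'. The firmware reports the
 *  added buffer and its byte count in two separate CQEs, so the wait_for_isle_left
 *  flag tracks whether we are still expecting the byte count for a previously added buffer.
 */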
936 void lm_tcp_rx_gen_isle_left_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u32_t nbytes, lm_tcp_gen_buf_t * gen_buf, u8_t sb_idx, u8_t isle_num)
937 {
938 lm_tcp_con_t * rx_con = tcp->rx_con;
939 lm_tcp_con_rx_gen_info_t * gen_info;
940 lm_tcp_gen_buf_t * last_gen_buf;
941 lm_isle_t * requested_isle;
942
943 DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_isle_process nbytes = %d cid=%d\n", nbytes, tcp->cid);
944
945 gen_info = &rx_con->u.rx.gen_info;
946 requested_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
947 DbgBreakIf(!requested_isle);
948
949 if (nbytes) {
950 DbgBreakIf(!gen_info->wait_for_isle_left);
951 DbgBreakIf(gen_buf != NULL);
952 gen_info->wait_for_isle_left = FALSE;
953 gen_info->isle_nbytes += nbytes;
954 requested_isle->isle_nbytes += nbytes;
955 #if defined(_NTDDK_)
956 #pragma prefast (push)
957 #pragma prefast (disable:28182) // If nbytes is larger than zero then ((returned_list_of_gen_bufs))->head is not NULL.
958 #endif //_NTDDK_
959 last_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(&requested_isle->isle_gen_bufs_list_head);
960 DbgBreakIf(last_gen_buf->placed_bytes);
961 last_gen_buf->placed_bytes = (u16_t)nbytes;
962 #if defined(_NTDDK_)
963 #pragma prefast (pop)
964 #endif //_NTDDK_
965 } else {
966 DbgBreakIf(gen_info->wait_for_isle_left);
967 DbgBreakIf(gen_buf == NULL);
968 DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
969 DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
970 gen_info->wait_for_isle_left = TRUE;
971 d_list_push_head(&requested_isle->isle_gen_bufs_list_head, &gen_buf->link);
972 pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta++;
973 }
974 #ifdef DEBUG_OOO_CQE
975 SET_DEBUG_OOO_INFO(requested_isle, CMP_OPCODE_TOE_GAIL, nbytes);
976 #endif
977 }
978
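/** Description
 *  GJ: joins an isle with the element on its left. isle_num == 0 means the first isle
 *  is joined into the (in-order) dpc-peninsula; otherwise isle 'isle_num'+1 is merged
 *  into isle 'isle_num'.
 */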
979 void lm_tcp_rx_gen_join_process(lm_device_t * pdev, lm_tcp_state_t * tcp, u8_t sb_idx, u8_t isle_num)
980 {
981 lm_tcp_con_t * rx_con = tcp->rx_con;
982 lm_tcp_con_rx_gen_info_t * gen_info;
983 lm_isle_t * start_isle;
984 d_list_t gen_buf_list;
985 u32_t isle_nbytes;
986 DbgMessage(pdev, VERBOSEl4rx, "##lm_tcp_rx_gen_join_process cid=%d\n", tcp->cid);
987
988 gen_info = &rx_con->u.rx.gen_info;
989
990
991 if (!isle_num) {
992 /* break if peninsula list isn't empty and the last buffer in list isn't released yet */
993 DbgBreakIf(d_list_entry_cnt(&gen_info->dpc_peninsula_list) &&
994 ((lm_tcp_gen_buf_t *)(d_list_peek_tail(&gen_info->dpc_peninsula_list)))->placed_bytes == 0);
995 d_list_init(&gen_buf_list, NULL, NULL, 0);
996 isle_nbytes = _lm_tcp_isle_remove(pdev, tcp, sb_idx, 1, &gen_buf_list);
997 // DbgBreakIf(!(isle_nbytes && d_list_entry_cnt(&gen_buf_list)));
998 if (d_list_entry_cnt(&gen_buf_list) > 1) {
999 DbgBreakIf(((lm_tcp_gen_buf_t *)(d_list_peek_head(&gen_buf_list)))->placed_bytes == 0);
1000 }
1001 pdev->toe_info.grqs[sb_idx].gen_bufs_in_isles_delta -= (s32_t)d_list_entry_cnt(&gen_buf_list);
1002 pdev->toe_info.grqs[sb_idx].number_of_isles_delta--;
1003
1004 if (!d_list_is_empty(&gen_buf_list)) {
1005 d_list_add_tail(&gen_info->dpc_peninsula_list, &gen_buf_list);
1006 }
1007 gen_info->dpc_peninsula_nbytes += isle_nbytes;
1008 gen_info->isle_nbytes -= isle_nbytes;
1009 } else {
1010 start_isle = _lm_tcp_isle_find(pdev,tcp,isle_num);
1011 d_list_init(&gen_buf_list, NULL, NULL, 0);
1012 isle_nbytes = _lm_tcp_isle_remove(pdev, tcp, sb_idx, isle_num + 1, &gen_buf_list);
1013 // DbgBreakIf(!(isle_nbytes && d_list_entry_cnt(&gen_buf_list)));
1014 pdev->toe_info.grqs[sb_idx].number_of_isles_delta--;
1015 if (d_list_entry_cnt(&gen_buf_list) > 1) {
1016 DbgBreakIf(((lm_tcp_gen_buf_t *)(d_list_peek_head(&gen_buf_list)))->placed_bytes == 0);
1017 }
1018 DbgBreakIf(((lm_tcp_gen_buf_t *)(d_list_peek_tail(&start_isle->isle_gen_bufs_list_head)))->placed_bytes == 0);
1019 if (!d_list_is_empty(&gen_buf_list)) {
1020 d_list_add_tail(&start_isle->isle_gen_bufs_list_head, &gen_buf_list);
1021 }
1022 start_isle->isle_nbytes += isle_nbytes;
1023 #ifdef DEBUG_OOO_CQE
1024 SET_DEBUG_OOO_INFO(start_isle,CMP_OPCODE_TOE_GJ,0);
1025 #endif
1026 }
1027 rx_con->dpc_info.dpc_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
1028
1029 }
1030
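/** Description
 *  Pops the next generic buffer from the GRQ's active_gen_list (under the GRQ dpc lock)
 *  and consumes one BD from the GRQ bd-chain. Returns NULL if the firmware reported a
 *  GA/GAI completion while no generic buffers were posted.
 */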
1031 static __inline lm_tcp_gen_buf_t * lm_tcp_rx_next_grq_buf(lm_device_t * pdev, u8_t sb_idx)
1032 {
1033 lm_tcp_gen_buf_t * gen_buf;
1034
1035 /* 11/12/2008 - TODO: Enhance locking acquisition method,
1036      * TBD: aggregate cons and active_gen_list updates */
1037 MM_ACQUIRE_TOE_GRQ_LOCK_DPC(pdev, sb_idx);
1038
1039 /* Get the generic buffer for this completion */
1040 gen_buf = (lm_tcp_gen_buf_t *)d_list_pop_head(&pdev->toe_info.grqs[sb_idx].active_gen_list);
1041 if (ERR_IF(gen_buf == NULL)) {
1042 DbgBreakMsg("Received a fw GA/GAI without any generic buffers\n");
1043 return NULL;
1044 }
1045 DbgBreakIf(!gen_buf);
1046 DbgBreakIf(SIG(gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
1047 DbgBreakIf(END_SIG(gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
1048
1049 /* each generic buffer is represented by ONE bd on the bd-chain */
1050 lm_bd_chain_bds_consumed(&pdev->toe_info.grqs[sb_idx].bd_chain, 1);
1051
1052 MM_RELEASE_TOE_GRQ_LOCK_DPC(pdev, sb_idx);
1053
1054 return gen_buf;
1055 }
1056
1057 /** Description
1058 * completes the fast-path operations for a certain connection
1059 * Assumption:
1060 * fp-rx lock is taken
1061 * This function is mutual exclusive: there can only be one thread running it at a time.
1062 */
1063 void lm_tcp_rx_complete_tcp_fp(lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_con_t * con)
1064 {
1065 lm_tcp_buffer_t * curr_tcp_buf;
1066 u32_t add_sws_bytes = 0;
1067
1068 if (con->dpc_info.dpc_comp_blocked) {
1069 /* we will no longer receive a "skp" */
1070 SET_FLAGS(con->flags, TCP_POST_NO_SKP); /* so that new posts complete immediately... */
1071 /* complete any outstanding skp bytes... */
1072 if (tcp->rx_con->u.rx.skp_bytes_copied) {
1073 /* now we can complete these bytes that have already been copied... */
1074 tcp->rx_con->bytes_comp_cnt += tcp->rx_con->u.rx.skp_bytes_copied;
1075 /* complete nbytes on buffers (dpc-flow ) */
1076 lm_tcp_complete_nbytes(pdev, tcp, tcp->rx_con, tcp->rx_con->u.rx.skp_bytes_copied, /* push=*/ 0);
1077 tcp->rx_con->u.rx.skp_bytes_copied = 0;
1078 }
1079 }
1080
1081 /* TBA Michals FW BYPASS...copy here */
1082 if (!d_list_is_empty(&con->u.rx.gen_info.dpc_peninsula_list)) {
1083 /* only copy if this is the end... otherwise, we will wait for that SKP... */
1084 if (lm_tcp_next_entry_dpc_active_list(con) && con->u.rx.gen_info.dpc_peninsula_nbytes && con->dpc_info.dpc_comp_blocked) {
1085 /* couldn't have been posted buffers if peninsula exists... */
1086 DbgBreakIf(!d_list_is_empty(&con->u.rx.gen_info.peninsula_list));
1087 con->u.rx.gen_info.bytes_copied_cnt_in_comp += lm_tcp_rx_peninsula_to_rq(pdev, tcp, 0xffffffff,NON_EXISTENT_SB_IDX);
1088 }
1089
1090         /* check if we still have something in the peninsula after the copying AND our active tb list is empty... otherwise, it's intended
1091          * for that and we'll wait for the next RQ_SKP in the next DPC. UNLESS the completion is blocked, in which case an RQ_SKP will never
1092          * make its way here... */
1093 curr_tcp_buf = lm_tcp_next_entry_dpc_active_list(con);
1094 DbgBreakIf(!d_list_is_empty(&con->u.rx.gen_info.dpc_peninsula_list) && curr_tcp_buf && con->dpc_info.dpc_comp_blocked);
1095 if (!d_list_is_empty(&con->u.rx.gen_info.dpc_peninsula_list) && !curr_tcp_buf) {
1096 d_list_add_tail(&con->u.rx.gen_info.peninsula_list, &con->u.rx.gen_info.dpc_peninsula_list);
1097 con->u.rx.gen_info.peninsula_nbytes += con->u.rx.gen_info.dpc_peninsula_nbytes;
1098 con->u.rx.gen_info.dpc_peninsula_nbytes = 0;
1099
1100 /* we want to leave any non-released buffer in the dpc_peninsula (so that we don't access the list w/o a lock) */
1101 if (((lm_tcp_gen_buf_t *)d_list_peek_tail(&con->u.rx.gen_info.peninsula_list))->placed_bytes == 0) {
1102 lm_tcp_gen_buf_t * gen_buf;
1103 gen_buf = (lm_tcp_gen_buf_t *)d_list_pop_tail(&con->u.rx.gen_info.peninsula_list);
1104 if CHK_NULL(gen_buf)
1105 {
1106 DbgBreakIfAll( !gen_buf ) ;
1107 return;
1108 }
1109 d_list_init(&con->u.rx.gen_info.dpc_peninsula_list, &gen_buf->link, &gen_buf->link, 1);
1110 } else {
1111 d_list_clear(&con->u.rx.gen_info.dpc_peninsula_list);
1112 }
1113
1114 }
1115 }
1116
1117 /**** Client completing : may result in lock-release *****/
1118 /* during lock-release, due to this function being called from service_deferred, more
1119 * cqes can be processed. We don't want to mix. This function is mutually exclusive, so
1120      * any processing makes its way to being completed by calling this function.
1121 * the following define a "fast-path completion"
1122 * (i) RQ buffers to be completed
1123 * defined by dpc_completed_tail and are collected during lm_tcp_complete_bufs BEFORE lock
1124      *     is released, so no more buffer processing can make its way into this buffer completion.
1125 * (ii) GRQ buffers to be indicated
1126 * Are taken from peninsula, and not dpc_peninsula, so no NEW generic buffers can make their
1127 * way to this indication
1128 * (iii) Fin to be indicated
1129 * determined by the flags, since dpc_flags CAN be modified during processing we copy
1130 * them to a snapshot_flags parameter, which is initialized in this function only, so no fin
1131      *     can make its way in while we release the lock.
1132 * (iv) Remainders for sp
1133 * all sp operations are logged in dpc_flags. for the same reason as (iii) no sp commands can
1134 * make their way in during this fp-completion, all sp-processing after will relate to this point in time.
1135 */
1136     /* NDC is the only fp flag: it determines that we should complete all the processed cqes. Therefore, we can
1137 * turn it off here. We should turn it off, since if no sp flags are on, the sp-complete function shouldn't be called
1138 */
1139 // RESET_FLAGS(con->dpc_info.dpc_flags, LM_TCP_DPC_NDC);
1140 con->dpc_info.snapshot_flags = con->dpc_info.dpc_flags;
1141 con->dpc_info.dpc_flags = 0;
1142
1143 /* compensate fw-window with the rq-placed bytes */
1144 if (con->dpc_info.dpc_rq_placed_bytes) {
1145 add_sws_bytes += con->dpc_info.dpc_rq_placed_bytes;
1146 con->dpc_info.dpc_rq_placed_bytes = 0;
1147 }
1148
1149
1150     /* check if we completed a buffer that, as a result, unblocks the um from posting more (a split buffer that
1151      * was placed on the last bd). If this occurred - we should not have any other RQs!!! */
1152 if (con->dpc_info.dpc_unblock_post) {
1153 RESET_FLAGS(con->flags, TCP_POST_DELAYED);
1154 con->dpc_info.dpc_unblock_post = 0;
1155 }
1156
1157 /* NOTE: AFTER THIS STAGE DO NOT ACCESS DPC-INFO ANYMORE - for deferred cqes issue */
1158
1159 /* complete buffers to client */
1160 if (con->dpc_info.dpc_completed_tail != NULL) {
1161 lm_tcp_complete_bufs(pdev,tcp,con);
1162 }
1163
1164 /* Is there something left to indicate? */
1165 if (!d_list_is_empty(&con->u.rx.gen_info.peninsula_list) && _lm_tcp_ok_to_indicate(con)) {
1166 mm_tcp_rx_indicate_gen(pdev,tcp);
1167 add_sws_bytes += tcp->rx_con->u.rx.gen_info.add_sws_bytes; /* any bytes we need to update will be aggregated here during indicate */
1168 tcp->rx_con->u.rx.gen_info.add_sws_bytes = 0;
1169 }
1170
1171 if (add_sws_bytes) {
1172 lm_tcp_rx_post_sws(pdev, tcp, con, add_sws_bytes, TCP_RX_POST_SWS_INC);
1173 }
1174
1175 }
1176
1177
1178 /** Description
1179 * processes a single cqe.
1180 */
1181 void lm_tcp_rx_process_cqe(
1182 lm_device_t * pdev,
1183 struct toe_rx_cqe * cqe,
1184 lm_tcp_state_t * tcp,
1185 u8_t sb_idx)
1186 {
1187 u32_t nbytes;
1188 u8_t cmd;
1189 u8_t isle_num = 0;
1190
1191 cmd = ((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT);
1192
1193
1194 /* Check that the cqe nbytes make sense, we could have got here by chance... */
1195     /* update completion has a different usage for nbytes, which is a sequence - so any number is valid */
1196 if(IS_OOO_CQE(cmd)) {
1197 nbytes = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_NBYTES) >> TOE_RX_CQE_OOO_PARAMS_NBYTES_SHIFT;
1198 isle_num = (cqe->data.ooo_params.ooo_params & TOE_RX_CQE_OOO_PARAMS_ISLE_NUM) >> TOE_RX_CQE_OOO_PARAMS_ISLE_NUM_SHIFT;
1199 if (((isle_num == 0) && (cmd != CMP_OPCODE_TOE_GJ)) || (isle_num > T_TCP_MAX_ISLES_PER_CONNECTION_TOE)) {
1200 DbgMessage(pdev, FATAL, "Isle number %d is not valid for OOO CQE %d\n", isle_num, cmd);
1201 DbgBreak();
1202 }
1203 } else if (cmd == RAMROD_OPCODE_TOE_UPDATE) {
1204 nbytes = cqe->data.raw_data;
1205 } else {
1206 nbytes = (cqe->data.in_order_params.in_order_params & TOE_RX_CQE_IN_ORDER_PARAMS_NBYTES) >> TOE_RX_CQE_IN_ORDER_PARAMS_NBYTES_SHIFT;
1207         DbgBreakIfAll(nbytes & 0xc0000000); /* two upper bits set would indicate a completion larger than 1GB - a bit odd...*/
1208 DbgBreakIf(nbytes && tcp->rx_con->dpc_info.dpc_comp_blocked);
1209 }
1210 if (pdev->toe_info.archipelago.l4_decrease_archipelago
1211 && d_list_entry_cnt(&tcp->rx_con->u.rx.gen_info.first_isle.isle_gen_bufs_list_head)) {
1212 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_TOO_MANY_ISLES;
1213 }
1214 switch(cmd)
1215 {
1216 case CMP_OPCODE_TOE_SRC_ERR:
1217 DbgMessage(pdev, FATAL, "ERROR: NO SEARCHER ENTRY!\n");
1218 DbgBreakIfAll(TRUE);
1219 return;
1220 case CMP_OPCODE_TOE_GA:
1221 //DbgMessage(pdev, WARN, "GenericAdd cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1222 lm_tcp_rx_gen_peninsula_process(pdev, tcp, nbytes,
1223 lm_tcp_rx_next_grq_buf(pdev, sb_idx));
1224 return;
1225 case CMP_OPCODE_TOE_GNI:
1226 //DbgMessage(pdev, WARN, "GenericCreateIsle cid=%d isle_num=%d!\n", tcp->cid, isle_num);
1227 DbgBreakIf(nbytes);
1228 lm_tcp_rx_gen_isle_create(pdev, tcp,
1229 lm_tcp_rx_next_grq_buf(pdev, sb_idx), sb_idx, isle_num);
1230 return;
1231 case CMP_OPCODE_TOE_GAIR:
1232 //DbgMessage(pdev, WARN, "GenericAddIsleR cid=%d isle_num=%d nbytes=%d!\n", tcp->cid, isle_num, nbytes);
1233 lm_tcp_rx_gen_isle_right_process(pdev, tcp, nbytes,
1234 lm_tcp_rx_next_grq_buf(pdev, sb_idx), sb_idx, isle_num);
1235 return;
1236 case CMP_OPCODE_TOE_GAIL:
1237 DbgMessage(pdev, WARN, "GenericAddIsleL cid=%d isle_num=%d nbytes=%d!\n", tcp->cid, isle_num, nbytes);
1238 if (nbytes)
1239 {
1240 lm_tcp_rx_gen_isle_left_process(pdev, tcp, nbytes,
1241 NULL, sb_idx, isle_num);
1242 }
1243 else
1244 {
1245 lm_tcp_rx_gen_isle_left_process(pdev, tcp, 0,
1246 lm_tcp_rx_next_grq_buf(pdev, sb_idx), sb_idx, isle_num);
1247 }
1248 return;
1249 case CMP_OPCODE_TOE_GRI:
1250 // DbgMessage(pdev, WARN, "GenericReleaseIsle cid=%d isle_num=%d nbytes=%d!\n", tcp->cid, isle_num, nbytes);
1251 lm_tcp_rx_gen_isle_right_process(pdev, tcp, nbytes, NULL, sb_idx, isle_num);
1252 return;
1253 case CMP_OPCODE_TOE_GR:
1254 //DbgMessage(pdev, WARN, "GenericRelease cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1255 lm_tcp_rx_gen_peninsula_process(pdev, tcp, nbytes, NULL);
1256 return;
1257 case CMP_OPCODE_TOE_GJ:
1258 //DbgMessage(pdev, WARN, "GenericJoin cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1259 lm_tcp_rx_gen_join_process(pdev, tcp, sb_idx, isle_num);
1260 return;
1261 case CMP_OPCODE_TOE_CMP:
1262 //DbgMessage(pdev, WARN, "Cmp(push) cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1263 /* Add fast path handler here */
1264 lm_tcp_rx_cmp_process(pdev, tcp, nbytes, 1);
1265 return;
1266 case CMP_OPCODE_TOE_REL:
1267 //DbgMessage(pdev, WARN, "Rel(nopush) cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1268 lm_tcp_rx_cmp_process(pdev, tcp, nbytes, 0);
1269 return;
1270 case CMP_OPCODE_TOE_SKP:
1271 //DbgMessage(pdev, WARN, "Skp cid=%d nbytes=%d!\n", tcp->cid, cqe->nbytes);
1272 lm_tcp_rx_skp_process(pdev, tcp, nbytes, sb_idx);
1273 return;
1274 case CMP_OPCODE_TOE_DGI:
1275 DbgMessage(pdev, WARN, "Delete Isle cid=%d!\n", tcp->cid);
1276 lm_tcp_rx_delete_isle(pdev, tcp, sb_idx, isle_num, nbytes);
1277 return;
1278 }
1279
1280 /* for the rest of the commands, if we have nbytes, we need to complete them (generic/app) */
1281 /* unless it's an update completion, in which case the nbytes has a different meaning. */
1282 if ((cmd != RAMROD_OPCODE_TOE_UPDATE) && nbytes) {
1283 lm_tcp_gen_buf_t * gen_buf;
1284 gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list);
1285 if(gen_buf && (gen_buf->placed_bytes == 0)) {
1286 lm_tcp_rx_gen_peninsula_process(pdev, tcp, nbytes, NULL);
1287 } else {
1288             /* if we're here - we will no longer see an RQ_SKP so, let's simulate one... note: if we didn't get nbytes here we still need
1289              * to take care of this later; if it's a blocking completion the skip will have to cover everything in the peninsula.
1290              * we can access skp_bytes here lockless, because the only time it is accessed in post is if there is something in the peninsula, and if we got an RQ completion here, there can't be... */
1291 DbgBreakIf(!d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list));
1292             DbgBreakIf(tcp->rx_con->rq_nbytes <= tcp->rx_con->u.rx.gen_info.dpc_peninsula_nbytes+tcp->rx_con->u.rx.skp_bytes_copied); // we got a RQ completion here... so peninsula CAN'T cover RQ!!!
1293 lm_tcp_rx_skp_process(pdev, tcp, tcp->rx_con->u.rx.gen_info.dpc_peninsula_nbytes+tcp->rx_con->u.rx.skp_bytes_copied, sb_idx);
1294
1295             /* We give push here, this will separate between 'received' data and 'aborted' bufs. we won't
1296              * have any buffers left that need to be aborted that have partially completed data on them */
1297 lm_tcp_rx_cmp_process(pdev, tcp, nbytes, 2 /* push as result of sp-completion*/);
1298 }
1299 }
1300
1301 switch (cmd) {
1302 case CMP_OPCODE_TOE_FIN_RCV:
1303 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_FIN_RECV;
1304 tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_REMOTE_FIN_RECEIVED */
1305 return;
1306 case CMP_OPCODE_TOE_FIN_UPL:
1307 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_FIN_RECV_UPL;
1308 tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_REMOTE_FIN_RECEIVED + Request to upload the connection */
1309 return;
1310 case CMP_OPCODE_TOE_RST_RCV:
1311 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RESET_RECV;
1312 tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_REMOTE_RST_RECEIVED */
1313 return;
1314 case RAMROD_OPCODE_TOE_UPDATE:
1315 DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) && (tcp->hdr.status != STATE_STATUS_ABORTED));
1316 DbgBreakIf(tcp->sp_request == NULL);
1317 DbgBreakIf((tcp->sp_request->type != SP_REQUEST_UPDATE_NEIGH) &&
1318 (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH) &&
1319 (tcp->sp_request->type != SP_REQUEST_UPDATE_TCP) &&
1320 (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH_RELINK));
1321 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1322
1323 /*DbgMessage(pdev, FATAL, "lm_tcp_rx_process_cqe() RAMROD_OPCODE_TOE_UPDATE: IGNORE_WND_UPDATES=%d, cqe->nbytes=%d\n", GET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES), cqe->nbytes);*/
1324
1325 if ((tcp->sp_request->type == SP_REQUEST_UPDATE_TCP) && (GET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES)))
1326 {
1327 tcp->rx_con->dpc_info.dpc_fw_wnd_after_dec = nbytes;
1328 }
1329 return;
1330 case CMP_OPCODE_TOE_URG:
1331 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_URG;
1332 return;
1333 case CMP_OPCODE_TOE_MAX_RT:
1334 DbgMessage(pdev, WARNl4, "lm_tcp_rx_process_cqe: CMP_OPCODE_TOE_MAX_RT cid=%d\n", tcp->cid);
1335 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RT_TO;
1336 return;
1337 case CMP_OPCODE_TOE_RT_TO:
1338 DbgMessage(pdev, WARNl4, "lm_tcp_rx_process_cqe: CMP_OPCODE_TOE_RT_TO cid=%d\n", tcp->cid);
1339 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RT_TO;
1340 return;
1341 case CMP_OPCODE_TOE_KA_TO:
1342 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_KA_TO;
1343 return;
1344 case CMP_OPCODE_TOE_DBT_RE:
1345 /* LH Inbox specification: Black Hole detection (RFC 2923)
1346 * TCP Chimney target MUST upload the connection if the TCPDoubtReachabilityRetransmissions threshold is hit.
1347 * SPARTA test scripts and tests that will fail if not implemented: All tests in Tcp_BlackholeDetection.wsf, we cause
1348 * the upload by giving L4_UPLOAD_REASON_UPLOAD_REQUEST (same as Teton) */
1349 DbgMessage(pdev, INFORMl4, "lm_tcp_rx_process_cqe: RCQE CMP_OPCODE_TOE_DBT_RE, cid=%d\n", tcp->cid);
1350 DbgMessage(pdev, WARNl4, "lm_tcp_rx_process_cqe: RCQE CMP_OPCODE_TOE_DBT_RE, cid=%d IGNORING!!!\n", tcp->cid);
1351 /* We add this here only for windows and not ediag */
1352 #if (!defined(DOS)) && (!defined(__LINUX))
1353 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_DBT_RE;
1354 #endif
1355 return;
1356 case CMP_OPCODE_TOE_SYN:
1357 case CMP_OPCODE_TOE_FW2_TO:
1358 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_UPLD_CLOSE;
1359 return;
1360 case CMP_OPCODE_TOE_2WY_CLS:
1361 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_UPLD_CLOSE;
1362 return;
1363 case CMP_OPCODE_TOE_OPT_ERR:
1364 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_OPT_ERR;
1365 return;
1366 case RAMROD_OPCODE_TOE_QUERY:
1367 DbgBreakIf(! tcp->sp_request );
1368 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_QUERY);
1369 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1370 return;
1371 case RAMROD_OPCODE_TOE_SEARCHER_DELETE:
1372 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_TERMINATE_OFFLOAD);
1373 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1374 return;
1375 case RAMROD_OPCODE_TOE_RESET_SEND:
1376 DbgBreakIf(! tcp->sp_request);
1377 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
1378 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1379 tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_RST_REQ_COMPLETED */
1380 return;
1381 case RAMROD_OPCODE_TOE_INVALIDATE:
1382 DbgBreakIf(! tcp->sp_request);
1383 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_INVALIDATE);
1384 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1385 tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_INV_REQ_COMPLETED */
1386 return;
1387 case RAMROD_OPCODE_TOE_TERMINATE:
1388 DbgBreakIf(! tcp->sp_request);
1389 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
1390 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1391 tcp->rx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_TRM_REQ_COMPLETED */
1392 return;
1393 case RAMROD_OPCODE_TOE_EMPTY_RAMROD:
1394 DbgBreakIf(nbytes);
1395 DbgBreakIf(! tcp->sp_request );
1396 DbgBreakIf((tcp->sp_request->type != SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT) &&
1397 (tcp->sp_request->type != SP_REQUEST_PENDING_REMOTE_DISCONNECT) &&
1398 (tcp->sp_request->type != SP_REQUEST_PENDING_TX_RST));
1399 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1400 return;
1401 case RAMROD_OPCODE_TOE_INITIATE_OFFLOAD:
1402 DbgBreakIf(nbytes);
1403 DbgBreakIf(! tcp->sp_request );
1404 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_INITIATE_OFFLOAD);
1405
1406 /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
1407 * complete ofld request here - assumption: tcp lock is NOT taken by caller */
1408 lm_tcp_comp_initiate_offload_request(pdev, tcp, LM_STATUS_SUCCESS);
1409 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD, tcp->ulp_type, tcp->cid);
1410
1411 return;
1412 case CMP_OPCODE_TOE_LCN_ERR:
1413 DbgBreakIf(! tcp->sp_request );
1414 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_INITIATE_OFFLOAD);
1415 tcp->rx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
1416 return;
1417 default:
1418 DbgMessage(pdev, FATAL, "unexpected rx cqe opcode=%d\n", cmd);
1419 DbgBreakIfAll(TRUE);
1420 }
1421 }
1422
1423 u8_t lm_tcp_rx_process_cqes(lm_device_t *pdev, u8_t drv_toe_rss_id, s_list_t * connections)
1424 {
1425 lm_tcp_rcq_t *rcq;
1426 lm_tcp_grq_t *grq;
1427 struct toe_rx_cqe *cqe, *hist_cqe;
1428 lm_tcp_state_t *tcp = NULL;
1429 u32_t cid;
1430 u32_t avg_dpc_cnt;
1431 u16_t cq_new_idx;
1432 u16_t cq_old_idx;
1433 u16_t num_to_reproduce = 0;
1434 u8_t defer_cqe;
1435 u8_t process_rss_upd_later = FALSE;
1436 MM_INIT_TCP_LOCK_HANDLE();
1437
1438 DbgMessage(pdev, VERBOSEl4int , "###lm_tcp_rx_process_cqes START\n");
1439
1440 rcq = &pdev->toe_info.rcqs[drv_toe_rss_id];
1441 grq = &pdev->toe_info.grqs[drv_toe_rss_id];
1442 cq_new_idx = *(rcq->hw_con_idx_ptr);
1443 cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
1444 DbgBreakIf(S16_SUB(cq_new_idx, cq_old_idx) < 0);
1445
1446 /* save statistics */
1447 rcq->num_cqes_last_dpc = S16_SUB(cq_new_idx, cq_old_idx);
1448 DbgMessage(pdev, VERBOSEl4int, "###lm_tcp_rx_process_cqes num_cqes=%d\n", rcq->num_cqes_last_dpc);
1449
1450 if (rcq->num_cqes_last_dpc) { /* Exclude zeroed value from statistics*/
1451 if(rcq->max_cqes_per_dpc < rcq->num_cqes_last_dpc) {
1452 rcq->max_cqes_per_dpc = rcq->num_cqes_last_dpc;
1453 }
1454 /* we don't want to wrap around...*/
1455 if ((rcq->sum_cqes_last_x_dpcs + rcq->num_cqes_last_dpc) < rcq->sum_cqes_last_x_dpcs) {
1456 rcq->avg_dpc_cnt = 0;
1457 rcq->sum_cqes_last_x_dpcs = 0;
1458 }
1459 rcq->sum_cqes_last_x_dpcs += rcq->num_cqes_last_dpc;
1460 rcq->avg_dpc_cnt++;
1461 avg_dpc_cnt = rcq->avg_dpc_cnt;
1462 if (avg_dpc_cnt) { /*Prevent division by 0*/
1463 rcq->avg_cqes_per_dpc = rcq->sum_cqes_last_x_dpcs / avg_dpc_cnt;
1464 } else {
1465 rcq->sum_cqes_last_x_dpcs = 0;
1466 }
1467 }
1468
1469
1470 /* if we are suspended, we need to check if we can resume processing */
1471 if (rcq->suspend_processing == TRUE) {
1472 lm_tcp_rss_update_suspend_rcq(pdev, rcq);
1473 if (rcq->suspend_processing == TRUE) {
1474 /* skip the consumption loop */
1475 cq_new_idx = cq_old_idx;
1476 DbgMessage(pdev, VERBOSEl4int, "lm_tcp_rx_process_cqes(): rcq suspended - idx:%d\n", drv_toe_rss_id);
1477 }
1478 }
1479
1480 while(cq_old_idx != cq_new_idx) {
1481 u32_t update_stats_type;
1482 u8_t opcode;
1483
1484 DbgBreakIf(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
1485
1486 /* get next consumed cqe */
1487         cqe = lm_toe_bd_chain_consume_bd(&rcq->bd_chain);
1488         DbgBreakIf(!cqe);
1489         update_stats_type = cqe->data.raw_data;
1490         num_to_reproduce++;
1491
1492 /* get cid and opcode from cqe */
1493 cid = SW_CID(((cqe->params1 & TOE_RX_CQE_CID) >> TOE_RX_CQE_CID_SHIFT));
1494 opcode = (cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT;
1495
1496 if (opcode == RAMROD_OPCODE_TOE_RSS_UPDATE) {
1497
1498 /* update the saved consumer */
1499 cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
1500
1501 /* rss update ramrod */
1502 DbgMessage(pdev, INFORMl4int, "lm_tcp_rx_process_cqes(): calling lm_tcp_rss_update_ramrod_comp - drv_toe_rss_id:%d\n", drv_toe_rss_id);
1503 if (num_to_reproduce > 1) {
1504 process_rss_upd_later = TRUE;
1505 lm_tcp_rss_update_ramrod_comp(pdev, rcq, cid, update_stats_type, FALSE);
1506 break;
1507 }
1508 lm_tcp_rss_update_ramrod_comp(pdev, rcq, cid, update_stats_type, TRUE);
1509
1510 /* suspend further RCQ processing (if needed) */
1511 if (rcq->suspend_processing == TRUE)
1512 break;
1513 else
1514 continue;
1515
1516 }
1517
1518 if (cid < MAX_ETH_REG_CONS) {
1519 /* toe init ramrod */
1520 DbgBreakIf(((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT)
1521 != RAMROD_OPCODE_TOE_INIT);
1522 lm_tcp_init_ramrod_comp(pdev);
1523 cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
1524 DbgBreakIf(cq_old_idx != cq_new_idx);
1525 /* We need to update the slow-path ring. This is usually done in the lm_tcp_rx_complete_sp_cqes,
1526 * but we won't get there since this completion is not associated with a connection. USUALLY we
1527              * have to update the sp-ring only AFTER we've written the CQ producer; this is to guarantee that there
1528 * will always be an empty entry for another ramrod completion, but in this case we're safe, since only
1529 * one CQE is occupied anyway */
1530 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INIT, TOE_CONNECTION_TYPE, LM_SW_LEADING_RSS_CID(pdev));
1531 break;
1532 }
1533
1534 tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, cid);
1535 DbgBreakIf(!tcp);
1536 /* save cqe in history_cqes */
1537 hist_cqe = (struct toe_rx_cqe *)lm_tcp_qe_buffer_next_cqe_override(&tcp->rx_con->history_cqes);
1538 *hist_cqe = *cqe;
1539
1540 /* ASSUMPTION: if COMP_DEFERRED changes from FALSE to TRUE, the change occurs only in DPC
1541 * o/w it can only change from TRUE to FALSE.
1542 *
1543 * Read flag w/o lock. Flag may change by the time we call rx_defer_cqe
1544 * Need to check again under lock. We want to avoid acquiring the lock every DPC */
1545 defer_cqe = ((tcp->rx_con->flags & TCP_RX_COMP_DEFERRED) == TCP_RX_COMP_DEFERRED);
1546 if (defer_cqe) {
1547 /* if we're deferring completions - just store the cqe and continue to the next one
1548 * Assumptions: ALL commands need to be deferred, we aren't expecting any command on
1549 * L4 that we should pay attention to for this connection ( only one outstanding sp at a time ) */
1550             /* Return if we are still deferred (may have changed since the initial check was done w/o a lock) */
1551 mm_acquire_tcp_lock(pdev, tcp->rx_con);
1552 /* check again under lock if we're deferred */
1553 defer_cqe = ((tcp->rx_con->flags & TCP_RX_COMP_DEFERRED) == TCP_RX_COMP_DEFERRED);
1554 if (defer_cqe) {
1555 tcp->rx_con->flags |= TCP_DEFERRED_PROCESSING;
1556
1557 /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
1558 * release the tcp lock if cqe is offload complete */
1559 if (((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) == RAMROD_OPCODE_TOE_INITIATE_OFFLOAD)
1560 {
1561 mm_release_tcp_lock(pdev, tcp->rx_con);
1562 }
1563
1564 lm_tcp_rx_process_cqe(pdev,cqe,tcp,drv_toe_rss_id);
1565 }
1566
1567 /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
1568 * release the tcp lock if cqe is not offload complete (was released earlier) */
1569 if (((cqe->params1 & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) != RAMROD_OPCODE_TOE_INITIATE_OFFLOAD)
1570 {
1571 mm_release_tcp_lock(pdev, tcp->rx_con);
1572 }
1573 }
1574
1575 if (!defer_cqe) {
1576 /* connections will always be initialized to a dummy, so once a tcp connection is added to the
1577              * list, its link will be initialized to point to another link other than NULL */
1578 if (s_list_next_entry(&tcp->rx_con->dpc_info.link) == NULL) {
1579 s_list_push_head(connections, &tcp->rx_con->dpc_info.link);
1580 }
1581 lm_tcp_rx_process_cqe(pdev, cqe, tcp, drv_toe_rss_id);
1582 }
1583
1584 cq_old_idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
1585 }
1586
1587 /* We may have nothing to reproduce if we were called from a sw_dpc */
1588 if (num_to_reproduce) {
1589 lm_toe_bd_chain_bds_produced(&rcq->bd_chain, num_to_reproduce);
1590
1591 /* GilR 5/13/2006 - TBA - save some stats? */
1592
1593 /* notify the fw of the prod of the RCQ */
1594 LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) , PORT_ID(pdev)),
1595 lm_bd_chain_prod_idx(&rcq->bd_chain), BAR_USTRORM_INTMEM);
1596
1597 if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
1598 u32_t l4_quasi_byte_counter;
1599 u16_t prod_idx_diff = lm_bd_chain_prod_idx(&rcq->bd_chain) - rcq->bd_chain.bds_per_page * rcq->bd_chain.page_cnt;
1600 l4_quasi_byte_counter = prod_idx_diff;
1601 l4_quasi_byte_counter <<= 16;
1602             // FIXME
1603 LM_INTMEM_WRITE32(pdev, rcq->hc_sb_info.iro_dhc_offset, l4_quasi_byte_counter, BAR_CSTRORM_INTMEM);
1604 }
1605 }
1606 DbgMessage(pdev, VERBOSEl4int , "###lm_tcp_rx_process_cqes END\n");
1607 return process_rss_upd_later;
1608 }
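
/* Illustrative sketch (not compiled): how the dynamic-HC "quasi byte counter" written by
 * lm_tcp_rx_process_cqes() above is packed. The producer delta (producer index minus the fixed
 * per-chain bd count) is placed in the upper 16 bits of a 32-bit value, as in the
 * LM_INTMEM_WRITE32() call above. The standalone helper form and its name are illustrative only;
 * the real values come from the rcq bd chain. */
#if 0
static u32_t example_l4_quasi_byte_counter(u16_t prod_idx, u16_t bds_per_page, u16_t page_cnt)
{
    /* 16-bit wrap-around subtraction, same arithmetic as the prod_idx_diff computation above */
    u16_t prod_idx_diff = (u16_t)(prod_idx - (u16_t)(bds_per_page * page_cnt));
    return ((u32_t)prod_idx_diff) << 16; /* counter occupies the upper 16 bits */
}
#endif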
1609
1610 /** Description
1611 * compensate the grq
1612 * Assumption:
1613 * called under the GRQ LOCK
1614 */
1615 void lm_tcp_rx_compensate_grq(lm_device_t * pdev, u8_t drv_toe_rss_id)
1616 {
1617 d_list_t * collected_gen_bufs_list = &pdev->toe_info.grqs[drv_toe_rss_id].aux_gen_list;
1618
1619 MM_ACQUIRE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);
1620 if (lm_tcp_rx_fill_grq(pdev, drv_toe_rss_id, collected_gen_bufs_list,FILL_GRQ_FULL)) {
1621 DbgMessage(pdev, INFORMl4rx, "lm_toe_service_rx_intr: Updating GRQ producer\n");
1622 /* notify the fw of the prod of the GRQ */
1623 LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) , PORT_ID(pdev)),
1624 lm_bd_chain_prod_idx(&pdev->toe_info.grqs[drv_toe_rss_id].bd_chain), BAR_USTRORM_INTMEM);
1625 }
1626 /* check if occupancy is above threshold */
1627 if (pdev->toe_info.grqs[drv_toe_rss_id].bd_chain.capacity - pdev->toe_info.grqs[drv_toe_rss_id].bd_chain.bd_left < GRQ_XON_TH) {
1628 pdev->toe_info.grqs[drv_toe_rss_id].grq_compensate_on_alloc = TRUE;
1629 } else {
1630 pdev->toe_info.grqs[drv_toe_rss_id].grq_compensate_on_alloc = FALSE;
1631 }
1632
1633 MM_RELEASE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);
1634
1635 if (!d_list_is_empty(collected_gen_bufs_list)) {
1636 mm_tcp_return_list_of_gen_bufs(pdev,collected_gen_bufs_list,0, NON_EXISTENT_SB_IDX);
1637 d_list_clear(collected_gen_bufs_list);
1638 }
1639 }
1640
1641 static __inline void lm_tcp_rx_lock_grq(lm_device_t *pdev, u8_t drv_toe_rss_id)
1642 {
1643 /* If we've asked for compensation on allocation (which is only set from within a dpc)
1644 * there is a risk of the grq being accessed from a different context (the alloc context)
1645 * therefore, we cancel this option. Needs to be under lock in case alloc context is already
1646 * compensating */
1647 if (pdev->toe_info.grqs[drv_toe_rss_id].grq_compensate_on_alloc) {
1648 MM_ACQUIRE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);
1649 pdev->toe_info.grqs[drv_toe_rss_id].grq_compensate_on_alloc = FALSE;
1650 MM_RELEASE_TOE_GRQ_LOCK_DPC(pdev, drv_toe_rss_id);
1651 }
1652 }
1653
1654 void lm_toe_service_rx_intr(lm_device_t *pdev, u8_t drv_toe_rss_id)
1655 {
1656 s_list_t connections;
1657 s_list_entry_t dummy;
1658 lm_tcp_con_t * con;
1659 lm_tcp_state_t * tcp;
1660 u32_t dbg_loop_cnt = 0;
1661 u8_t process_rss_upd;
1662
1663 MM_INIT_TCP_LOCK_HANDLE();
1664
1665 DbgMessage(pdev, VERBOSEl4int , "###lm_toe_service_rx_intr START\n");
1666 DbgBreakIf(!(pdev && ARRSIZE(pdev->toe_info.rcqs) > drv_toe_rss_id));
1667
1668 /* lock the grq from external access: i.e.. allocation compensation */
1669 lm_tcp_rx_lock_grq(pdev, drv_toe_rss_id);
1670
1671 while (TRUE) {
1672 dbg_loop_cnt++;
1673 s_list_clear(&connections);
1674 s_list_push_head(&connections, &dummy);
1675 /* process the cqes and initialize connections with all the connections that appeared
1676 * in the DPC */
1677 process_rss_upd = lm_tcp_rx_process_cqes(pdev,drv_toe_rss_id,&connections);
1678
1679 /* Compensate the GRQ with generic buffers from the pool : process_cqes takes buffers from the grq */
1680 lm_tcp_rx_compensate_grq(pdev,drv_toe_rss_id);
1681
1682 /* FP: traverse the connections. remember to ignore the last one */
1683 con = (lm_tcp_con_t *)s_list_peek_head(&connections);
1684 tcp = con->tcp_state;
1685 while (s_list_next_entry(&con->dpc_info.link) != NULL) {
1686 mm_acquire_tcp_lock(pdev, con);
1687 lm_tcp_rx_complete_tcp_fp(pdev, con->tcp_state, con);
1688 mm_release_tcp_lock(pdev, con);
1689 con = (lm_tcp_con_t *)s_list_next_entry(&con->dpc_info.link);
1690 tcp = con->tcp_state;
1691 }
1692
1693 /* SP : traverse the connections. remember to ignore the last one */
1694 con = (lm_tcp_con_t *)s_list_pop_head(&connections);
1695 s_list_next_entry(&con->dpc_info.link) = NULL;
1696 tcp = con->tcp_state;
1697 while (s_list_entry_cnt(&connections) > 0) {
1698 /* we access snapshot and not dpc, since once the dpc_flags were copied
1699 * to snapshot they were zeroized */
1700 if (con->dpc_info.snapshot_flags) {
1701 lm_tcp_rx_complete_tcp_sp(pdev, tcp, con);
1702 }
1703 con = (lm_tcp_con_t *)s_list_pop_head(&connections);
1704 s_list_next_entry(&con->dpc_info.link) = NULL;
1705 tcp = con->tcp_state;
1706 }
1707
1708 if (process_rss_upd) {
1709 lm_tcp_rss_update_suspend_rcq(pdev,&pdev->toe_info.rcqs[drv_toe_rss_id]);
1710 if (!pdev->toe_info.rcqs[drv_toe_rss_id].suspend_processing) {
1711 pdev->toe_info.rcqs[drv_toe_rss_id].rss_update_processing_continued++;
1712 continue;
1713 }
1714 }
1715 break;
1716 }
1717 if (pdev->toe_info.rcqs[drv_toe_rss_id].rss_update_processing_max_continued < dbg_loop_cnt) {
1718 pdev->toe_info.rcqs[drv_toe_rss_id].rss_update_processing_max_continued = dbg_loop_cnt;
1719 }
1720
1721 if (pdev->toe_info.grqs[drv_toe_rss_id].number_of_isles_delta || pdev->toe_info.grqs[drv_toe_rss_id].gen_bufs_in_isles_delta) {
1722 MM_ACQUIRE_ISLES_CONTROL_LOCK_DPC(pdev);
1723 lm_tcp_update_isles_cnts(pdev, pdev->toe_info.grqs[drv_toe_rss_id].number_of_isles_delta,
1724 pdev->toe_info.grqs[drv_toe_rss_id].gen_bufs_in_isles_delta);
1725 MM_RELEASE_ISLES_CONTROL_LOCK_DPC(pdev);
1726 pdev->toe_info.grqs[drv_toe_rss_id].number_of_isles_delta = pdev->toe_info.grqs[drv_toe_rss_id].gen_bufs_in_isles_delta = 0;
1727 }
1728
1729 DbgMessage(pdev, VERBOSEl4int , "###lm_toe_service_rx_intr END\n");
1730 }
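
/* Illustrative sketch (not compiled): the fast-path/slow-path loops in lm_toe_service_rx_intr()
 * above push a 'dummy' entry onto the connections list so that a NULL 'next' pointer marks the
 * terminator rather than the last real element. A simplified stand-in of the same traversal
 * pattern, using a hypothetical node type (not the driver's s_list_t): */
#if 0
struct example_node { struct example_node *next; };

static void example_traverse(struct example_node *head /* last element is the dummy */)
{
    struct example_node *cur = head;
    while (cur->next != NULL) { /* stop at the dummy: it is the only node with next == NULL */
        /* ... process 'cur' ... */
        cur = cur->next;
    }
}
#endif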
1731
1732 /** Description:
1733 * Post a single tcp buffer to the Rx bd chain
1734 * Assumptions:
1735 * - caller initiated tcp_buf->flags field with BUFFER_START/BUFFER_END/PUSH appropriately
1736 * Returns:
1737 * - SUCCESS - tcp buf was successfully attached to the bd chain
1738 * - RESOURCE - not enough available BDs on bd chain for given tcp buf
1739 * - CONNECTION_CLOSED - whenever connection's flag are marked as 'POST BLOCKED' */
1740 lm_status_t lm_tcp_rx_post_buf(
1741 struct _lm_device_t *pdev,
1742 lm_tcp_state_t *tcp,
1743 lm_tcp_buffer_t *tcp_buf,
1744 lm_frag_list_t *frag_list
1745 )
1746 {
1747 lm_tcp_con_t * rx_con;
1748 lm_tcp_con_rx_gen_info_t * gen_info;
1749 lm_status_t lm_stat = LM_STATUS_SUCCESS;
1750 d_list_t return_list; /* buffers to return to pool in case of copying to buffer */
1751 u32_t copied_bytes = 0;
1752 u32_t add_sws_bytes = 0;
1753 u8_t split_buffer = FALSE;
1754
1755 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_post_buf cid=%d\n", tcp->cid);
1756 DbgBreakIf(!(pdev && tcp));
1757 DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));
1758 /* (tcp_buf==NULL <=> frag_list==NULL) && (frag_list!= NULL => frag_list->cnt != 0) */
1759 DbgBreakIf( ( ! ( ( (!tcp_buf) && (!frag_list) ) || (tcp_buf && frag_list) ) ) ||
1760 ( frag_list && (frag_list->cnt == 0) ) );
1761
1762 rx_con = tcp->rx_con;
1763 if ( GET_FLAGS(rx_con->flags, TCP_RX_POST_BLOCKED) ) {
1764 // DbgBreakIf(!tcp_buf); /* (lm_tcp_rx_post_buf design guides VBD doc) */
1765 if (!tcp_buf) {
1766 tcp->rx_con->zb_rx_post_blocked++;
1767 return LM_STATUS_SUCCESS;
1768 } else {
1769 tcp->rx_con->rx_post_blocked++;
1770 return LM_STATUS_CONNECTION_CLOSED;
1771 }
1772 }
1773
1774 /* TCP_POST_DELAYED is turned on when the lm can not process new buffers for some reason, but not permanently
1775 * Assumption: UM will eventually try to repost this buffer... */
1776 if ( GET_FLAGS(rx_con->flags, TCP_POST_DELAYED)) {
1777 return LM_STATUS_FAILURE;
1778 }
1779
1780 RESET_FLAGS(rx_con->flags, TCP_INDICATE_REJECTED);
1781
1782 /* set tcp_buf fields */
1783 if (tcp_buf) {
1784 /* check bd chain availability */
1785 if(lm_bd_chain_avail_bds(&rx_con->bd_chain) < frag_list->cnt) {
1786 DbgBreakIf(s_list_is_empty(&rx_con->active_tb_list));
1787             /* Check if the last placed BD was part of a split buffer (no end flag); if so, mark it as a special split-end
1788              * and give a doorbell as if it had the END flag. Also, block UM from giving us more buffers until we've completed
1789              * this one (see the L4 VBD Spec for more details on "Large Application Buffers") */
1790 if (!(GET_FLAGS(rx_con->u.rx.last_rx_bd->flags , TOE_RX_BD_END))) {
1791 SET_FLAGS(rx_con->u.rx.last_rx_bd->flags, (TOE_RX_BD_END | TOE_RX_BD_SPLIT));
1792 /* Mark the last buffer in active-tb-list as 'special' so that we know when we complete it that we can
1793 * unblock UM... */
1794 tcp_buf = (lm_tcp_buffer_t *)s_list_peek_tail(&rx_con->active_tb_list);
1795 SET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_SPLIT);
1796 SET_FLAGS(rx_con->flags, TCP_POST_DELAYED);
1797 lm_tcp_rx_write_db(pdev, tcp);
1798 }
1799 DbgMessage(pdev, INFORMl4rx, "post rx buf failed, rx chain is full (cid=%d, avail bds=%d, buf nfrags=%d)\n",
1800 tcp->cid, lm_bd_chain_avail_bds(&rx_con->bd_chain), frag_list->cnt);
1801 return LM_STATUS_RESOURCE;
1802 }
1803
1804 tcp_buf->size = tcp_buf->more_to_comp = (u32_t)frag_list->size;
1805 tcp_buf->bd_used = 0; /* will be modified if buffer will be posted */
1806 DbgBreakIf(!(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START ?
1807 rx_con->app_buf_bytes_acc_post == 0 :
1808 rx_con->app_buf_bytes_acc_post > 0));
1809 rx_con->app_buf_bytes_acc_post += tcp_buf->size;
1810
1811 /* special care in case of last tcp buffer of an application buffer */
1812 if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
1813 tcp_buf->app_buf_xferred = 0; /* just for safety */
1814 tcp_buf->app_buf_size = rx_con->app_buf_bytes_acc_post;
1815 rx_con->app_buf_bytes_acc_post = 0;
1816 }
1817 split_buffer = !(GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_POST_START) && GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_POST_END));
1818 } else {
1819 /* zero-byte request */
1820 rx_con->u.rx.rx_zero_byte_recv_reqs++;
1821 }
1822
1823 /* we could be in the middle of completing a split-buffer... this is in case the previous split buffer was posted partially and we got a
1824 * cmp with push... need to complete it here. */
1825 if (GET_FLAGS(rx_con->flags, TCP_POST_COMPLETE_SPLIT)) {
1826 DbgBreakIf(!split_buffer); /* we can only be in this state if we're completing split buffers... */
1827 rx_con->bytes_push_skip_cnt += tcp_buf->more_to_comp; /* how many bytes did we skip? */
1828 tcp_buf->more_to_comp = 0;
1829 rx_con->partially_completed_buf_cnt++;
1830 /* complete buffer */
1831 s_list_push_tail(&(tcp->rx_con->active_tb_list), &(tcp_buf->link));
1832 rx_con->rq_nbytes += tcp_buf->size;
1833 rx_con->buffer_skip_post_cnt++;
1834 lm_tcp_complete_tcp_buf(pdev, tcp, rx_con,tcp_buf,0);
1835 return LM_STATUS_SUCCESS;
1836 }
1837
1838 gen_info = &rx_con->u.rx.gen_info;
1839
1840 if ( gen_info->peninsula_nbytes ) {
1841 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_post_buf WITH GENERIC, cid=%d, tcp_buf=%p, buf_size=%d, buf_flags=%d, peninsula_nbytes=%d\n",
1842 tcp->cid, tcp_buf, frag_list ? frag_list->size : 0, tcp_buf ? tcp_buf->flags : 0, rx_con->u.rx.gen_info.peninsula_nbytes);
1843 if (tcp_buf) {
1844 d_list_init(&return_list, NULL, NULL, 0);
1845 copied_bytes = lm_tcp_rx_peninsula_to_rq_copy(pdev,tcp,tcp_buf,&return_list, 0xffffffff, FALSE);
1846 gen_info->bytes_copied_cnt_in_post += copied_bytes;
1847 if (!d_list_is_empty(&return_list)) {
1848 lm_tcp_return_list_of_gen_bufs(pdev,tcp,&return_list, MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
1849 }
1850 if ((copied_bytes == tcp_buf->size) && !split_buffer && s_list_is_empty(&rx_con->active_tb_list)) {
1851 /* consumed_cnt: our way of telling fw we bypassed it */
1852 lm_tcp_incr_consumed_gen(pdev, tcp, tcp_buf->size);
1853 /* simulate a _lm_tcp_rx_post_buf for lm_tcp_complete_bufs */
1854 s_list_push_tail(&(tcp->rx_con->active_tb_list), &(tcp_buf->link));
1855 rx_con->rq_nbytes += tcp_buf->size;
1856 rx_con->buffer_skip_post_cnt++;
1857 rx_con->bytes_skip_post_cnt += copied_bytes;
1858 /* If we copied some bytes to the RQ, we can now compensate FW-Window with these copied bytes. */
1859 add_sws_bytes += copied_bytes;
1860 /* this function completes nbytes on the tcp buf and may complete the buffer if more_to_comp = 0*/
1861 lm_tcp_complete_tcp_buf(pdev, tcp, rx_con,tcp_buf,copied_bytes);
1862 } else {
1863 /* will be posted and therefore get a SKP at some stage. */
1864 if (!GET_FLAGS(rx_con->flags, TCP_POST_NO_SKP)) {
1865 rx_con->u.rx.skp_bytes_copied += copied_bytes;
1866 }
1867 lm_stat = _lm_tcp_rx_post_buf(pdev, tcp, tcp_buf, frag_list);
1868 DbgBreakIf(lm_stat != LM_STATUS_SUCCESS);
1869 if (copied_bytes && GET_FLAGS(rx_con->flags, TCP_POST_NO_SKP)) {
1870 lm_tcp_rx_write_db(pdev, tcp); /* for the case of split buffer in which bytes/bds are accumulated in bd_more* fields. bd_more* fields must be cleaned at this phase */
1871 rx_con->bytes_comp_cnt += copied_bytes;
1872 /* If we copied some bytes to the RQ, we can now compensate FW-Window with these copied bytes. */
1873 add_sws_bytes += copied_bytes;
1874 /* this function completes nbytes on the tcp buf and may complete the buffer if more_to_comp = 0*/
1875 lm_tcp_complete_tcp_buf(pdev, tcp, rx_con,tcp_buf,copied_bytes);
1876 }
1877 }
1878 }
1879 /* if we have something to indicate after copying and it's ok to indicate... - indicate it */
1880 if (gen_info->peninsula_nbytes && _lm_tcp_ok_to_indicate(rx_con)) {
1881 DbgBreakIf(frag_list && (frag_list->size != copied_bytes)); /* can't have bytes left with free space in tcp buf */
1882 mm_tcp_rx_indicate_gen(pdev, tcp);
1883 add_sws_bytes += gen_info->add_sws_bytes; /* any bytes we need to update will be aggregated here during indicate */
1884 gen_info->add_sws_bytes = 0;
1885
1886 }
1887 } else if (tcp_buf) {
1888 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_post_buf NO COPY, cid=%d, tcp_buf=%p, buf_size=%d, buf_flags=%d, peninsula_nbytes=%d\n",
1889 tcp->cid, tcp_buf, frag_list->size, tcp_buf->flags, rx_con->u.rx.gen_info.peninsula_nbytes);
1890 lm_stat = _lm_tcp_rx_post_buf(pdev, tcp, tcp_buf, frag_list);
1891 DbgBreakIf(lm_stat != LM_STATUS_SUCCESS);
1892 }
1893
1894 if (add_sws_bytes) {
1895 lm_tcp_rx_post_sws(pdev, tcp, rx_con, add_sws_bytes, TCP_RX_POST_SWS_INC);
1896 }
1897
1898
1899 return lm_stat;
1900 }
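
/* Illustrative sketch (not compiled): how an application buffer that spans several posted tcp
 * buffers is accumulated in lm_tcp_rx_post_buf() above. Each posted piece adds its size to an
 * accumulator; the piece carrying the END flag captures the total as the application buffer size
 * and resets the accumulator. Flag and field names here are simplified stand-ins, not the
 * driver's. */
#if 0
#define EX_POST_START 0x1
#define EX_POST_END   0x2

static u32_t example_accumulate_app_buf(u32_t *acc, u32_t piece_size, u32_t piece_flags)
{
    u32_t app_buf_size = 0;

    *acc += piece_size;                 /* every piece contributes its bytes */
    if (piece_flags & EX_POST_END) {
        app_buf_size = *acc;            /* the END piece sees the whole application buffer */
        *acc = 0;                       /* ready for the next application buffer */
    }
    return app_buf_size;                /* 0 for non-END pieces */
}
#endif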
1901
1902
1903 /* Assumptions:
1904 * - caller initiated appropriately the following fields:
1905 * - tcp_buf->flags
1906 * - tcp_buf->size, tcp_buf->more_to_comp
1907 * - tcp_buf->app_buf_size, tcp_buf->app_buf_xferred
1908  *    - caller verified that there are enough available BDs in the BD chain for the given buffer */
1909 static lm_status_t _lm_tcp_rx_post_buf(
1910 struct _lm_device_t *pdev,
1911 lm_tcp_state_t *tcp,
1912 lm_tcp_buffer_t *tcp_buf,
1913 lm_frag_list_t *frag_list
1914 )
1915 {
1916 lm_tcp_con_t *rx_con = tcp->rx_con;
1917 lm_bd_chain_t * rx_chain;
1918 u16_t old_prod, new_prod;
1919 struct toe_rx_bd * rx_bd;
1920 lm_frag_t * frag = frag_list->frag_arr;
1921 u32_t dbg_buf_size = 0;
1922 u32_t bd_bytes_prod; /* Each bd is initialized with a cyclic counter of bytes prod until that bd. */
1923 u16_t flags = 0;
1924 u32_t i;
1925
1926     /* The number of fragments of the entire application buffer can't be bigger
1927      * than the size of the BD chain (the entire application buffer, since we can't
1928      * post a partial application buffer to the FW). db_more_bds, however, includes the "next" bd, so we need
1929      * to take that into consideration as well */
1930 DbgBreakIfAll( (rx_con->db_more_bds + frag_list->cnt) > (u32_t)(rx_con->bd_chain.capacity + rx_con->bd_chain.page_cnt));
1931
1932 rx_chain = &rx_con->bd_chain;
1933 DbgBreakIf(lm_bd_chain_avail_bds(rx_chain) < frag_list->cnt);
1934
1935 old_prod = lm_bd_chain_prod_idx(rx_chain);
1936
1937 /* First BD should have the START flag */
1938 if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START) {
1939 flags = TOE_RX_BD_START;
1940 }
1941
1942 /* Set NO_PUSH flag if needed */
1943 if ( tcp_buf->flags & TCP_BUF_FLAG_L4_RX_NO_PUSH ) {
1944 flags |= TOE_RX_BD_NO_PUSH;
1945 }
1946 if (tcp_buf->flags & TCP_BUF_FLAG_L4_PARTIAL_FILLED) {
1947 if (!rx_con->partially_filled_buf_sent && !rx_con->rq_completion_calls) {
1948 SET_FLAGS(rx_con->db_data.rx->flags, TOE_RX_DB_DATA_PARTIAL_FILLED_BUF);
1949 } else {
1950 RESET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_PARTIAL_FILLED);
1951 }
1952 rx_con->partially_filled_buf_sent++;
1953 }
1954 /* Attach the first frag to the BD-chain */
1955 bd_bytes_prod = rx_con->db_data.rx->bytes_prod + rx_con->db_more_bytes;
1956 rx_bd = _lm_tcp_rx_set_bd(frag, flags, rx_chain, bd_bytes_prod);
1957 bd_bytes_prod += frag->size;
1958 dbg_buf_size += frag->size;
1959 flags &= ~TOE_RX_BD_START;
1960 frag++;
1961
1962 /* "attach" the frags to the bd chain */
1963 for(i = 1; i < frag_list->cnt; i++, frag++) {
1964 rx_bd = _lm_tcp_rx_set_bd(frag, flags, rx_chain, bd_bytes_prod);
1965 dbg_buf_size += frag->size;
1966 bd_bytes_prod += frag->size;
1967 }
1968 tcp->rx_con->u.rx.last_rx_bd = rx_bd;
1969
1970 /* The last BD must have the END flag */
1971 if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
1972 rx_bd->flags |= TOE_RX_BD_END;
1973 DbgMessage(NULL, VERBOSEl4rx, "Setting Rx last BD flags=0x%x\n", rx_bd->flags);
1974 }
1975
1976 DbgBreakIf(frag_list->cnt > TCP_MAX_SGL_SIZE);
1977 tcp_buf->bd_used = frag_list->cnt & TCP_MAX_SGL_SIZE;
1978 DbgBreakIf(tcp_buf->size != dbg_buf_size);
1979
1980     /* Prepare data for a DoorBell */
1981 rx_con->db_more_bytes += tcp_buf->size;
1982 new_prod = lm_bd_chain_prod_idx(rx_chain);
1983 DbgBreakIf(S16_SUB(new_prod, old_prod) < tcp_buf->bd_used);
1984 rx_con->db_more_bds += S16_SUB(new_prod, old_prod);
1985 rx_con->db_more_bufs++;
1986
1987 /* Enqueue the buffer to the active_tb_list */
1988 s_list_push_tail(&(rx_con->active_tb_list), &(tcp_buf->link));
1989 rx_con->rq_nbytes += tcp_buf->size;
1990
1991 if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
1992 lm_tcp_rx_write_db(pdev, tcp);
1993 }
1994
1995
1996 return LM_STATUS_SUCCESS;
1997 }
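
/* Illustrative sketch (not compiled): the per-BD byte counter stamped by _lm_tcp_rx_post_buf()
 * above. Each BD records the cyclic count of bytes produced before it; the counter starts at
 * bytes_prod + db_more_bytes and grows by each fragment's size. The arrays and helper below are
 * simplified stand-ins for the real bd chain / frag list. */
#if 0
static void example_stamp_bd_byte_counters(u32_t start_bytes_prod,
                                           const u32_t *frag_sizes, u32_t frag_cnt,
                                           u32_t *bd_bytes_prod_out /* one entry per frag */)
{
    u32_t running = start_bytes_prod;
    u32_t i;

    for (i = 0; i < frag_cnt; i++) {
        bd_bytes_prod_out[i] = running;  /* bytes produced before this BD (wraps naturally) */
        running += frag_sizes[i];
    }
}
#endif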
1998
1999 static lm_status_t _lm_tcp_rx_get_buffered_data(
2000 struct _lm_device_t * pdev,
2001 lm_tcp_state_t * tcp,
2002     lm_frag_list_t        ** frag_list,   /* if *frag_list is NULL, the rx con pre-allocated one will be used */
2003 lm_tcp_gen_buf_t ** gen_buf
2004 )
2005 {
2006 lm_tcp_con_t * rx_con = tcp->rx_con;
2007 lm_tcp_con_rx_gen_info_t * gen_info;
2008 lm_tcp_gen_buf_t * head_of_indication;
2009 d_list_t indicate_list;
2010 d_list_entry_t * entry;
2011 lm_tcp_gen_buf_t * curr_gen_buf;
2012 u32_t gen_offset, i;
2013 u32_t num_bufs_to_indicate;
2014 u32_t ind_nbufs=0, ind_nbytes=0;
2015 u8_t dont_send_to_system_more_then_rwin;
2016 DbgMessage(pdev, VERBOSEl4rx, "###_lm_tcp_rx_get_buffered_data cid=%d\n", tcp->cid);
2017
2018 gen_info = &rx_con->u.rx.gen_info;
2019
2020
2021 if ((u16_t)tcp->tcp_cached.rcv_indication_size != 0) {
2022 DbgBreakMsg("MichalS rcv_indication_size != 0 not implemented\n");
2023 /* MichalS TBA: RcvIndicationSize > 0 will change following block quite a lot */
2024 }
2025
2026 num_bufs_to_indicate = d_list_entry_cnt(&gen_info->peninsula_list);
2027
2028 /* The buffers in peninsula_list are ALWAYS released, unreleased buffers are in the dpc_peninsula_list. */
2029 DbgBreakIf(((lm_tcp_gen_buf_t *)d_list_peek_tail(&gen_info->peninsula_list))->placed_bytes == 0);
2030
2031 if (*frag_list == NULL) {
2032 *frag_list = gen_info->frag_list;
2033 (*frag_list)->cnt = gen_info->max_frag_count;
2034 }
2035
2036 if (num_bufs_to_indicate > (*frag_list)->cnt) {
2037 DbgMessage(pdev, WARNl4rx, "_lm_tcp_rx_get_buffered_data: number of buffers to indicate[%d] is larger than frag_cnt[%d] cid=%d\n",
2038 num_bufs_to_indicate, (*frag_list)->cnt, tcp->cid);
2039 num_bufs_to_indicate = gen_info->max_frag_count;
2040 gen_info->num_non_full_indications++;
2041 }
2042 d_list_init(&indicate_list, NULL, NULL, 0);
2043 dont_send_to_system_more_then_rwin = (u8_t)gen_info->dont_send_to_system_more_then_rwin;
2044 while (num_bufs_to_indicate--) {
2045 entry = d_list_pop_head(&gen_info->peninsula_list);
2046 DbgBreakIf(entry == NULL);
2047 if (dont_send_to_system_more_then_rwin) {
2048 if ((ind_nbytes + ((lm_tcp_gen_buf_t *)entry)->placed_bytes)
2049 > tcp->tcp_cached.initial_rcv_wnd) {
2050 if (ind_nbytes) {
2051 d_list_push_head(&gen_info->peninsula_list, entry);
2052 break;
2053 } else {
2054 dont_send_to_system_more_then_rwin = FALSE;
2055 }
2056 }
2057 }
2058 d_list_push_tail(&indicate_list, entry);
2059 ind_nbufs ++;
2060 ind_nbytes += ((lm_tcp_gen_buf_t *)entry)->placed_bytes;
2061 }
2062
2063 ind_nbytes -= gen_info->first_buf_offset;
2064
2065 head_of_indication = (lm_tcp_gen_buf_t *)d_list_peek_head(&indicate_list);
2066
2067 if CHK_NULL(head_of_indication)
2068 {
2069 DbgBreakIfAll( !head_of_indication ) ;
2070 return LM_STATUS_FAILURE ;
2071 }
2072
2073 head_of_indication->tcp = tcp;
2074 head_of_indication->ind_nbufs = ind_nbufs;
2075 head_of_indication->ind_bytes = ind_nbytes;
2076 DbgBreakIf(gen_info->peninsula_nbytes < ind_nbytes);
2077 gen_info->peninsula_nbytes -= ind_nbytes;
2078
2079 /* initialize frag list */
2080 (*frag_list)->cnt = ind_nbufs;
2081 (*frag_list)->size = ind_nbytes;
2082 curr_gen_buf = head_of_indication;
2083
2084 gen_offset = gen_info->first_buf_offset;
2085 for (i = 0; i < (*frag_list)->cnt; i++ ) {
2086 (*frag_list)->frag_arr[i].addr.as_ptr = curr_gen_buf->buf_virt + gen_offset;
2087 (*frag_list)->frag_arr[i].size = curr_gen_buf->placed_bytes - gen_offset;
2088 curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
2089 gen_offset = 0; /* only first buffer can have an offset */
2090 /* we don't touch gen_info->first_buf_offset - this is handled in lm_tcp_rx_buffered_data_indicated */
2091 }
2092 *gen_buf = head_of_indication;
2093 DbgMessage(pdev, VERBOSEl4rx, "###_lm_tcp_rx_get_buffered_data ind_bytes = %d\n", (*frag_list)->size);
2094
2095 mm_atomic_inc(&pdev->toe_info.stats.total_indicated);
2096 return LM_STATUS_SUCCESS;
2097 }
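
/* Illustrative sketch (not compiled): how the indicate frag list is built by
 * _lm_tcp_rx_get_buffered_data() above from a chain of generic buffers. Only the first buffer
 * may start at a non-zero offset (left over from a previous partial indication); subsequent
 * buffers are taken whole. The types below are simplified stand-ins for lm_tcp_gen_buf_t /
 * lm_frag_t. */
#if 0
struct example_gen_buf { struct example_gen_buf *next; u8_t *virt; u32_t placed_bytes; };
struct example_frag    { u8_t *addr; u32_t size; };

static u32_t example_build_frags(struct example_gen_buf *head, u32_t first_buf_offset,
                                 struct example_frag *frags, u32_t max_frags)
{
    u32_t i = 0, offset = first_buf_offset;
    struct example_gen_buf *cur = head;

    while (cur != NULL && i < max_frags) {
        frags[i].addr = cur->virt + offset;
        frags[i].size = cur->placed_bytes - offset;
        offset = 0;                      /* only the first buffer can have an offset */
        cur = cur->next;
        i++;
    }
    return i;                            /* number of fragments filled */
}
#endif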
2098
2099 lm_status_t lm_tcp_rx_get_buffered_data_from_terminate (
2100 struct _lm_device_t * pdev,
2101 lm_tcp_state_t * tcp,
2102 lm_frag_list_t ** frag_list,
2103 lm_tcp_gen_buf_t ** gen_buf
2104 )
2105 {
2106 lm_tcp_con_t * rx_con = tcp->rx_con;
2107 lm_tcp_con_rx_gen_info_t * gen_info;
2108 u16_t buff_cnt;
2109 lm_tcp_gen_buf_t * unwanted_gen_buf = NULL;
2110 lm_tcp_gen_buf_t * temp_gen_buf = NULL;
2111 lm_status_t lm_status = LM_STATUS_SUCCESS;
2112
2113 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d\n", tcp->cid);
2114
2115 gen_info = &rx_con->u.rx.gen_info;
2116
2117 /* make sure ALL the peninsula is released */
2118 DbgBreakIf(!d_list_is_empty(&gen_info->peninsula_list) &&
2119 (((lm_tcp_gen_buf_t *)d_list_peek_tail(&gen_info->peninsula_list))->placed_bytes == 0));
2120
2121 *frag_list = NULL;
2122 if (gen_info->peninsula_nbytes == 0) {
2123 return LM_STATUS_SUCCESS;
2124 }
2125
2126 /* DbgBreakIf(gen_info->peninsula_nbytes > tcp->tcp_cached.initial_rcv_wnd);*/
2127 gen_info->dont_send_to_system_more_then_rwin = FALSE;
2128 if ((buff_cnt = (u16_t)d_list_entry_cnt(&gen_info->peninsula_list)) > gen_info->max_frag_count) {
2129 lm_bd_chain_t *bd_chain = &tcp->rx_con->bd_chain;
2130 u16_t possible_frag_count, decreased_count;
2131 possible_frag_count = (/*bd_chain->page_cnt**/
2132 LM_PAGE_SIZE - sizeof(lm_frag_list_t)) / sizeof(lm_frag_t);
2133 DbgMessage(pdev, WARNl4rx | WARNl4sp,
2134 "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d: peninsula_list cnt (%d) > max frag_count (%d)\n",
2135 tcp->cid, buff_cnt, gen_info->max_frag_count);
2136
2137 if (possible_frag_count > gen_info->max_frag_count) {
2138 /* This solution is ugly:
2139 since there will not be any further buffered data indications to the client, we must be able to
2140 indicate all the buffered data now. But the preallocated frag list in the rx con is too short!
2141 So instead of the pre-allocated frag list we need to use a larger memory. Our options:
2142 1. allocate memory here and release it later.
2143 2. use other pre-allocated memory that is not in use anymore (e.g. the bd chain) [chosen solution]
2144 In any case both solutions may fail: memory allocation can fail and the other pre-allocated memory
2145 might also be too short. the fallback from this is:
2146 - don't indicate anything and release the peninsula (NOT IMPLEMENTED)
2147 DbgBreakIfAll((u16_t)(sizeof(lm_frag_list_t) + sizeof(lm_frag_t)*buff_cnt) > bd_chain->page_cnt*LM_PAGE_SIZE); */
2148 if (possible_frag_count < buff_cnt) {
2149 decreased_count = possible_frag_count;
2150 DbgMessage(pdev, WARNl4rx | WARNl4sp,
2151 "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d: peninsula_list cnt (%d) > aux.frag_cnt (%d)\n",
2152 tcp->cid, buff_cnt, possible_frag_count);
2153 } else {
2154 decreased_count = 0;
2155 DbgMessage(pdev, WARNl4rx | WARNl4sp,
2156 "###lm_tcp_rx_get_buffered_data_from_terminate cid=%d: aux.frag_cnt (%d) is enough for %d buffs\n",
2157 tcp->cid, possible_frag_count, buff_cnt);
2158 }
2159 *frag_list = (lm_frag_list_t*)bd_chain->bd_chain_virt;
2160 (*frag_list)->cnt = possible_frag_count;
2161 (*frag_list)->size = 0;
2162 } else {
2163 decreased_count = (u16_t)gen_info->max_frag_count;
2164 }
2165 if (decreased_count) {
2166 u16_t returned_buff_cnt = lm_squeeze_rx_buffer_list(pdev, tcp, decreased_count, &unwanted_gen_buf);
2167 if (decreased_count < returned_buff_cnt) {
2168 lm_frag_list_t* new_frag_list;
2169 u32_t mem_size_for_new_frag_list = returned_buff_cnt * sizeof(lm_frag_t) + sizeof(lm_frag_list_t);
2170 // new_frag_list = (lm_frag_list_t*)mm_alloc_mem(pdev, mem_size_for_new_frag_list, LM_RESOURCE_NDIS);
2171 new_frag_list = (lm_frag_list_t*)mm_rt_alloc_mem(pdev, mem_size_for_new_frag_list, LM_RESOURCE_NDIS);
2172
2173 if (new_frag_list != NULL) {
2174 tcp->type_of_aux_memory = TCP_CON_AUX_RT_MEM;
2175 tcp->aux_memory = new_frag_list;
2176 tcp->aux_mem_size = mem_size_for_new_frag_list;
2177 *frag_list = new_frag_list;
2178 (*frag_list)->cnt = returned_buff_cnt;
2179 (*frag_list)->size = 0;
2180 tcp->aux_mem_flag = TCP_CON_AUX_RT_MEM_SUCCSESS_ALLOCATION;
2181 } else {
2182                     /* No way. Let's send up only part of the data. Data distortion is unavoidable.
2183                        TODO: prevent data distortion by terminating the connection itself, at least */
2184 lm_status = LM_STATUS_RESOURCE;
2185 tcp->aux_mem_flag = TCP_CON_AUX_RT_MEM_FAILED_ALLOCATION;
2186 /* Get rid of whatever remains in the peninsula...add it to unwanted... */
2187 if (unwanted_gen_buf)
2188 {
2189 temp_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_tail(&gen_info->peninsula_list);
2190 if (temp_gen_buf)
2191 {
2192 temp_gen_buf->link.next = &(unwanted_gen_buf->link);
2193 unwanted_gen_buf->link.prev = &(temp_gen_buf->link);
2194 unwanted_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
2195 }
2196 }
2197 else
2198 {
2199 unwanted_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
2200 }
2201 d_list_clear(&gen_info->peninsula_list);
2202
2203 }
2204 }
2205 }
2206 }
2207 if (lm_status == LM_STATUS_SUCCESS)
2208 {
2209 _lm_tcp_rx_get_buffered_data(pdev, tcp, frag_list, gen_buf);
2210
2211         /* for cleanliness: lm_tcp_rx_buffered_data_indicated will not be called;
2212          * the indication is 'successful' */
2213 gen_info->num_bytes_indicated += (u32_t)(*frag_list)->size;
2214 gen_info->first_buf_offset = 0;
2215 gen_info->num_buffers_indicated += (*gen_buf)->ind_nbufs;
2216 }
2217
2218 gen_info->peninsula_blocked = TRUE;
2219
2220 if (unwanted_gen_buf) {
2221 lm_tcp_return_gen_bufs(pdev, tcp, unwanted_gen_buf,MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
2222 }
2223
2224 if (*gen_buf) {
2225 /* with data taken from terminate, we can always act as in 'short-loop' since the bytes for
2226 * this connection won't increase the window anyway... */
2227 (*gen_buf)->flags &= ~GEN_FLAG_SWS_UPDATE;
2228 }
2229
2230 return lm_status;
2231 }
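
/* Illustrative sketch (not compiled): the capacity computation used above when the rx bd-chain
 * page is reused as a frag list during terminate. One page holds the lm_frag_list_t header plus
 * as many lm_frag_t entries as fit in the remainder. Shown here with hypothetical sizes passed
 * in, instead of sizeof()/LM_PAGE_SIZE. */
#if 0
static u32_t example_possible_frag_count(u32_t page_size, u32_t list_hdr_size, u32_t frag_size)
{
    if (page_size <= list_hdr_size) {
        return 0;                        /* nothing fits beyond the header */
    }
    return (page_size - list_hdr_size) / frag_size;
}
#endif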
2232
2233 lm_status_t lm_tcp_rx_get_buffered_data(
2234 struct _lm_device_t * pdev,
2235 lm_tcp_state_t * tcp,
2236 lm_frag_list_t ** frag_list,
2237 lm_tcp_gen_buf_t ** gen_buf
2238 )
2239 {
2240 lm_tcp_con_t * rx_con = tcp->rx_con;
2241 lm_tcp_con_rx_gen_info_t * gen_info;
2242 lm_status_t lm_status;
2243
2244 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_get_buffered_data cid=%d\n", tcp->cid);
2245 gen_info = &rx_con->u.rx.gen_info;
2246
2247 DbgBreakIf(gen_info->peninsula_blocked == TRUE); /* terminate was already called */
2248
2249 if (gen_info->peninsula_nbytes == 0 || (rx_con->flags & TCP_RX_IND_BLOCKED)) {
2250 return LM_STATUS_FAILURE;
2251 }
2252
2253 *frag_list = NULL;
2254 lm_status = _lm_tcp_rx_get_buffered_data(pdev, tcp, frag_list, gen_buf);
2255 if (*gen_buf) {
2256 if (gen_info->update_window_mode == LM_TOE_UPDATE_MODE_LONG_LOOP) {
2257 gen_info->pending_indicated_bytes += (*gen_buf)->ind_bytes;
2258 /* We need to increase the number of pending return indications here, since once we return
2259 * we are basically pending for the return of this specific indication. There are two cases
2260 * that require decreasing the pending return indications. The first is if the indication failed
2261 * the second is if it succeeded AND the buffers returned... */
2262 gen_info->pending_return_indications++;
2263 (*gen_buf)->flags |= GEN_FLAG_SWS_UPDATE;
2264 } else {
2265 (*gen_buf)->flags &= ~GEN_FLAG_SWS_UPDATE;
2266 }
2267 }
2268
2269 return LM_STATUS_SUCCESS;
2270 }
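
/* Illustrative sketch (not compiled): the two receive-window update paths chosen by
 * lm_tcp_rx_get_buffered_data() above. In "long loop" mode the window credit is deferred until
 * the indicated buffers are returned (the head buffer is flagged and counted as pending); in
 * "short loop" mode the credit is applied as soon as bytes are accepted. Names below are
 * simplified stand-ins for the driver's fields and flags. */
#if 0
#define EX_MODE_SHORT_LOOP 0
#define EX_MODE_LONG_LOOP  1
#define EX_FLAG_SWS_UPDATE 0x1

static void example_tag_indication(u8_t update_mode, u32_t ind_bytes,
                                   u32_t *pending_returns, u32_t *pending_bytes, u32_t *buf_flags)
{
    if (update_mode == EX_MODE_LONG_LOOP) {
        (*pending_returns)++;              /* credit the window only when this buffer comes back */
        *pending_bytes += ind_bytes;
        *buf_flags |= EX_FLAG_SWS_UPDATE;
    } else {
        *buf_flags &= ~EX_FLAG_SWS_UPDATE; /* short loop: window is credited on acceptance */
    }
}
#endif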
2271
2272 void lm_tcp_rx_buffered_data_indicated(
2273 struct _lm_device_t * pdev,
2274 lm_tcp_state_t * tcp,
2275 u32_t accepted_bytes,
2276 lm_tcp_gen_buf_t * gen_buf /* head of indications generic buffer NULL if indication succeeded */
2277 )
2278 {
2279 lm_tcp_con_rx_gen_info_t * gen_info = &tcp->rx_con->u.rx.gen_info;
2280
2281 DbgMessage(pdev, VERBOSEl4rx , "###lm_tcp_rx_buffered_data_indicated accepted_bytes = %d cid=%d\n", accepted_bytes, tcp->cid);
2282
2283 DbgBreakIf(gen_info->peninsula_blocked == TRUE); /* terminate was already called */
2284
2285 lm_tcp_incr_consumed_gen(pdev, tcp, accepted_bytes);
2286 gen_info->num_bytes_indicated += accepted_bytes;
2287
2288     if (gen_buf == NULL) { /* successful indication */
2289 gen_info->first_buf_offset = 0;
2290 if (gen_info->update_window_mode == LM_TOE_UPDATE_MODE_SHORT_LOOP) {
2291 gen_info->add_sws_bytes += accepted_bytes;
2292 }
2293 gen_info->num_success_indicates++;
2294 gen_info->bytes_indicated_accepted += accepted_bytes;
2295 tcp->rx_con->u.rx.zero_byte_posted_during_ind = FALSE;
2296 } else { /* complete rejection / partial success, gen_buf remains in our control */
2297 /* indication failed */
2298 lm_tcp_gen_buf_t * curr_gen_buf, * ret_buf;
2299 d_list_t return_to_pool_list;
2300 d_list_t return_to_peninsula_list;
2301 u32_t nbytes;
2302 DbgBreakIf(accepted_bytes > gen_buf->ind_bytes);
2303 gen_info->peninsula_nbytes += gen_buf->ind_bytes - accepted_bytes;
2304
2305 gen_info->num_failed_indicates++;
2306 gen_info->bytes_indicated_accepted+= accepted_bytes;
2307 gen_info->bytes_indicated_rejected+= gen_buf->ind_bytes - accepted_bytes;
2308
2309 DbgMessage(pdev, INFORMl4rx, "GENERIC: %s Indication for cid=%d accepted_bytes=%d\n",
2310 (accepted_bytes == 0)? "Rejected" : "Partial", tcp->cid, accepted_bytes);
2311
2312 d_list_init(&return_to_pool_list, NULL, NULL, 0);
2313 d_list_init(&return_to_peninsula_list, NULL, NULL, 0);
2314
2315 DbgBreakIf(gen_buf->tcp->rx_con->flags & TCP_INDICATE_REJECTED);
2316 if (tcp->rx_con->u.rx.zero_byte_posted_during_ind) {
2317 tcp->rx_con->u.rx.zero_byte_posted_during_ind = FALSE;
2318 } else {
2319 gen_buf->tcp->rx_con->flags |= TCP_INDICATE_REJECTED;
2320 }
2321
2322 curr_gen_buf = gen_buf;
2323
2324 /* indicated bytes are in fact 'freed up' space: so we can make the sws_bytes larger,
2325 * this is always true here luxury-mode or not */
2326 gen_info->add_sws_bytes += accepted_bytes;
2327
2328 /* buffer was returned to us so it is no longer pending return...if we increased the 'pending' we have
2329 * to decrease */
2330 if (gen_buf->flags & GEN_FLAG_SWS_UPDATE) {
2331 gen_info->pending_return_indications--;
2332 gen_info->pending_indicated_bytes-=gen_buf->ind_bytes;
2333 }
2334 mm_atomic_inc(&pdev->toe_info.stats.total_indicated_returned); /* stats */
2335
2336         /* return buffers that were fully indicated to the generic pool, and ones that were not to the peninsula */
2337 while (accepted_bytes) {
2338 nbytes = ((lm_tcp_gen_buf_t *)curr_gen_buf)->placed_bytes - gen_info->first_buf_offset;
2339 if (accepted_bytes >= nbytes) {
2340 /* the buffer was completely accepted */
2341 accepted_bytes -= nbytes;
2342 ret_buf = curr_gen_buf;
2343 curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
2344 d_list_push_tail(&return_to_pool_list, &ret_buf->link);
2345 gen_info->num_buffers_indicated++;
2346 gen_info->first_buf_offset = 0;
2347 } else {
2348 gen_info->first_buf_offset += (u16_t)accepted_bytes;
2349 accepted_bytes = 0;
2350 }
2351 }
2352
2353 /* is there anything to return to the peninsula ? (i.e. return_head moved) */
2354 while (curr_gen_buf) {
2355 curr_gen_buf->ind_bytes = 0;
2356 curr_gen_buf->ind_nbufs = 0;
2357 ret_buf = curr_gen_buf;
2358 curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
2359 gen_info->bufs_indicated_rejected++;
2360 d_list_push_tail(&return_to_peninsula_list, &ret_buf->link);
2361 }
2362
2363 if (!d_list_is_empty(&return_to_pool_list)) {
2364 lm_tcp_return_list_of_gen_bufs(pdev, tcp, &return_to_pool_list, MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
2365 }
2366
2367 /* There must be at least something to return to the peninsula since this was partial indication */
2368 DbgBreakIf(d_list_is_empty(&return_to_peninsula_list));
2369 /* re-insert generic buffers to the peninsula.
2370 * we need to re-insert the buffers to the head of the peninsula */
2371 d_list_add_head(&gen_info->peninsula_list, &return_to_peninsula_list);
2372
2373 }
2374
2375 }
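
/* Illustrative sketch (not compiled): how accepted_bytes is spread over the indicated buffers in
 * lm_tcp_rx_buffered_data_indicated() above. Buffers fully covered by the accepted count are
 * released to the pool; the first buffer that is only partially covered keeps the remainder as
 * the new first_buf_offset and stays (with everything after it) for a later indication.
 * Simplified stand-in arrays, not the driver's lists. */
#if 0
static u32_t example_consume_accepted(const u32_t *buf_bytes, u32_t buf_cnt,
                                      u32_t accepted_bytes, u32_t *first_buf_offset)
{
    u32_t i, fully_consumed = 0, avail;

    for (i = 0; i < buf_cnt && accepted_bytes; i++) {
        avail = buf_bytes[i] - *first_buf_offset;     /* offset applies to the first buffer only */
        if (accepted_bytes >= avail) {
            accepted_bytes -= avail;                  /* buffer fully accepted -> return to pool */
            *first_buf_offset = 0;
            fully_consumed++;
        } else {
            *first_buf_offset += accepted_bytes;      /* partial: remember where to resume */
            accepted_bytes = 0;
        }
    }
    return fully_consumed;                            /* buffers that can go back to the pool */
}
#endif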
2376
2377 /** Description
2378 * returns the buffers to the generic pool
2379 */
2380 void lm_tcp_return_gen_bufs(struct _lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_gen_buf_t * gen_buf, u32_t flags, u8_t grq_idx)
2381 {
2382 lm_tcp_gen_buf_t * curr_gen_buf = gen_buf;
2383
2384 #if DBG
2385 gen_buf->ind_nbufs = 0; /* for debugging purposes will count how many buffers are in our list */
2386 while (curr_gen_buf) {
2387 DbgBreakIf(SIG(curr_gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
2388 DbgBreakIf(END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
2389 /* We increase the bytes for both pool-buffers, and buffered-data buffers because when the OS
2390 * gives posted buffers the window is smaller */
2391 curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
2392 gen_buf->ind_nbufs++;
2393 }
2394 #endif
2395
2396 mm_tcp_return_gen_bufs(pdev, gen_buf,flags,grq_idx);
2397 }
2398
2399 /** Description
2400 * returns the buffers to the generic pool
2401 */
2402 void lm_tcp_return_list_of_gen_bufs(struct _lm_device_t * pdev, lm_tcp_state_t * tcp, d_list_t * gen_buf_list, u32_t flags, u8_t grq_idx)
2403 {
2404 lm_tcp_gen_buf_t * gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(gen_buf_list);
2405 lm_tcp_gen_buf_t * curr_gen_buf = gen_buf;
2406
2407 #if DBG
2408 gen_buf->ind_nbufs = 0; /* for debugging purposes will count how many buffers are in our list */
2409 while (curr_gen_buf) {
2410 DbgBreakIf(SIG(curr_gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
2411 DbgBreakIf(END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);
2412 /* We increase the bytes for both pool-buffers, and buffered-data buffers because when the OS
2413 * gives posted buffers the window is smaller */
2414 curr_gen_buf = NEXT_GEN_BUF(curr_gen_buf);
2415 gen_buf->ind_nbufs++;
2416 }
2417 #endif
2418
2419 mm_tcp_return_list_of_gen_bufs(pdev, gen_buf_list,flags,grq_idx);
2420 }
2421
2422 void lm_tcp_rx_indication_returned(struct _lm_device_t *pdev, lm_tcp_state_t * tcp, lm_tcp_gen_buf_t * gen_buf)
2423 {
2424 DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_con_indication_returned cid=%d\n", tcp->cid);
2425
2426 DbgBreakIf(tcp != gen_buf->tcp);
2427 DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));
2428
2429 /* TBA fix in case of RcvIndicateSize > 0 */
2430 DbgBreakIf(gen_buf->refcnt != 0);
2431
2432 tcp->rx_con->u.rx.gen_info.pending_return_indications--;
2433 tcp->rx_con->u.rx.gen_info.pending_indicated_bytes -= gen_buf->ind_bytes;
2434
2435     /* Update the sws bytes according to the indicated number of bytes. This function is only called if
2436      * this buffer is in fact marked as an 'update buffer'; otherwise it isn't called. */
2437 DbgBreakIfAll(!(gen_buf->flags & GEN_FLAG_SWS_UPDATE));
2438 lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, gen_buf->ind_bytes, TCP_RX_POST_SWS_INC);
2439 lm_tcp_return_gen_bufs(pdev, tcp, gen_buf, MM_TCP_RGB_COMPENSATE_GRQS, NON_EXISTENT_SB_IDX);
2440 }
2441
2442 u8_t lm_tcp_is_tcp_dead(struct _lm_device_t * pdev, lm_tcp_state_t * tcp, u8_t op)
2443 {
2444 UNREFERENCED_PARAMETER_(pdev);
2445
2446 if(op == TCP_IS_DEAD_OP_UPLD_COMP) {
2447 DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
2448 tcp->hdr.status = STATE_STATUS_UPLOAD_DONE;
2449 }
2450 if (GET_FLAGS(tcp->rx_con->flags, TCP_COMP_DEFERRED)) {
2451 /* we can't kill the connection here! it's still being handled by deferred function which will
2452 * access it... killing will be done from that context... */
2453 return FALSE;
2454 }
2455 if (tcp->rx_con->u.rx.gen_info.pending_return_indications == 0) {
2456 /* If the function is called from offload completion flow, we might have completions on the RCQ
2457 that we haven't processed yet so haven't completed / indicated bufs,
2458 so there are bytes in the peninsula and this state is legal */
2459 DbgBreakIf(!(tcp->rx_con->flags & TCP_RX_IND_BLOCKED) &&
2460 (tcp->rx_con->u.rx.gen_info.peninsula_nbytes != 0) &&
2461 (op != TCP_IS_DEAD_OP_OFLD_COMP_DFRD));
2462 if (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE) {
2463 return TRUE;
2464 }
2465 }
2466 return FALSE;
2467 }
2468
2469 lm_status_t lm_tcp_con_status(struct _lm_device_t * pdev, lm_tcp_con_t * rx_con)
2470 {
2471 UNREFERENCED_PARAMETER_(pdev);
2472
2473 if (rx_con->flags & TCP_RX_POST_BLOCKED) {
2474 return LM_STATUS_CONNECTION_CLOSED;
2475 }
2476 return LM_STATUS_SUCCESS;
2477 }
2478
2479 u32_t lm_tcp_calc_gen_buf_size(struct _lm_device_t * pdev)
2480 {
2481 u32_t gen_buf_size = 0;
2482 u32_t const chain_idx = LM_SW_LEADING_RSS_CID(pdev);
2483
2484 /* determine size of buffer: in steps of pages, larger than the minimum and
2485 * the mtu */
2486 if(CHK_NULL(pdev) ||
2487 ERR_IF((ARRSIZE(pdev->params.l2_cli_con_params) <= chain_idx) ||
2488 (CHIP_IS_E1H(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1H)) || /* TODO E2 add IS_E2*/
2489 (CHIP_IS_E1(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1)) ))
2490 {
2491 DbgBreakIf(1);
2492 return 0;
2493 }
2494
2495 if (pdev->params.l4_gen_buf_size < pdev->params.l2_cli_con_params[chain_idx].mtu)
2496 {
2497 gen_buf_size = pdev->params.l2_cli_con_params[chain_idx].mtu;
2498
2499 }
2500 else
2501 {
2502 gen_buf_size = pdev->params.l4_gen_buf_size;
2503 }
2504 /* bring to page-size boundary */
2505 gen_buf_size = (gen_buf_size + (LM_PAGE_SIZE-1)) & ~(LM_PAGE_SIZE-1);
2506
2507 return gen_buf_size;
2508 }
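
/* Illustrative sketch (not compiled): the page-boundary round-up used by
 * lm_tcp_calc_gen_buf_size() above. It relies on the page size being a power of two; e.g. with a
 * 4096-byte page, 1500 -> 4096 and 9014 -> 12288. Shown as a standalone helper for clarity. */
#if 0
static u32_t example_round_up_to_page(u32_t size, u32_t page_size /* power of two */)
{
    return (size + (page_size - 1)) & ~(page_size - 1);
}
#endif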
2509
2510 u16_t lm_squeeze_rx_buffer_list(
2511 struct _lm_device_t * pdev,
2512 lm_tcp_state_t * tcp,
2513 u16_t adjust_number,
2514 lm_tcp_gen_buf_t ** unwanted_gen_buf
2515 )
2516 {
2517 u32_t gen_buff_size = lm_tcp_calc_gen_buf_size(pdev);
2518 lm_tcp_con_t * rx_con = tcp->rx_con;
2519 lm_tcp_con_rx_gen_info_t * gen_info = &rx_con->u.rx.gen_info;
2520 d_list_t unwanted_list = {0};
2521 lm_tcp_gen_buf_t * gen_buf_copy_to = NULL;
2522 lm_tcp_gen_buf_t * gen_buf_copy_from = NULL, *next_buffer = NULL;
2523 u16_t free_bytes_to_copy = 0, bytes_to_copy = 0, gen_buf_offset = 0;
2524 u8_t force_buffer_division = FALSE;
2525 u16_t buffers_number = (u16_t)d_list_entry_cnt(&gen_info->peninsula_list);
2526
2527 *unwanted_gen_buf = NULL;
2528
2529 if ((adjust_number * gen_buff_size) >= gen_info->peninsula_nbytes) {
2530 d_list_init(&unwanted_list, NULL, NULL, 0);
2531 gen_buf_copy_to = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
2532 next_buffer = NEXT_GEN_BUF(gen_buf_copy_to);
2533 free_bytes_to_copy = gen_buff_size - gen_buf_copy_to->placed_bytes;
2534 while (buffers_number > adjust_number) {
2535 gen_buf_copy_from = next_buffer;
2536 if (gen_buf_copy_from != NULL) {
2537 next_buffer = NEXT_GEN_BUF(gen_buf_copy_from);
2538 bytes_to_copy = gen_buf_copy_from->placed_bytes;
2539 if (bytes_to_copy <= free_bytes_to_copy) {
2540 mm_memcpy(gen_buf_copy_to->buf_virt + gen_buf_copy_to->placed_bytes,
2541 gen_buf_copy_from->buf_virt, bytes_to_copy);
2542 free_bytes_to_copy -= bytes_to_copy;
2543 gen_buf_copy_to->placed_bytes += bytes_to_copy;
2544 d_list_remove_entry(&gen_info->peninsula_list, &gen_buf_copy_from->link);
2545 d_list_push_tail(&unwanted_list, &gen_buf_copy_from->link);
2546 buffers_number--;
2547 continue;
2548 } else {
2549 if (force_buffer_division) {
2550 if (free_bytes_to_copy) {
2551 mm_memcpy(gen_buf_copy_to->buf_virt + gen_buf_copy_to->placed_bytes,
2552 gen_buf_copy_from->buf_virt, free_bytes_to_copy);
2553 gen_buf_copy_to->placed_bytes += free_bytes_to_copy;
2554 mm_memcpy(gen_buf_copy_from->buf_virt,
2555 gen_buf_copy_from->buf_virt + free_bytes_to_copy, bytes_to_copy - free_bytes_to_copy);
2556 gen_buf_copy_from->placed_bytes -= free_bytes_to_copy;
2557 }
2558 }
2559 gen_buf_copy_to = gen_buf_copy_from;
2560 next_buffer = NEXT_GEN_BUF(gen_buf_copy_from);
2561 free_bytes_to_copy = gen_buff_size - gen_buf_copy_to->placed_bytes;
2562 continue;
2563 }
2564 } else {
2565 if (!force_buffer_division) {
2566 force_buffer_division = TRUE;
2567 gen_buf_copy_to = (lm_tcp_gen_buf_t*)d_list_peek_head(&gen_info->peninsula_list);
2568 next_buffer = NEXT_GEN_BUF(gen_buf_copy_to);
2569 gen_buf_offset = gen_info->first_buf_offset;
2570 if (gen_buf_offset) {
2571 /* move to start of buffer*/
2572 mm_memcpy(gen_buf_copy_to->buf_virt,
2573 gen_buf_copy_to->buf_virt + gen_buf_offset, gen_buf_copy_to->placed_bytes - gen_buf_offset);
2574 gen_buf_copy_to->placed_bytes -= gen_buf_offset;
2575 gen_buf_offset = gen_info->first_buf_offset = 0;
2576 }
2577 free_bytes_to_copy = gen_buff_size - gen_buf_copy_to->placed_bytes;
2578 continue;
2579 } else {
2580 DbgMessage(pdev, WARNl4rx | WARNl4sp,
2581                            "###lm_squeeze_rx_buffer_list cid=%d: peninsula_list cnt (%d) is still more than frag_count (%d)\n",
2582 tcp->cid, buffers_number, adjust_number);
2583 break;
2584 }
2585 }
2586 }
2587 *unwanted_gen_buf = (lm_tcp_gen_buf_t*)d_list_peek_head(&unwanted_list);
2588 DbgMessage(pdev, WARNl4rx | WARNl4sp,
2589 "###lm_squeeze_rx_buffer_list cid=%d(%d,%d,%d): peninsula_list cnt is decreased till %d\n",
2590 tcp->cid, tcp->tcp_cached.initial_rcv_wnd, tcp->tcp_cached.rcv_indication_size, gen_buff_size, buffers_number);
2591 } else {
2592 DbgMessage(pdev, WARNl4rx | WARNl4sp,
2593 "###lm_squeeze_rx_buffer_list cid=%d(%d,%d): could not replace %dB (%d bufs) into %d frags of %dB each\n",
2594 tcp->cid, tcp->tcp_cached.initial_rcv_wnd, tcp->tcp_cached.rcv_indication_size,
2595 gen_info->peninsula_nbytes, buffers_number, adjust_number, gen_buff_size);
2596 }
2597 return buffers_number;
2598 }
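
/* Illustrative sketch (not compiled): the compaction idea behind lm_squeeze_rx_buffer_list()
 * above - move bytes from later, partially filled buffers into the free tail space of earlier
 * ones so that fewer buffers are needed to hold the same data. An array-based stand-in (no
 * first-buffer offset, no forced division), not the driver's d_list handling. */
#if 0
struct example_buf { u8_t *virt; u32_t placed; u32_t capacity; };

static u32_t example_squeeze(struct example_buf *bufs, u32_t cnt)
{
    u32_t dst = 0, src = 1, remaining = 0, i;

    while (src < cnt) {
        u32_t space = bufs[dst].capacity - bufs[dst].placed;
        if (bufs[src].placed <= space) {
            /* whole source buffer fits into the destination's tail - absorb it and empty it */
            mm_memcpy(bufs[dst].virt + bufs[dst].placed, bufs[src].virt, bufs[src].placed);
            bufs[dst].placed += bufs[src].placed;
            bufs[src].placed = 0;
        } else {
            dst = src;                  /* destination is full enough - it becomes the new target */
        }
        src++;
    }
    for (i = 0; i < cnt; i++) {         /* count how many buffers still hold data */
        if (bufs[i].placed) {
            remaining++;
        }
    }
    return remaining;
}
#endif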
2599
2600 void lm_tcp_rx_clear_isles(struct _lm_device_t * pdev, lm_tcp_state_t * tcp_state, d_list_t * isles_list)
2601 {
2602 lm_tcp_con_rx_gen_info_t * gen_info;
2603 u8_t isle_cnt;
2604
2605 DbgBreakIf(!(tcp_state && tcp_state->rx_con));
2606 gen_info = &tcp_state->rx_con->u.rx.gen_info;
2607 while ((isle_cnt = (u8_t)d_list_entry_cnt(&gen_info->isles_list))) {
2608 d_list_t aux_isles_list;
2609 d_list_init(&aux_isles_list, NULL, NULL, 0);
2610 _lm_tcp_isle_remove(pdev, tcp_state, NON_EXISTENT_SB_IDX, isle_cnt, &aux_isles_list);
2611 if (!d_list_is_empty(&aux_isles_list)) {
2612 d_list_add_head(isles_list, &aux_isles_list);
2613 }
2614 }
2615 return;
2616 }
2617
2618