#include "lm5710.h"
#include "bd_chain.h"
#include "lm_l4fp.h"
#include "mm_l4if.h"

/**
 * Description
 *   Completes Rx and Tx application buffers towards the client.
 * Assumptions:
 *   Called under the fp-lock.
 *   Called only from the DPC-flow.
 */
void lm_tcp_complete_bufs(
    struct _lm_device_t *pdev,
    lm_tcp_state_t      *tcp,
    lm_tcp_con_t        *con)
{
    s_list_t completed_bufs;

    /* check which buffers need to be completed to the client */
    s_list_clear(&completed_bufs);

    /* should only be here if we have something to complete */
    DbgBreakIf(con->dpc_info.dpc_completed_tail == NULL);
    s_list_split(&con->active_tb_list, &completed_bufs,
                 con->dpc_info.dpc_completed_tail, con->dpc_info.dpc_bufs_completed);
    con->dpc_info.dpc_completed_tail = NULL;
    DbgBreakIf(con->rq_nbytes < con->dpc_info.dpc_actual_bytes_completed);
    con->rq_nbytes -= con->dpc_info.dpc_actual_bytes_completed;

    lm_bd_chain_bds_consumed(&con->bd_chain, con->dpc_info.dpc_bd_used);
    con->dpc_info.dpc_bd_used = 0;
    con->dpc_info.dpc_bufs_completed = 0;

    con->buffer_completed_cnt += s_list_entry_cnt(&completed_bufs);
    DbgMessage(pdev, VERBOSEl4fp,
               "cid=%d, completing %d bufs towards mm, actual_completed_bytes=%d, %d bufs still in active tb list\n",
               tcp->cid, s_list_entry_cnt(&completed_bufs), con->dpc_info.dpc_actual_bytes_completed, s_list_entry_cnt(&con->active_tb_list));

    con->dpc_info.dpc_actual_bytes_completed = 0;

    /* GilR 5/10/2006 - TBD - might need some kind of indication policy towards the mm - i.e. indicate at most MaxIndicationLimit at a time */
    DbgBreakIf(s_list_is_empty(&completed_bufs));
    if (!con->rq_completion_calls) {
        lm_tcp_buffer_t *tcp_buf = (lm_tcp_buffer_t *)s_list_peek_head(&completed_bufs);
        if (tcp_buf->flags & TCP_BUF_FLAG_L4_PARTIAL_FILLED) {
            RESET_FLAGS(con->db_data.rx->flags, TOE_RX_DB_DATA_PARTIAL_FILLED_BUF);
        }
    }
    con->rq_completion_calls++;
    mm_tcp_complete_bufs(pdev, tcp, con, &completed_bufs, LM_STATUS_SUCCESS);
}
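/* Illustrative sketch (not part of the driver): the contract between the DPC
 * accumulators and lm_tcp_complete_bufs() above. A and B are hypothetical
 * lm_tcp_buffer_t pointers; the field usage mirrors the function body.
 * Assuming s_list_split() detaches the first dpc_bufs_completed entries,
 * ending at dpc_completed_tail, from active_tb_list (which matches how it is
 * used above): with active_tb_list = A->B->C->D, dpc_completed_tail = &B->link
 * and dpc_bufs_completed = 2, the split leaves completed_bufs = A->B and
 * active_tb_list = C->D. */
#if 0
/* state a DPC would have accumulated for two fully completed buffers A and B */
con->dpc_info.dpc_completed_tail         = &B->link;             /* hypothetical */
con->dpc_info.dpc_bufs_completed         = 2;
con->dpc_info.dpc_bd_used                = A->bd_used + B->bd_used;
con->dpc_info.dpc_actual_bytes_completed = A->size + B->size;
/* under the fp-lock: splits A->B off active_tb_list, returns their BDs to the
 * bd chain, zeroes the accumulators and indicates the buffers to the mm */
lm_tcp_complete_bufs(pdev, tcp, con);
#endif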
/** Description:
 *   Completes nbytes from Tx and Rx application buffers.
 * Assumptions:
 *   Called ONLY from the dpc-flow (or deferred_cqes), never from the POST flow.
 *   Called W/O a lock (unless called from deferred).
 *   push can have 3 values:
 *   - 0: no push
 *   - 1: regular push
 *   - 2: push as a result of terminate / reset / fin...
 * Returns:
 *   Actual bytes completed towards the mm. If push==0 this number equals the given
 *   completed_bytes; if push==1 it may be larger. If a fin was received it may be
 *   smaller than completed_bytes by at most 1.
 */
u32_t lm_tcp_complete_nbytes(
    struct _lm_device_t *pdev,
    lm_tcp_state_t      *tcp,
    lm_tcp_con_t        *con,             /* Rx OR Tx connection */
    u32_t               completed_bytes,  /* num bytes completed (might be 0) */
    u8_t                push              /* if == 0, don't complete partially completed
                                             buffers towards the mm. If (1) - regular push,
                                             if (2) - push as a result of an sp-completion
                                             (terminate, for example) */)
{
    lm_tcp_buffer_t *tcp_buf = lm_tcp_next_entry_dpc_active_list(con); /* tcp_buf is the next buffer after tail... */
    u32_t actual_completed_bytes = completed_bytes;
    u8_t  dbg_no_more_bufs = FALSE;

    UNREFERENCED_PARAMETER_(tcp);

    DbgMessage(pdev, VERBOSEl4fp, "#lm_tcp_complete_nbytes\n");

    /* we now have completed_bytes on the RQ (could be the result of a copy from the GRQ
     * (in the rx case) or of a regular rq-completion) */
    con->dpc_info.dpc_rq_placed_bytes += completed_bytes;

    DbgBreakIf((con->type == TCP_CON_TYPE_RX) && !tcp_buf); /* RX: even if completed_bytes==0 */
    /* Tx: tcp_buf can be NULL since this can be a fin completion */

    while(tcp_buf && tcp_buf->more_to_comp <= completed_bytes) { /* buffer fully completed */
        DbgBreakIf((tcp_buf->more_to_comp == tcp_buf->size) &&
                   !(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START ?
                     con->app_buf_bytes_acc_comp == 0 :
                     con->app_buf_bytes_acc_comp > 0));

        completed_bytes -= tcp_buf->more_to_comp;
        con->app_buf_bytes_acc_comp += tcp_buf->more_to_comp;
        tcp_buf->more_to_comp = 0; /* essential */

        /* complete buffer */
        con->dpc_info.dpc_completed_tail = &tcp_buf->link; /* last tcp_buf that needs to be completed */
        con->dpc_info.dpc_bd_used += tcp_buf->bd_used;
        con->dpc_info.dpc_bufs_completed += 1;
        con->dpc_info.dpc_actual_bytes_completed += tcp_buf->size;

        if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
            tcp_buf->app_buf_xferred = con->app_buf_bytes_acc_comp;
            DbgBreakIf(tcp_buf->app_buf_xferred != tcp_buf->app_buf_size); /* this is NOT a partial completion */
            con->app_buf_bytes_acc_comp = 0;
        } else {
            if (tcp_buf->flags & TCP_BUF_FLAG_L4_SPLIT) {
                /* we've completed a split buffer */
                DbgBreakIf(GET_FLAGS(con->flags, TCP_POST_DELAYED) == 0);
                con->dpc_info.dpc_unblock_post = TRUE;
                RESET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_SPLIT);
                dbg_no_more_bufs = TRUE; /* we don't expect any more buffers after this one... */
            }
            tcp_buf->app_buf_xferred = 0;
        }

        tcp_buf = (lm_tcp_buffer_t *)s_list_next_entry(&tcp_buf->link);
        DbgBreakIf((con->type == TCP_CON_TYPE_RX) && completed_bytes && !tcp_buf);
        DbgBreakIf((con->type == TCP_CON_TYPE_TX) && completed_bytes > 1 && !tcp_buf); /* could be 1 if fin */
        DbgBreakIf(tcp_buf && dbg_no_more_bufs);
    }
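    /* Worked example (illustrative, not from the source; assumes the 'end' TB does not
     * carry TCP_BUF_FLAG_L4_RX_NO_PUSH): an app buffer of 100 bytes posted as two TBs
     * of 60 ('start') and 40 ('end') bytes, with the FW completing 70 bytes and
     * push == 1. The loop above fully completes the 60-byte TB (completed_bytes drops
     * to 10); the block below then subtracts the remaining 10 bytes from the 40-byte
     * TB (more_to_comp becomes 30) and, since push != 0, the skip loop completes those
     * 30 bytes as well. The mm sees app_buf_xferred == 70, bytes_push_skip_cnt grows
     * by 30, and the function returns 100. */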
    if(tcp_buf) { /* possibly a partially completed buffer */
        DbgBreakIf((tcp_buf->more_to_comp == tcp_buf->size) &&
                   !(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START ?
                     con->app_buf_bytes_acc_comp == 0 :
                     con->app_buf_bytes_acc_comp > 0));
        tcp_buf->more_to_comp -= completed_bytes;
        con->app_buf_bytes_acc_comp += completed_bytes;
        completed_bytes = 0;
        /* special care if push != 0 AND some bytes were really completed for this buf */
        if(push && ((tcp_buf->flags & TCP_BUF_FLAG_L4_PARTIAL_FILLED) || (con->app_buf_bytes_acc_comp > 0))) {
            DbgBreakIf(con->type != TCP_CON_TYPE_RX); /* push is relevant for the Rx con only */
            DbgBreakIf((push == 1) && (tcp_buf->flags & TCP_BUF_FLAG_L4_RX_NO_PUSH));

            /* Skip TBs until the end of the app buffer. Note that it's possible we don't have
             * an 'end' buffer in the case of large split buffers; in that case we'll hit the
             * tcp buffer with the "reserved" (TCP_BUF_FLAG_L4_SPLIT) flag. We then need to mark
             * the connection as being in the middle of completing a split buffer - meaning
             * every new buffer that arrives is completed immediately, until the one marked
             * 'end' arrives. Terrible, but there is no elegant way to deal with large split
             * buffers... */
            do {
                tcp_buf = lm_tcp_next_entry_dpc_active_list(con);
                DbgBreakIf(!tcp_buf); /* push only comes from the FW. Therefore:
                                         - we can't reach this place from a peninsula-to-rq copy completion
                                         - since we do not post partial app bufs to the FW, if we get here
                                           it is only after the entire app buf is attached to the bd chain */
                actual_completed_bytes += tcp_buf->more_to_comp;
                con->bytes_push_skip_cnt += tcp_buf->more_to_comp; /* how many bytes did we skip? */
                tcp_buf->more_to_comp = 0;
                con->partially_completed_buf_cnt++;
                /* complete buffer */
                con->dpc_info.dpc_completed_tail = &tcp_buf->link;
                con->dpc_info.dpc_bd_used += tcp_buf->bd_used;
                con->dpc_info.dpc_bufs_completed += 1;
                con->dpc_info.dpc_actual_bytes_completed += tcp_buf->size;
            } while (!(GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_POST_END)) && !(GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_SPLIT)));

            if (GET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_SPLIT)) {
                /* we've completed a split buffer */
                DbgBreakIf(GET_FLAGS(con->flags, TCP_POST_DELAYED) == 0);
                /* Mark the connection as "complete next split buffers". In the meantime this
                 * connection is delayed, so post won't look at this flag and it's safe to
                 * change it locklessly. */
                SET_FLAGS(con->flags, TCP_POST_COMPLETE_SPLIT);
                con->dpc_info.dpc_unblock_post = TRUE;
                RESET_FLAGS(tcp_buf->flags, TCP_BUF_FLAG_L4_SPLIT); /* this is everest-internal, we don't want the miniport looking at this... */
            } else {
                tcp_buf->app_buf_xferred = con->app_buf_bytes_acc_comp;
                DbgBreakIf(tcp_buf->app_buf_xferred >= tcp_buf->app_buf_size); /* this IS a partial completion */
                con->app_buf_bytes_acc_comp = 0;
            }
        }
    }

    /* If all bytes were completed, completed_bytes should be zero. The only case where it
     * won't be zero is if one of the completed bytes was a 'fin' completion (TX only), in
     * which case completed_bytes will be 1. In the Rx case completed_bytes must always be
     * zero. */
    DbgBreakIf((con->type == TCP_CON_TYPE_RX) && (completed_bytes != 0));
    DbgBreakIf((con->type == TCP_CON_TYPE_TX) && (completed_bytes > 1));
    return actual_completed_bytes - completed_bytes;
} /* lm_tcp_complete_nbytes */
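/* Illustrative DPC-flow sketch (assumptions, not driver code): how the two
 * routines above are intended to pair up, per their header comments -
 * lm_tcp_complete_nbytes() runs without a lock, lm_tcp_complete_bufs() under
 * the fp-lock. 'cqe_bytes' and the fp_lock()/fp_unlock() helpers are
 * hypothetical stand-ins for the CQE parsing and mm locking done elsewhere. */
#if 0
u32_t acked = lm_tcp_complete_nbytes(pdev, tcp, con, cqe_bytes, /* push */ 1);
/* 'acked' feeds the caller's byte accounting towards the mm */
if (con->dpc_info.dpc_completed_tail != NULL) { /* something to indicate */
    fp_lock(pdev, con);                         /* hypothetical */
    lm_tcp_complete_bufs(pdev, tcp, con);       /* flushes the accumulators */
    fp_unlock(pdev, con);                       /* hypothetical */
}
#endif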
void lm_tcp_abort_bufs(
    struct _lm_device_t *pdev,
    lm_tcp_state_t      *tcp,
    lm_tcp_con_t        *con,
    lm_status_t         stat
    )
{
    lm_tcp_buffer_t *tcp_buf;
    s_list_entry_t  *lentry_p;
    s_list_t        tmp_list;

    DbgBreakIf(!(pdev && con));
    DbgMessage(pdev, INFORMl4,
               "#lm_tcp_abort_bufs: tcp=%p, con type=%d, stat=%d\n",
               tcp, con->type, stat);

    s_list_init(&tmp_list, NULL, NULL, 0);

    /* we don't expect any pending completions here... (unless we're in error recovery) */
    if (!lm_reset_is_inprogress(pdev))
    {
        DbgBreakIf((con->type == TCP_CON_TYPE_RX) && (con->u.rx.skp_bytes_copied));
    }

    /* If there is completed data, report it in the first END-buffer seen.
     * There can be at most one incompletely completed app buffer. */
    lentry_p = s_list_pop_head(&con->active_tb_list);
    while(lentry_p) {
        tcp_buf = (lm_tcp_buffer_t *)lentry_p;
        con->rq_nbytes -= tcp_buf->size;

        tcp_buf->app_buf_xferred = 0;

        /* Take care of a partially completed buffer */
        if (tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
            tcp_buf->app_buf_xferred = con->app_buf_bytes_acc_comp;
            DbgBreakIf(tcp_buf->app_buf_size < con->app_buf_bytes_acc_comp);
            con->app_buf_bytes_acc_comp = 0;
            DbgBreakIf(S32_SUB(S64_SUB(con->bytes_post_cnt, con->bytes_comp_cnt), (tcp_buf->app_buf_size - tcp_buf->app_buf_xferred)) < 0);
            con->bytes_comp_cnt    += (tcp_buf->app_buf_size - tcp_buf->app_buf_xferred);
            con->bytes_aborted_cnt += (tcp_buf->app_buf_size - tcp_buf->app_buf_xferred);
        }

        s_list_push_tail(&tmp_list, &tcp_buf->link);

        lentry_p = s_list_pop_head(&con->active_tb_list);
    }

    /* GilR 8/3/2006 - TODO - can't assert here. pending might be 1 if a fin request was posted and not completed (tx con) */
    //DbgBreakIf(con->pending_bytes);

    /* Complete all buffers from the active list */
    if(s_list_entry_cnt(&tmp_list)) {
        con->buffer_aborted_cnt += s_list_entry_cnt(&tmp_list);
        if (lm_fl_reset_is_inprogress(pdev)) {
            con->abortion_under_flr++;
        }
        mm_tcp_complete_bufs(pdev, tcp, con, &tmp_list, stat);
    }
    con->flags |= TCP_BUFFERS_ABORTED;

    /* Abort all pending buffers in the UM */
    mm_tcp_abort_bufs(pdev, tcp, con, stat);

    DbgBreakIf(!s_list_is_empty(&con->active_tb_list));
}

/******** qe_buffer interface: cyclic NON-OVERRIDE buffer ****************/

/** Description
 *   Returns the next free cqe in the cqe_buffer and updates the buffer params (head).
 */
char * lm_tcp_qe_buffer_next_free_cqe(lm_tcp_qe_buffer_t *cqe_buffer)
{
    char *cqe;

    cqe = cqe_buffer->head;

    if(cqe == cqe_buffer->last) {
        cqe_buffer->head = cqe_buffer->first; /* cyclic */
    } else {
        cqe_buffer->head = cqe + cqe_buffer->qe_size;
    }

    DbgBreakIf(cqe_buffer->left == 0);
    cqe_buffer->left--;

    return cqe;
}

/** Description
 *   Returns the next occupied cqe in the cqe_buffer and updates the buffer params (tail),
 *   or NULL if the buffer is empty.
 */
char * lm_tcp_qe_buffer_next_occupied_cqe(lm_tcp_qe_buffer_t *cqe_buffer)
{
    char *cqe;

    cqe = cqe_buffer->tail;

    if ((cqe == cqe_buffer->head) && (cqe_buffer->left > 0)) {
        return NULL;
    }

    if(cqe == cqe_buffer->last) {
        cqe_buffer->tail = cqe_buffer->first; /* cyclic */
    } else {
        cqe_buffer->tail = cqe + cqe_buffer->qe_size;
    }

    cqe_buffer->left++;

    return cqe;
}

u8_t lm_tcp_qe_buffer_is_empty(lm_tcp_qe_buffer_t *cqe_buffer)
{
    return ((cqe_buffer->head == cqe_buffer->tail) && (cqe_buffer->left > 0));
}

/******** qe_buffer interface: cyclic OVERRIDE buffer ****************/
char * lm_tcp_qe_buffer_next_cqe_override(lm_tcp_qe_buffer_t *cqe_buffer)
{
    char *cqe;

    cqe = cqe_buffer->head;

    if(cqe == cqe_buffer->last) {
        cqe_buffer->head = cqe_buffer->first; /* cyclic */
    } else {
        cqe_buffer->head = cqe + cqe_buffer->qe_size;
    }

    if (cqe_buffer->left) {
        cqe_buffer->left--;
    }

    return cqe;
}
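/* Illustrative sketch (assumptions, not driver code): a produce/consume round
 * trip through the NON-OVERRIDE cyclic buffer above. The initialization is
 * inferred from how first/last/head/tail/qe_size/left are used in this file:
 * 'last' points at the final entry and 'left' counts free entries. QE_SIZE and
 * the storage array are hypothetical. */
#if 0
char               storage[4 * QE_SIZE];
lm_tcp_qe_buffer_t qbuf;
char              *cqe;

qbuf.first   = storage;
qbuf.last    = storage + 3 * QE_SIZE;  /* address of the 4th (final) entry */
qbuf.head    = qbuf.tail = storage;
qbuf.qe_size = QE_SIZE;
qbuf.left    = 4;                      /* all 4 entries are free */

cqe = lm_tcp_qe_buffer_next_free_cqe(&qbuf);      /* produce: left 4 -> 3 */
/* ... fill *cqe ... */
cqe = lm_tcp_qe_buffer_next_occupied_cqe(&qbuf);  /* consume: left 3 -> 4 */
DbgBreakIf(!lm_tcp_qe_buffer_is_empty(&qbuf));    /* head == tail, left > 0 */
#endif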