Lines Matching refs:ssk

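These are the cross-reference hits for ssk, the per-connection struct sdp_sock, in what appears to be the FreeBSD Sockets Direct Protocol (SDP) transmit path. The hits span three functions: sdp_nagle_off(), which decides whether an mbuf may be sent immediately; sdp_nagle_timeout(), the callout handler that flushes data Nagle held back; and sdp_post_sends(), the transmit pump itself. A reconstructed sketch follows each function's hits below.
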
107 sdp_nagle_off(struct sdp_sock *ssk, struct mbuf *mb)  in sdp_nagle_off()  argument
118 (ssk->flags & SDP_NODELAY) || in sdp_nagle_off()
119 !ssk->nagle_last_unacked || in sdp_nagle_off()
120 mb->m_pkthdr.len >= ssk->xmit_size_goal / 4 || in sdp_nagle_off()
124 unsigned long mseq = ring_head(ssk->tx_ring); in sdp_nagle_off()
125 ssk->nagle_last_unacked = mseq; in sdp_nagle_off()
127 if (!callout_pending(&ssk->nagle_timer)) { in sdp_nagle_off()
128 callout_reset(&ssk->nagle_timer, SDP_NAGLE_TIMEOUT, in sdp_nagle_off()
129 sdp_nagle_timeout, ssk); in sdp_nagle_off()
130 sdp_dbg_data(ssk->socket, "Starting nagle timer\n"); in sdp_nagle_off()
133 sdp_dbg_data(ssk->socket, "send_now = %d last_unacked = %ld\n", in sdp_nagle_off()
134 send_now, ssk->nagle_last_unacked); in sdp_nagle_off()
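
The hits above are only the ssk-bearing lines of sdp_nagle_off(). A sketch of how they plausibly fit together; the if/else shape is an assumption, and the send_now expression has non-ssk conditions before file line 118 and after file line 120 that the listing cannot show (marked in comments):

static int
sdp_nagle_off(struct sdp_sock *ssk, struct mbuf *mb)
{
        int send_now =
            /* condition(s) on the lines before file line 118, elided */
            (ssk->flags & SDP_NODELAY) ||       /* Nagle disabled */
            !ssk->nagle_last_unacked ||         /* nothing deferred yet */
            mb->m_pkthdr.len >= ssk->xmit_size_goal / 4 ||
            0;  /* further condition on file line 121, elided */

        if (send_now) {
                /* Record the TX ring head so the timer can see progress. */
                unsigned long mseq = ring_head(ssk->tx_ring);

                ssk->nagle_last_unacked = mseq;
        } else if (!callout_pending(&ssk->nagle_timer)) {
                /* Hold the mbuf back and arm the Nagle timer once. */
                callout_reset(&ssk->nagle_timer, SDP_NAGLE_TIMEOUT,
                    sdp_nagle_timeout, ssk);
                sdp_dbg_data(ssk->socket, "Starting nagle timer\n");
        }
        sdp_dbg_data(ssk->socket, "send_now = %d last_unacked = %ld\n",
            send_now, ssk->nagle_last_unacked);
        return (send_now);
}
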
142 struct sdp_sock *ssk = (struct sdp_sock *)data; in sdp_nagle_timeout() local
143 struct socket *sk = ssk->socket; in sdp_nagle_timeout()
145 sdp_dbg_data(sk, "last_unacked = %ld\n", ssk->nagle_last_unacked); in sdp_nagle_timeout()
147 if (!callout_active(&ssk->nagle_timer)) in sdp_nagle_timeout()
149 callout_deactivate(&ssk->nagle_timer); in sdp_nagle_timeout()
151 if (!ssk->nagle_last_unacked) in sdp_nagle_timeout()
153 if (ssk->state == TCPS_CLOSED) in sdp_nagle_timeout()
155 ssk->nagle_last_unacked = 0; in sdp_nagle_timeout()
156 sdp_post_sends(ssk, M_NOWAIT); in sdp_nagle_timeout()
158 sowwakeup(ssk->socket); in sdp_nagle_timeout()
161 callout_reset(&ssk->nagle_timer, SDP_NAGLE_TIMEOUT, in sdp_nagle_timeout()
162 sdp_nagle_timeout, ssk); in sdp_nagle_timeout()
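
sdp_nagle_timeout() is the handler armed by the callout_reset() above. The hits give its body almost line for line; in this sketch only the early-exit targets and the guard in front of the final callout_reset() (file line 160 carries no ssk and is absent) are assumptions:

void
sdp_nagle_timeout(void *data)
{
        struct sdp_sock *ssk = (struct sdp_sock *)data;
        struct socket *sk = ssk->socket;

        sdp_dbg_data(sk, "last_unacked = %ld\n", ssk->nagle_last_unacked);

        /* The callout may have been stopped after this handler fired. */
        if (!callout_active(&ssk->nagle_timer))
                return;
        callout_deactivate(&ssk->nagle_timer);

        if (!ssk->nagle_last_unacked)
                goto out;               /* nothing was deferred */
        if (ssk->state == TCPS_CLOSED)
                return;

        /* Flush whatever Nagle held back, then wake up writers. */
        ssk->nagle_last_unacked = 0;
        sdp_post_sends(ssk, M_NOWAIT);
        sowwakeup(ssk->socket);
out:
        /* Re-arm; the guard here (file line 160) is not in the hits. */
        callout_reset(&ssk->nagle_timer, SDP_NAGLE_TIMEOUT,
            sdp_nagle_timeout, ssk);
}
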
166 sdp_post_sends(struct sdp_sock *ssk, int wait) in sdp_post_sends() argument
173 sk = ssk->socket; in sdp_post_sends()
174 if (unlikely(!ssk->id)) { in sdp_post_sends()
176 sdp_dbg(ssk->socket, in sdp_post_sends()
178 sdp_notify(ssk, ECONNRESET); in sdp_post_sends()
183 if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2) in sdp_post_sends()
184 sdp_xmit_poll(ssk, 1); in sdp_post_sends()
186 if (ssk->recv_request && in sdp_post_sends()
187 ring_tail(ssk->rx_ring) >= ssk->recv_request_head && in sdp_post_sends()
188 tx_credits(ssk) >= SDP_MIN_TX_CREDITS && in sdp_post_sends()
189 sdp_tx_ring_slots_left(ssk)) { in sdp_post_sends()
191 ssk->recv_bytes - SDP_HEAD_SIZE, wait); in sdp_post_sends()
194 ssk->recv_request = 0; in sdp_post_sends()
195 sdp_post_send(ssk, mb); in sdp_post_sends()
199 if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS && in sdp_post_sends()
200 sdp_tx_ring_slots_left(ssk) && sk->so_snd.sb_sndptr && in sdp_post_sends()
201 sdp_nagle_off(ssk, sk->so_snd.sb_sndptr)) { in sdp_post_sends()
205 while (tx_credits(ssk) > SDP_MIN_TX_CREDITS && in sdp_post_sends()
206 sdp_tx_ring_slots_left(ssk) && (mb = sk->so_snd.sb_sndptr) && in sdp_post_sends()
207 sdp_nagle_off(ssk, mb)) { in sdp_post_sends()
218 sdp_post_send(ssk, mb); in sdp_post_sends()
222 if (credit_update_needed(ssk) && ssk->state >= TCPS_ESTABLISHED && in sdp_post_sends()
223 ssk->state < TCPS_FIN_WAIT_2) { in sdp_post_sends()
224 mb = sdp_alloc_mb_data(ssk->socket, wait); in sdp_post_sends()
227 sdp_post_send(ssk, mb); in sdp_post_sends()
238 if ((ssk->flags & SDP_NEEDFIN) && !sk->so_snd.sb_sndptr && in sdp_post_sends()
239 tx_credits(ssk) > 1) { in sdp_post_sends()
243 ssk->flags &= ~SDP_NEEDFIN; in sdp_post_sends()
244 sdp_post_send(ssk, mb); in sdp_post_sends()
247 low = (sdp_tx_ring_slots_left(ssk) <= SDP_MIN_TX_CREDITS); in sdp_post_sends()
250 sdp_arm_tx_cq(ssk); in sdp_post_sends()
251 if (sdp_xmit_poll(ssk, low)) in sdp_post_sends()
257 ssk->nagle_last_unacked = -1; in sdp_post_sends()
258 callout_reset(&ssk->nagle_timer, 1, sdp_nagle_timeout, ssk); in sdp_post_sends()
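
sdp_post_sends() is the transmit pump: it resets an unconnected socket, reaps completions when the TX ring is half consumed, acknowledges a peer's receive-buffer change request, drains the socket send buffer while flow-control credits and sdp_nagle_off() permit, pushes a standalone credit update, and queues a disconnect once the send buffer is empty (the tx_credits(ssk) > 1 test at file line 239 keeps one credit in reserve). The hits at file lines 257-258 are an allocation-failure path that forces an immediate Nagle retry. In the condensed sketch below, the allocator names sdp_alloc_mb_chrcvbuf_ack()/sdp_alloc_mb_disconnect(), the allocfail label, and the dequeue bookkeeping inside the loop are assumptions, since those lines carry no ssk and do not appear above:

void
sdp_post_sends(struct sdp_sock *ssk, int wait)
{
        struct socket *sk;
        struct mbuf *mb;
        int low;

        sk = ssk->socket;
        if (unlikely(!ssk->id)) {
                /* No connection underneath the socket: reset it.
                 * (File line 175 is absent; an inner guard may be elided.) */
                sdp_dbg(ssk->socket,
                    "Send attempt with unconnected socket\n");
                sdp_notify(ssk, ECONNRESET);
                return;
        }

        /* Reap TX completions early once half the ring is in flight. */
        if (sdp_tx_ring_slots_left(ssk) < SDP_TX_SIZE / 2)
                sdp_xmit_poll(ssk, 1);

        /* Acknowledge a pending receive-buffer change request. */
        if (ssk->recv_request &&
            ring_tail(ssk->rx_ring) >= ssk->recv_request_head &&
            tx_credits(ssk) >= SDP_MIN_TX_CREDITS &&
            sdp_tx_ring_slots_left(ssk)) {
                mb = sdp_alloc_mb_chrcvbuf_ack(sk,      /* name assumed */
                    ssk->recv_bytes - SDP_HEAD_SIZE, wait);
                if (mb == NULL)
                        goto allocfail;
                ssk->recv_request = 0;
                sdp_post_send(ssk, mb);
        }

        /* Data is ready but credits are exhausted. */
        if (tx_credits(ssk) <= SDP_MIN_TX_CREDITS &&
            sdp_tx_ring_slots_left(ssk) && sk->so_snd.sb_sndptr &&
            sdp_nagle_off(ssk, sk->so_snd.sb_sndptr)) {
                ;       /* body not in the hits; likely just accounting */
        }

        /* Main pump: post while credits, ring slots and Nagle allow. */
        while (tx_credits(ssk) > SDP_MIN_TX_CREDITS &&
            sdp_tx_ring_slots_left(ssk) && (mb = sk->so_snd.sb_sndptr) &&
            sdp_nagle_off(ssk, mb)) {
                /* Dequeue mb from sk->so_snd here (assumed: those lines
                 * carry no ssk), then post it to the TX ring. */
                sdp_post_send(ssk, mb);
        }

        /* Push a standalone credit update on a live connection. */
        if (credit_update_needed(ssk) && ssk->state >= TCPS_ESTABLISHED &&
            ssk->state < TCPS_FIN_WAIT_2) {
                mb = sdp_alloc_mb_data(ssk->socket, wait);
                if (mb == NULL)
                        goto allocfail;
                sdp_post_send(ssk, mb);
        }

        /* Queue a disconnect once the send buffer drains; the > 1 test
         * keeps one credit in reserve. */
        if ((ssk->flags & SDP_NEEDFIN) && !sk->so_snd.sb_sndptr &&
            tx_credits(ssk) > 1) {
                mb = sdp_alloc_mb_disconnect(sk, wait); /* name assumed */
                if (mb == NULL)
                        goto allocfail;
                ssk->flags &= ~SDP_NEEDFIN;
                sdp_post_send(ssk, mb);
        }

        /* Ring nearly full: arm the TX CQ, then poll. The hit at file
         * line 251 tests the poll result, but its branch target is not
         * in the listing; the enclosing condition is also assumed. */
        low = (sdp_tx_ring_slots_left(ssk) <= SDP_MIN_TX_CREDITS);
        if (low) {
                sdp_arm_tx_cq(ssk);
                (void)sdp_xmit_poll(ssk, low);
        }
        return;

allocfail:
        /* Allocation failed: force an immediate Nagle-timer retry. */
        ssk->nagle_last_unacked = -1;
        callout_reset(&ssk->nagle_timer, 1, sdp_nagle_timeout, ssk);
}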