Lines Matching refs: ssk

41 static int sdp_process_tx_cq(struct sdp_sock *ssk);
45 sdp_xmit_poll(struct sdp_sock *ssk, int force) in sdp_xmit_poll() argument
49 SDP_WLOCK_ASSERT(ssk); in sdp_xmit_poll()
50 sdp_prf(ssk->socket, NULL, "%s", __func__); in sdp_xmit_poll()
54 if (!callout_pending(&ssk->tx_ring.timer)) in sdp_xmit_poll()
55 callout_reset(&ssk->tx_ring.timer, SDP_TX_POLL_TIMEOUT, in sdp_xmit_poll()
56 sdp_poll_tx_timeout, ssk); in sdp_xmit_poll()
59 if (force || (++ssk->tx_ring.poll_cnt & (SDP_TX_POLL_MODER - 1)) == 0) in sdp_xmit_poll()
60 wc_processed = sdp_process_tx_cq(ssk); in sdp_xmit_poll()
66 sdp_post_send(struct sdp_sock *ssk, struct mbuf *mb) in sdp_post_send() argument
82 if (!ssk->qp_active) { in sdp_post_send()
87 mseq = ring_head(ssk->tx_ring); in sdp_post_send()
89 ssk->tx_packets++; in sdp_post_send()
90 ssk->tx_bytes += mb->m_pkthdr.len; in sdp_post_send()
95 if (ssk->tx_sa != tx_sa) { in sdp_post_send()
96 sdp_dbg_data(ssk->socket, "SrcAvail cancelled " in sdp_post_send()
112 h->bufs = htons(rx_ring_posted(ssk)); in sdp_post_send()
115 h->mseq_ack = htonl(mseq_ack(ssk)); in sdp_post_send()
117 sdp_prf1(ssk->socket, mb, "TX: %s bufs: %d mseq:%ld ack:%d", in sdp_post_send()
118 mid2str(h->mid), rx_ring_posted(ssk), mseq, in sdp_post_send()
121 SDP_DUMP_PACKET(ssk->socket, "TX", mb, h); in sdp_post_send()
123 tx_req = &ssk->tx_ring.buffer[mseq & (SDP_TX_SIZE - 1)]; in sdp_post_send()
125 dev = ssk->ib_device; in sdp_post_send()
136 sge->lkey = ssk->sdp_dev->pd->local_dma_lkey; in sdp_post_send()
147 rc = ib_post_send(ssk->qp, &tx_wr, &bad_wr); in sdp_post_send()
149 sdp_dbg(ssk->socket, in sdp_post_send()
152 sdp_cleanup_sdp_buf(ssk, tx_req, DMA_TO_DEVICE); in sdp_post_send()
154 sdp_notify(ssk, ECONNRESET); in sdp_post_send()
159 atomic_inc(&ssk->tx_ring.head); in sdp_post_send()
160 atomic_dec(&ssk->tx_ring.credits); in sdp_post_send()
161 atomic_set(&ssk->remote_credits, rx_ring_posted(ssk)); in sdp_post_send()
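sdp_post_send() selects its transmit descriptor with mseq & (SDP_TX_SIZE - 1) (source line 123) and, once ib_post_send() succeeds, advances tx_ring.head and consumes a credit (lines 159-161). The sketch below shows that power-of-two ring addressing in isolation; it assumes SDP_TX_SIZE is a power of two and that the posted depth is head minus tail, and the struct, size, and slot type are stand-ins rather than the driver's definitions.

#include <assert.h>
#include <stdint.h>

#define SDP_TX_SIZE 256		/* assumed power-of-two ring size */

struct demo_tx_ring {
	uint32_t head;		/* next sequence number to post */
	uint32_t tail;		/* oldest unreclaimed sequence number */
	int	 slot[SDP_TX_SIZE];
};

/* Power-of-two masking maps an ever-growing sequence number onto a ring
 * slot, as at source line 123. */
static int *
tx_slot(struct demo_tx_ring *ring, uint32_t mseq)
{
	return (&ring->slot[mseq & (SDP_TX_SIZE - 1)]);
}

int
main(void)
{
	struct demo_tx_ring ring = { .head = 1, .tail = 1 };	/* cf. lines 435-436 */
	uint32_t mseq = ring.head;

	*tx_slot(&ring, mseq) = 42;	/* "post" one descriptor */
	ring.head++;			/* cf. atomic_inc(&ssk->tx_ring.head) */

	assert(ring.head - ring.tail == 1);		/* one send in flight */
	assert(tx_slot(&ring, 0) == tx_slot(&ring, SDP_TX_SIZE));	/* wrap-around */
	return (0);
}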
167 sdp_send_completion(struct sdp_sock *ssk, int mseq) in sdp_send_completion() argument
172 struct sdp_tx_ring *tx_ring = &ssk->tx_ring; in sdp_send_completion()
180 dev = ssk->ib_device; in sdp_send_completion()
183 sdp_cleanup_sdp_buf(ssk, tx_req, DMA_TO_DEVICE); in sdp_send_completion()
198 sdp_handle_send_comp(struct sdp_sock *ssk, struct ib_wc *wc) in sdp_handle_send_comp() argument
205 sdp_prf(ssk->socket, mb, "Send completion with error. " in sdp_handle_send_comp()
207 sdp_dbg_data(ssk->socket, "Send completion with error. " in sdp_handle_send_comp()
209 sdp_notify(ssk, ECONNRESET); in sdp_handle_send_comp()
213 mb = sdp_send_completion(ssk, wc->wr_id); in sdp_handle_send_comp()
218 sdp_prf1(ssk->socket, mb, "tx completion. mseq:%d", ntohl(h->mseq)); in sdp_handle_send_comp()
219 sdp_dbg(ssk->socket, "tx completion. %p %d mseq:%d", in sdp_handle_send_comp()
227 sdp_process_tx_wc(struct sdp_sock *ssk, struct ib_wc *wc) in sdp_process_tx_wc() argument
231 sdp_handle_send_comp(ssk, wc); in sdp_process_tx_wc()
239 sdp_dbg_data(ssk->socket, in sdp_process_tx_wc()
243 if (!ssk->tx_ring.rdma_inflight) { in sdp_process_tx_wc()
244 sdp_warn(ssk->socket, "ERROR: unexpected RDMA read\n"); in sdp_process_tx_wc()
248 if (!ssk->tx_ring.rdma_inflight->busy) { in sdp_process_tx_wc()
249 sdp_warn(ssk->socket, in sdp_process_tx_wc()
257 ssk->tx_ring.rdma_inflight->busy = 0; in sdp_process_tx_wc()
258 sowwakeup(ssk->socket); in sdp_process_tx_wc()
259 sdp_dbg_data(ssk->socket, "woke up sleepers\n"); in sdp_process_tx_wc()
270 sdp_dbg(ssk->socket, " %s consumes KEEPALIVE status %d\n", in sdp_process_tx_wc()
276 sdp_notify(ssk, ECONNRESET); in sdp_process_tx_wc()
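sdp_process_tx_wc() dispatches on what completed: ordinary sends go through sdp_handle_send_comp(), a finished RDMA read clears tx_ring.rdma_inflight->busy and wakes the socket's writers, a keepalive is simply consumed, and an error status resets the connection (lines 231-276). The toy classifier below mirrors that control flow with stand-in opcode and status enums; it is an illustration only, not the driver's ib_wc handling.

#include <stdio.h>

/* Stand-in opcode/status values; the real code reads the ib_wc fields. */
enum demo_opcode { DEMO_WC_SEND, DEMO_WC_RDMA_READ };
enum demo_status { DEMO_WC_SUCCESS, DEMO_WC_ERROR };

struct demo_wc {
	enum demo_opcode opcode;
	enum demo_status status;
};

/* Rough dispatch mirroring sdp_process_tx_wc(): completed sends are
 * reclaimed, a completed RDMA read wakes the sleeping writer, and any
 * error resets the connection. */
static const char *
classify_tx_wc(const struct demo_wc *wc)
{
	if (wc->status != DEMO_WC_SUCCESS)
		return ("notify ECONNRESET");
	if (wc->opcode == DEMO_WC_RDMA_READ)
		return ("clear rdma_inflight->busy, sowwakeup()");
	return ("reclaim send descriptor");
}

int
main(void)
{
	struct demo_wc send_ok = { DEMO_WC_SEND, DEMO_WC_SUCCESS };
	struct demo_wc read_ok = { DEMO_WC_RDMA_READ, DEMO_WC_SUCCESS };
	struct demo_wc failed  = { DEMO_WC_SEND, DEMO_WC_ERROR };

	printf("send ok: %s\n", classify_tx_wc(&send_ok));
	printf("read ok: %s\n", classify_tx_wc(&read_ok));
	printf("error:   %s\n", classify_tx_wc(&failed));
	return (0);
}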
280 sdp_process_tx_cq(struct sdp_sock *ssk) in sdp_process_tx_cq() argument
286 SDP_WLOCK_ASSERT(ssk); in sdp_process_tx_cq()
288 if (!ssk->tx_ring.cq) { in sdp_process_tx_cq()
289 sdp_dbg(ssk->socket, "tx irq on destroyed tx_cq\n"); in sdp_process_tx_cq()
294 n = ib_poll_cq(ssk->tx_ring.cq, SDP_NUM_WC, ibwc); in sdp_process_tx_cq()
296 sdp_process_tx_wc(ssk, ibwc + i); in sdp_process_tx_cq()
302 sdp_post_sends(ssk, M_NOWAIT); in sdp_process_tx_cq()
304 (u32) tx_ring_posted(ssk)); in sdp_process_tx_cq()
305 sowwakeup(ssk->socket); in sdp_process_tx_cq()
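sdp_process_tx_cq() polls up to SDP_NUM_WC work completions at a time (source line 294) and hands each one to sdp_process_tx_wc() before posting new sends and waking writers. The excerpt does not show the surrounding loop, but such drain loops conventionally repeat while full batches keep coming back; the sketch below shows that pattern, with demo_poll_cq() standing in for ib_poll_cq() (it is not part of any RDMA API).

#include <stdio.h>

#define SDP_NUM_WC 4

struct demo_wc { int wr_id; };

/* Stand-in for ib_poll_cq(): reports how many completions it filled in.
 * Here it fabricates ten completions in total for the demo. */
static int
demo_poll_cq(struct demo_wc *wc, int max)
{
	static int remaining = 10;
	int n = remaining < max ? remaining : max;

	for (int i = 0; i < n; i++)
		wc[i].wr_id = --remaining;
	return (n);
}

int
main(void)
{
	struct demo_wc ibwc[SDP_NUM_WC];
	int i, n, wc_processed = 0;

	do {
		n = demo_poll_cq(ibwc, SDP_NUM_WC);
		for (i = 0; i < n; i++)
			wc_processed++;		/* cf. sdp_process_tx_wc() */
	} while (n == SDP_NUM_WC);		/* stop after a short batch */

	printf("processed %d completions\n", wc_processed);
	return (0);
}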
312 sdp_poll_tx(struct sdp_sock *ssk) in sdp_poll_tx() argument
314 struct socket *sk = ssk->socket; in sdp_poll_tx()
317 sdp_prf1(ssk->socket, NULL, "TX timeout: inflight=%d, head=%d tail=%d", in sdp_poll_tx()
318 (u32) tx_ring_posted(ssk), in sdp_poll_tx()
319 ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring)); in sdp_poll_tx()
321 if (unlikely(ssk->state == TCPS_CLOSED)) { in sdp_poll_tx()
326 wc_processed = sdp_process_tx_cq(ssk); in sdp_poll_tx()
332 inflight = (u32) tx_ring_posted(ssk); in sdp_poll_tx()
333 sdp_prf1(ssk->socket, NULL, "finished tx processing. inflight = %d", in sdp_poll_tx()
340 callout_reset(&ssk->tx_ring.timer, SDP_TX_POLL_TIMEOUT, in sdp_poll_tx()
341 sdp_poll_tx_timeout, ssk); in sdp_poll_tx()
344 if (ssk->tx_ring.rdma_inflight && ssk->tx_ring.rdma_inflight->busy) { in sdp_poll_tx()
346 sdp_arm_tx_cq(ssk); in sdp_poll_tx()
355 struct sdp_sock *ssk = (struct sdp_sock *)data; in sdp_poll_tx_timeout() local
357 if (!callout_active(&ssk->tx_ring.timer)) in sdp_poll_tx_timeout()
359 callout_deactivate(&ssk->tx_ring.timer); in sdp_poll_tx_timeout()
360 sdp_poll_tx(ssk); in sdp_poll_tx_timeout()
366 struct sdp_sock *ssk; in sdp_tx_irq() local
368 ssk = cq_context; in sdp_tx_irq()
369 sdp_prf1(ssk->socket, NULL, "tx irq"); in sdp_tx_irq()
370 sdp_dbg_data(ssk->socket, "Got tx comp interrupt\n"); in sdp_tx_irq()
372 SDP_WLOCK(ssk); in sdp_tx_irq()
373 sdp_poll_tx(ssk); in sdp_tx_irq()
374 SDP_WUNLOCK(ssk); in sdp_tx_irq()
378 void sdp_tx_ring_purge(struct sdp_sock *ssk) in sdp_tx_ring_purge() argument
380 while (tx_ring_posted(ssk)) { in sdp_tx_ring_purge()
382 mb = sdp_send_completion(ssk, ring_tail(ssk->tx_ring)); in sdp_tx_ring_purge()
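sdp_tx_ring_purge() repeatedly reclaims the descriptor at the ring tail until nothing remains posted (lines 380-382). A self-contained sketch of that tail-walking purge, reusing the head/tail convention assumed in the earlier ring sketch; freeing the mbuf and unmapping the buffer are reduced to a counter here.

#include <assert.h>
#include <stdint.h>

#define DEMO_TX_SIZE 8		/* small power-of-two ring for the demo */

struct demo_ring {
	uint32_t head;
	uint32_t tail;
	int	 slot[DEMO_TX_SIZE];
};

static uint32_t
posted(const struct demo_ring *r)
{
	return (r->head - r->tail);	/* assumed meaning of tx_ring_posted() */
}

/* Reclaim the oldest posted slot and advance the tail: the per-iteration
 * step of the purge loop. */
static int
reclaim_tail(struct demo_ring *r)
{
	int val = r->slot[r->tail & (DEMO_TX_SIZE - 1)];

	r->tail++;
	return (val);
}

int
main(void)
{
	struct demo_ring r = { .head = 1, .tail = 1 };
	int freed = 0;

	for (int i = 0; i < 5; i++)		/* post five descriptors */
		r.slot[r.head++ & (DEMO_TX_SIZE - 1)] = i;

	while (posted(&r) > 0) {		/* cf. while (tx_ring_posted(ssk)) */
		(void)reclaim_tail(&r);
		freed++;
	}
	assert(freed == 5 && posted(&r) == 0);
	return (0);
}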
390 sdp_post_keepalive(struct sdp_sock *ssk) in sdp_post_keepalive() argument
396 sdp_dbg(ssk->socket, "%s\n", __func__); in sdp_post_keepalive()
406 rc = ib_post_send(ssk->qp, &wr, &bad_wr); in sdp_post_keepalive()
408 sdp_dbg(ssk->socket, in sdp_post_keepalive()
410 sdp_notify(ssk, ECONNRESET); in sdp_post_keepalive()
422 sdp_tx_ring_create(struct sdp_sock *ssk, struct ib_device *device) in sdp_tx_ring_create() argument
432 sdp_dbg(ssk->socket, "tx ring create\n"); in sdp_tx_ring_create()
433 callout_init_rw(&ssk->tx_ring.timer, &ssk->lock, 0); in sdp_tx_ring_create()
434 callout_init_rw(&ssk->nagle_timer, &ssk->lock, 0); in sdp_tx_ring_create()
435 atomic_set(&ssk->tx_ring.head, 1); in sdp_tx_ring_create()
436 atomic_set(&ssk->tx_ring.tail, 1); in sdp_tx_ring_create()
438 ssk->tx_ring.buffer = malloc(sizeof(*ssk->tx_ring.buffer) * SDP_TX_SIZE, in sdp_tx_ring_create()
442 ssk, &tx_cq_attr); in sdp_tx_ring_create()
445 sdp_warn(ssk->socket, "Unable to allocate TX CQ: %d.\n", rc); in sdp_tx_ring_create()
448 ssk->tx_ring.cq = tx_cq; in sdp_tx_ring_create()
449 ssk->tx_ring.poll_cnt = 0; in sdp_tx_ring_create()
450 sdp_arm_tx_cq(ssk); in sdp_tx_ring_create()
455 free(ssk->tx_ring.buffer, M_SDP); in sdp_tx_ring_create()
456 ssk->tx_ring.buffer = NULL; in sdp_tx_ring_create()
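sdp_tx_ring_create() allocates the descriptor array, then the TX CQ, and on CQ failure unwinds by freeing the array again (lines 438-456), the usual goto-based error unwinding. The sketch below reproduces only that unwind shape; the allocations are placeholders, not the driver's buffer or its ib_create_cq() call.

#include <stdio.h>
#include <stdlib.h>

/* Placeholder "CQ creation" that can be made to fail for the demo. */
static void *
demo_create_cq(int fail)
{
	return (fail ? NULL : malloc(1));
}

/* Allocate a buffer, then a CQ; unwind the buffer if the CQ fails,
 * mirroring the err_cq path that frees tx_ring.buffer (lines 455-456). */
static int
demo_ring_create(int fail_cq, void **bufp, void **cqp)
{
	void *buffer, *cq;

	buffer = malloc(64);
	if (buffer == NULL)
		return (-1);

	cq = demo_create_cq(fail_cq);
	if (cq == NULL)
		goto err_cq;

	*bufp = buffer;
	*cqp = cq;
	return (0);

err_cq:
	free(buffer);
	return (-1);
}

int
main(void)
{
	void *buf, *cq;

	printf("create (cq ok): %d\n", demo_ring_create(0, &buf, &cq));
	printf("create (cq fails): %d\n", demo_ring_create(1, &buf, &cq));
	free(buf);
	free(cq);
	return (0);
}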
461 sdp_tx_ring_destroy(struct sdp_sock *ssk) in sdp_tx_ring_destroy() argument
464 sdp_dbg(ssk->socket, "tx ring destroy\n"); in sdp_tx_ring_destroy()
465 SDP_WLOCK(ssk); in sdp_tx_ring_destroy()
466 callout_stop(&ssk->tx_ring.timer); in sdp_tx_ring_destroy()
467 callout_stop(&ssk->nagle_timer); in sdp_tx_ring_destroy()
468 SDP_WUNLOCK(ssk); in sdp_tx_ring_destroy()
469 callout_drain(&ssk->tx_ring.timer); in sdp_tx_ring_destroy()
470 callout_drain(&ssk->nagle_timer); in sdp_tx_ring_destroy()
472 if (ssk->tx_ring.buffer) { in sdp_tx_ring_destroy()
473 sdp_tx_ring_purge(ssk); in sdp_tx_ring_destroy()
474 free(ssk->tx_ring.buffer, M_SDP); in sdp_tx_ring_destroy()
475 ssk->tx_ring.buffer = NULL; in sdp_tx_ring_destroy()
478 if (ssk->tx_ring.cq) { in sdp_tx_ring_destroy()
479 ib_destroy_cq(ssk->tx_ring.cq); in sdp_tx_ring_destroy()
480 ssk->tx_ring.cq = NULL; in sdp_tx_ring_destroy()
483 WARN_ON(ring_head(ssk->tx_ring) != ring_tail(ssk->tx_ring)); in sdp_tx_ring_destroy()