1fe267a55SPedro F. Giffuni /*-
2fe267a55SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3fe267a55SPedro F. Giffuni *
4aa0a1e58SJeff Roberson * Copyright (c) 2009 Mellanox Technologies Ltd. All rights reserved.
5aa0a1e58SJeff Roberson *
6aa0a1e58SJeff Roberson * This software is available to you under a choice of one of two
7aa0a1e58SJeff Roberson * licenses. You may choose to be licensed under the terms of the GNU
8aa0a1e58SJeff Roberson * General Public License (GPL) Version 2, available from the file
9aa0a1e58SJeff Roberson * COPYING in the main directory of this source tree, or the
10aa0a1e58SJeff Roberson * OpenIB.org BSD license below:
11aa0a1e58SJeff Roberson *
12aa0a1e58SJeff Roberson * Redistribution and use in source and binary forms, with or
13aa0a1e58SJeff Roberson * without modification, are permitted provided that the following
14aa0a1e58SJeff Roberson * conditions are met:
15aa0a1e58SJeff Roberson *
16aa0a1e58SJeff Roberson * - Redistributions of source code must retain the above
17aa0a1e58SJeff Roberson * copyright notice, this list of conditions and the following
18aa0a1e58SJeff Roberson * disclaimer.
19aa0a1e58SJeff Roberson *
20aa0a1e58SJeff Roberson * - Redistributions in binary form must reproduce the above
21aa0a1e58SJeff Roberson * copyright notice, this list of conditions and the following
22aa0a1e58SJeff Roberson * disclaimer in the documentation and/or other materials
23aa0a1e58SJeff Roberson * provided with the distribution.
24aa0a1e58SJeff Roberson *
25aa0a1e58SJeff Roberson * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26aa0a1e58SJeff Roberson * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27aa0a1e58SJeff Roberson * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28aa0a1e58SJeff Roberson * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29aa0a1e58SJeff Roberson * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30aa0a1e58SJeff Roberson * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31aa0a1e58SJeff Roberson * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32aa0a1e58SJeff Roberson * SOFTWARE.
33aa0a1e58SJeff Roberson */
34aa0a1e58SJeff Roberson #include "sdp.h"
35aa0a1e58SJeff Roberson
/*
 * Tunables: the initial receive buffer size, and the scale factor
 * applied when computing the cap on posted-plus-queued receive bytes
 * (see sdp_post_recvs_needed()).
 */
SDP_MODPARAM_INT(rcvbuf_initial_size, 32 * 1024,
	"Receive buffer initial size in bytes.");
SDP_MODPARAM_SINT(rcvbuf_scale, 0x8,
	"Receive buffer size scale factor.");
40aa0a1e58SJeff Roberson
/* Like tcp_fin - called when SDP_MID_DISCONNECT is received */
/*
 * Advance the connection state machine in response to the peer's
 * disconnect message.  Called with the SDP write lock held.  In
 * FIN_WAIT_1 this also starts the Infiniband teardown by issuing
 * rdma_disconnect() (sends a DREQ).
 */
static void
sdp_handle_disconn(struct sdp_sock *ssk)
{

	sdp_dbg(ssk->socket, "%s\n", __func__);

	SDP_WLOCK_ASSERT(ssk);
	/* First FIN seen: no more data will be delivered to the socket. */
	if (TCPS_HAVERCVDFIN(ssk->state) == 0)
		socantrcvmore(ssk->socket);

	switch (ssk->state) {
	case TCPS_SYN_RECEIVED:
	case TCPS_ESTABLISHED:
		/* Passive close; our side may still have data to send. */
		ssk->state = TCPS_CLOSE_WAIT;
		break;

	case TCPS_FIN_WAIT_1:
		/* Received a reply FIN - start Infiniband tear down */
		sdp_dbg(ssk->socket,
		    "%s: Starting Infiniband tear down sending DREQ\n",
		    __func__);

		sdp_cancel_dreq_wait_timeout(ssk);
		ssk->qp_active = 0;
		if (ssk->id) {
			struct rdma_cm_id *id;

			id = ssk->id;
			/*
			 * Drop the lock across rdma_disconnect();
			 * presumably it may sleep or re-enter via CM
			 * callbacks that take this lock -- TODO confirm.
			 */
			SDP_WUNLOCK(ssk);
			rdma_disconnect(id);
			SDP_WLOCK(ssk);
		} else {
			sdp_warn(ssk->socket,
			    "%s: ssk->id is NULL\n", __func__);
			return;
		}
		break;
	case TCPS_TIME_WAIT:
		/* This is a mutual close situation and we've got the DREQ from
		   the peer before the SDP_MID_DISCONNECT */
		break;
	case TCPS_CLOSED:
		/* FIN arrived after IB teardown started - do nothing */
		sdp_dbg(ssk->socket, "%s: fin in state %s\n",
		    __func__, sdp_state_str(ssk->state));
		return;
	default:
		sdp_warn(ssk->socket,
		    "%s: FIN in unexpected state. state=%d\n",
		    __func__, ssk->state);
		break;
	}
}
95aa0a1e58SJeff Roberson
96aa0a1e58SJeff Roberson static int
sdp_post_recv(struct sdp_sock * ssk)97aa0a1e58SJeff Roberson sdp_post_recv(struct sdp_sock *ssk)
98aa0a1e58SJeff Roberson {
99aa0a1e58SJeff Roberson struct sdp_buf *rx_req;
100aa0a1e58SJeff Roberson int i, rc;
101aa0a1e58SJeff Roberson u64 addr;
102aa0a1e58SJeff Roberson struct ib_device *dev;
103aa0a1e58SJeff Roberson struct ib_recv_wr rx_wr = { NULL };
104aa0a1e58SJeff Roberson struct ib_sge ibsge[SDP_MAX_RECV_SGES];
105aa0a1e58SJeff Roberson struct ib_sge *sge = ibsge;
106693ddf4dSHans Petter Selasky const struct ib_recv_wr *bad_wr;
107aa0a1e58SJeff Roberson struct mbuf *mb, *m;
108aa0a1e58SJeff Roberson struct sdp_bsdh *h;
109aa0a1e58SJeff Roberson int id = ring_head(ssk->rx_ring);
110aa0a1e58SJeff Roberson
111aa0a1e58SJeff Roberson /* Now, allocate and repost recv */
112aa0a1e58SJeff Roberson sdp_prf(ssk->socket, mb, "Posting mb");
113aa0a1e58SJeff Roberson mb = m_getm2(NULL, ssk->recv_bytes, M_NOWAIT, MT_DATA, M_PKTHDR);
114aa0a1e58SJeff Roberson if (mb == NULL) {
115aa0a1e58SJeff Roberson /* Retry so we can't stall out with no memory. */
116aa0a1e58SJeff Roberson if (!rx_ring_posted(ssk))
117aa0a1e58SJeff Roberson queue_work(rx_comp_wq, &ssk->rx_comp_work);
118aa0a1e58SJeff Roberson return -1;
119aa0a1e58SJeff Roberson }
120aa0a1e58SJeff Roberson for (m = mb; m != NULL; m = m->m_next) {
1215e3cd9e1SHans Petter Selasky m->m_len = M_SIZE(m);
122aa0a1e58SJeff Roberson mb->m_pkthdr.len += m->m_len;
123aa0a1e58SJeff Roberson }
124aa0a1e58SJeff Roberson h = mtod(mb, struct sdp_bsdh *);
125aa0a1e58SJeff Roberson rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1));
126aa0a1e58SJeff Roberson rx_req->mb = mb;
127aa0a1e58SJeff Roberson dev = ssk->ib_device;
128aa0a1e58SJeff Roberson for (i = 0; mb != NULL; i++, mb = mb->m_next, sge++) {
129aa0a1e58SJeff Roberson addr = ib_dma_map_single(dev, mb->m_data, mb->m_len,
130aa0a1e58SJeff Roberson DMA_TO_DEVICE);
131aa0a1e58SJeff Roberson /* TODO: proper error handling */
132aa0a1e58SJeff Roberson BUG_ON(ib_dma_mapping_error(dev, addr));
133aa0a1e58SJeff Roberson BUG_ON(i >= SDP_MAX_RECV_SGES);
134aa0a1e58SJeff Roberson rx_req->mapping[i] = addr;
135aa0a1e58SJeff Roberson sge->addr = addr;
136aa0a1e58SJeff Roberson sge->length = mb->m_len;
137c69c74b8SHans Petter Selasky sge->lkey = ssk->sdp_dev->pd->local_dma_lkey;
138aa0a1e58SJeff Roberson }
139aa0a1e58SJeff Roberson
140aa0a1e58SJeff Roberson rx_wr.next = NULL;
141aa0a1e58SJeff Roberson rx_wr.wr_id = id | SDP_OP_RECV;
142aa0a1e58SJeff Roberson rx_wr.sg_list = ibsge;
143aa0a1e58SJeff Roberson rx_wr.num_sge = i;
144aa0a1e58SJeff Roberson rc = ib_post_recv(ssk->qp, &rx_wr, &bad_wr);
145aa0a1e58SJeff Roberson if (unlikely(rc)) {
146aa0a1e58SJeff Roberson sdp_warn(ssk->socket, "ib_post_recv failed. status %d\n", rc);
147aa0a1e58SJeff Roberson
148aa0a1e58SJeff Roberson sdp_cleanup_sdp_buf(ssk, rx_req, DMA_FROM_DEVICE);
149aa0a1e58SJeff Roberson m_freem(mb);
150aa0a1e58SJeff Roberson
151aa0a1e58SJeff Roberson sdp_notify(ssk, ECONNRESET);
152aa0a1e58SJeff Roberson
153aa0a1e58SJeff Roberson return -1;
154aa0a1e58SJeff Roberson }
155aa0a1e58SJeff Roberson
156aa0a1e58SJeff Roberson atomic_inc(&ssk->rx_ring.head);
157aa0a1e58SJeff Roberson SDPSTATS_COUNTER_INC(post_recv);
158aa0a1e58SJeff Roberson
159aa0a1e58SJeff Roberson return 0;
160aa0a1e58SJeff Roberson }
161aa0a1e58SJeff Roberson
162aa0a1e58SJeff Roberson static inline int
sdp_post_recvs_needed(struct sdp_sock * ssk)163aa0a1e58SJeff Roberson sdp_post_recvs_needed(struct sdp_sock *ssk)
164aa0a1e58SJeff Roberson {
165aa0a1e58SJeff Roberson unsigned long bytes_in_process;
166aa0a1e58SJeff Roberson unsigned long max_bytes;
167aa0a1e58SJeff Roberson int buffer_size;
168aa0a1e58SJeff Roberson int posted;
169aa0a1e58SJeff Roberson
170aa0a1e58SJeff Roberson if (!ssk->qp_active || !ssk->socket)
171aa0a1e58SJeff Roberson return 0;
172aa0a1e58SJeff Roberson
173aa0a1e58SJeff Roberson posted = rx_ring_posted(ssk);
174aa0a1e58SJeff Roberson if (posted >= SDP_RX_SIZE)
175aa0a1e58SJeff Roberson return 0;
176aa0a1e58SJeff Roberson if (posted < SDP_MIN_TX_CREDITS)
177aa0a1e58SJeff Roberson return 1;
178aa0a1e58SJeff Roberson
179aa0a1e58SJeff Roberson buffer_size = ssk->recv_bytes;
180f66ec152SMark Johnston max_bytes = max(ssk->socket->so_rcv.sb_hiwat,
181aa0a1e58SJeff Roberson (1 + SDP_MIN_TX_CREDITS) * buffer_size);
182aa0a1e58SJeff Roberson max_bytes *= rcvbuf_scale;
183aa0a1e58SJeff Roberson /*
184aa0a1e58SJeff Roberson * Compute bytes in the receive queue and socket buffer.
185aa0a1e58SJeff Roberson */
186aa0a1e58SJeff Roberson bytes_in_process = (posted - SDP_MIN_TX_CREDITS) * buffer_size;
187cfa6009eSGleb Smirnoff bytes_in_process += sbused(&ssk->socket->so_rcv);
188aa0a1e58SJeff Roberson
189aa0a1e58SJeff Roberson return bytes_in_process < max_bytes;
190aa0a1e58SJeff Roberson }
191aa0a1e58SJeff Roberson
/*
 * Keep posting receive buffers until the ring is sufficiently full or
 * an allocation/post attempt fails.
 */
static inline void
sdp_post_recvs(struct sdp_sock *ssk)
{

	for (;;) {
		if (!sdp_post_recvs_needed(ssk))
			break;
		if (sdp_post_recv(ssk) != 0)
			break;
	}
}
200aa0a1e58SJeff Roberson
/*
 * Append a received data mbuf to the socket receive buffer and wake up
 * any sleeping reader.  rcv_nxt is advanced by the full packet length
 * before the BSDH header is stripped with m_adj().  Returns the mbuf
 * that was queued.
 */
static inline struct mbuf *
sdp_sock_queue_rcv_mb(struct socket *sk, struct mbuf *mb)
{
	struct sdp_sock *ssk = sdp_sk(sk);
	struct sdp_bsdh *h;

	h = mtod(mb, struct sdp_bsdh *);

#ifdef SDP_ZCOPY
	/*
	 * Zero-copy receive path (compiled out by default): a SrcAvail
	 * message advertises peer memory to be fetched by RDMA read
	 * instead of carrying the payload inline.
	 */
	SDP_SKB_CB(mb)->seq = rcv_nxt(ssk);
	if (h->mid == SDP_MID_SRCAVAIL) {
		struct sdp_srcah *srcah = (struct sdp_srcah *)(h+1);
		struct rx_srcavail_state *rx_sa;

		ssk->srcavail_cancel_mseq = 0;

		/* NOTE(review): the allocation result is not checked
		 * before being dereferenced below. */
		ssk->rx_sa = rx_sa = RX_SRCAVAIL_STATE(mb) = kzalloc(
		    sizeof(struct rx_srcavail_state), M_NOWAIT);

		rx_sa->mseq = ntohl(h->mseq);
		rx_sa->used = 0;
		rx_sa->len = mb_len = ntohl(srcah->len);
		rx_sa->rkey = ntohl(srcah->rkey);
		rx_sa->vaddr = be64_to_cpu(srcah->vaddr);
		rx_sa->flags = 0;

		if (ssk->tx_sa) {
			sdp_dbg_data(ssk->socket, "got RX SrcAvail while waiting "
			    "for TX SrcAvail. waking up TX SrcAvail"
			    "to be aborted\n");
			wake_up(sk->sk_sleep);
		}

		atomic_add(mb->len, &ssk->rcv_nxt);
		sdp_dbg_data(sk, "queueing SrcAvail. mb_len = %d vaddr = %lld\n",
		    mb_len, rx_sa->vaddr);
	} else
#endif
	{
		/* Advance the receive sequence by the whole packet length. */
		atomic_add(mb->m_pkthdr.len, &ssk->rcv_nxt);
	}

	/* Strip the SDP header; only payload goes to the socket buffer. */
	m_adj(mb, SDP_HEAD_SIZE);
	SOCKBUF_LOCK(&sk->so_rcv);
	if (unlikely(h->flags & SDP_OOB_PRES))
		sdp_urg(ssk, mb);
	sbappend_locked(&sk->so_rcv, mb, 0);
	sorwakeup_locked(sk);
	return mb;
}
251aa0a1e58SJeff Roberson
252aa0a1e58SJeff Roberson static int
sdp_get_recv_bytes(struct sdp_sock * ssk,u32 new_size)253aa0a1e58SJeff Roberson sdp_get_recv_bytes(struct sdp_sock *ssk, u32 new_size)
254aa0a1e58SJeff Roberson {
255aa0a1e58SJeff Roberson
256aa0a1e58SJeff Roberson return MIN(new_size, SDP_MAX_PACKET);
257aa0a1e58SJeff Roberson }
258aa0a1e58SJeff Roberson
259aa0a1e58SJeff Roberson int
sdp_init_buffers(struct sdp_sock * ssk,u32 new_size)260aa0a1e58SJeff Roberson sdp_init_buffers(struct sdp_sock *ssk, u32 new_size)
261aa0a1e58SJeff Roberson {
262aa0a1e58SJeff Roberson
263aa0a1e58SJeff Roberson ssk->recv_bytes = sdp_get_recv_bytes(ssk, new_size);
264aa0a1e58SJeff Roberson sdp_post_recvs(ssk);
265aa0a1e58SJeff Roberson
266aa0a1e58SJeff Roberson return 0;
267aa0a1e58SJeff Roberson }
268aa0a1e58SJeff Roberson
269aa0a1e58SJeff Roberson int
sdp_resize_buffers(struct sdp_sock * ssk,u32 new_size)270aa0a1e58SJeff Roberson sdp_resize_buffers(struct sdp_sock *ssk, u32 new_size)
271aa0a1e58SJeff Roberson {
272aa0a1e58SJeff Roberson u32 curr_size = ssk->recv_bytes;
273aa0a1e58SJeff Roberson u32 max_size = SDP_MAX_PACKET;
274aa0a1e58SJeff Roberson
275aa0a1e58SJeff Roberson if (new_size > curr_size && new_size <= max_size) {
276aa0a1e58SJeff Roberson ssk->recv_bytes = sdp_get_recv_bytes(ssk, new_size);
277aa0a1e58SJeff Roberson return 0;
278aa0a1e58SJeff Roberson }
279aa0a1e58SJeff Roberson return -1;
280aa0a1e58SJeff Roberson }
281aa0a1e58SJeff Roberson
282aa0a1e58SJeff Roberson static void
sdp_handle_resize_request(struct sdp_sock * ssk,struct sdp_chrecvbuf * buf)283aa0a1e58SJeff Roberson sdp_handle_resize_request(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
284aa0a1e58SJeff Roberson {
285aa0a1e58SJeff Roberson if (sdp_resize_buffers(ssk, ntohl(buf->size)) == 0)
286aa0a1e58SJeff Roberson ssk->recv_request_head = ring_head(ssk->rx_ring) + 1;
287aa0a1e58SJeff Roberson else
288aa0a1e58SJeff Roberson ssk->recv_request_head = ring_tail(ssk->rx_ring);
289aa0a1e58SJeff Roberson ssk->recv_request = 1;
290aa0a1e58SJeff Roberson }
291aa0a1e58SJeff Roberson
292aa0a1e58SJeff Roberson static void
sdp_handle_resize_ack(struct sdp_sock * ssk,struct sdp_chrecvbuf * buf)293aa0a1e58SJeff Roberson sdp_handle_resize_ack(struct sdp_sock *ssk, struct sdp_chrecvbuf *buf)
294aa0a1e58SJeff Roberson {
295aa0a1e58SJeff Roberson u32 new_size = ntohl(buf->size);
296aa0a1e58SJeff Roberson
297aa0a1e58SJeff Roberson if (new_size > ssk->xmit_size_goal)
298aa0a1e58SJeff Roberson ssk->xmit_size_goal = new_size;
299aa0a1e58SJeff Roberson }
300aa0a1e58SJeff Roberson
301aa0a1e58SJeff Roberson static struct mbuf *
sdp_recv_completion(struct sdp_sock * ssk,int id)302aa0a1e58SJeff Roberson sdp_recv_completion(struct sdp_sock *ssk, int id)
303aa0a1e58SJeff Roberson {
304aa0a1e58SJeff Roberson struct sdp_buf *rx_req;
305aa0a1e58SJeff Roberson struct ib_device *dev;
306aa0a1e58SJeff Roberson struct mbuf *mb;
307aa0a1e58SJeff Roberson
308aa0a1e58SJeff Roberson if (unlikely(id != ring_tail(ssk->rx_ring))) {
309aa0a1e58SJeff Roberson printk(KERN_WARNING "Bogus recv completion id %d tail %d\n",
310aa0a1e58SJeff Roberson id, ring_tail(ssk->rx_ring));
311aa0a1e58SJeff Roberson return NULL;
312aa0a1e58SJeff Roberson }
313aa0a1e58SJeff Roberson
314aa0a1e58SJeff Roberson dev = ssk->ib_device;
315aa0a1e58SJeff Roberson rx_req = &ssk->rx_ring.buffer[id & (SDP_RX_SIZE - 1)];
316aa0a1e58SJeff Roberson mb = rx_req->mb;
317aa0a1e58SJeff Roberson sdp_cleanup_sdp_buf(ssk, rx_req, DMA_FROM_DEVICE);
318aa0a1e58SJeff Roberson
319aa0a1e58SJeff Roberson atomic_inc(&ssk->rx_ring.tail);
320aa0a1e58SJeff Roberson atomic_dec(&ssk->remote_credits);
321aa0a1e58SJeff Roberson return mb;
322aa0a1e58SJeff Roberson }
323aa0a1e58SJeff Roberson
/*
 * Handle one control message that was deferred to the control queue.
 * Called from sdp_do_posts() with the SDP write lock held.  Consumes
 * the mbuf in all cases.
 */
static void
sdp_process_rx_ctl_mb(struct sdp_sock *ssk, struct mbuf *mb)
{
	struct sdp_bsdh *h;
	struct socket *sk;

	SDP_WLOCK_ASSERT(ssk);

	sk = ssk->socket;
	h = mtod(mb, struct sdp_bsdh *);
	switch (h->mid) {
	case SDP_MID_DATA:
	case SDP_MID_SRCAVAIL:
		/* Data reaches this path only after receive shutdown. */
		sdp_dbg(sk, "DATA after socket rcv was shutdown\n");

		/* got data in RCV_SHUTDOWN */
		if (ssk->state == TCPS_FIN_WAIT_1) {
			/* Peer sent data after we sent a FIN: reset. */
			sdp_dbg(sk, "RX data when state = FIN_WAIT1\n");
			sdp_notify(ssk, ECONNRESET);
		}

		break;
#ifdef SDP_ZCOPY
	case SDP_MID_RDMARDCOMPL:
		break;
	case SDP_MID_SENDSM:
		sdp_handle_sendsm(ssk, ntohl(h->mseq_ack));
		break;
	case SDP_MID_SRCAVAIL_CANCEL:
		/* Peer withdrew an advertised RDMA source buffer. */
		sdp_dbg_data(sk, "Handling SrcAvailCancel\n");
		sdp_prf(sk, NULL, "Handling SrcAvailCancel");
		if (ssk->rx_sa) {
			ssk->srcavail_cancel_mseq = ntohl(h->mseq);
			ssk->rx_sa->flags |= RX_SA_ABORTED;
			ssk->rx_sa = NULL; /* TODO: change it into SDP_MID_DATA and get
			                      the dirty logic from recvmsg */
		} else {
			sdp_dbg(sk, "Got SrcAvailCancel - "
			    "but no SrcAvail in process\n");
		}
		break;
	case SDP_MID_SINKAVAIL:
		sdp_dbg_data(sk, "Got SinkAvail - not supported: ignored\n");
		sdp_prf(sk, NULL, "Got SinkAvail - not supported: ignored");
		/* FALLTHROUGH */
#endif
	case SDP_MID_ABORT:
		/* Peer aborted the connection. */
		sdp_dbg_data(sk, "Handling ABORT\n");
		sdp_prf(sk, NULL, "Handling ABORT");
		sdp_notify(ssk, ECONNRESET);
		break;
	case SDP_MID_DISCONN:
		/* Peer's orderly shutdown (FIN equivalent). */
		sdp_dbg_data(sk, "Handling DISCONN\n");
		sdp_prf(sk, NULL, "Handling DISCONN");
		sdp_handle_disconn(ssk);
		break;
	case SDP_MID_CHRCVBUF:
		sdp_dbg_data(sk, "Handling RX CHRCVBUF\n");
		sdp_handle_resize_request(ssk, (struct sdp_chrecvbuf *)(h+1));
		break;
	case SDP_MID_CHRCVBUF_ACK:
		sdp_dbg_data(sk, "Handling RX CHRCVBUF_ACK\n");
		sdp_handle_resize_ack(ssk, (struct sdp_chrecvbuf *)(h+1));
		break;
	default:
		/* TODO: Handle other messages */
		sdp_warn(sk, "SDP: FIXME MID %d\n", h->mid);
		break;
	}
	m_freem(mb);
}
395aa0a1e58SJeff Roberson
/*
 * Process one received SDP message.  Every incoming BSDH carries a
 * credit update (mseq_ack/bufs), which is applied first; the message
 * is then either dropped (pure credit update), deferred to the control
 * queue (non-data messages, or anything arriving after a FIN), or
 * appended to the socket receive buffer.  Consumes the mbuf on every
 * path.  Always returns 0.
 */
static int
sdp_process_rx_mb(struct sdp_sock *ssk, struct mbuf *mb)
{
	struct socket *sk;
	struct sdp_bsdh *h;
	unsigned long mseq_ack;
	int credits_before;

	h = mtod(mb, struct sdp_bsdh *);
	sk = ssk->socket;
	/*
	 * If another thread is in so_pcbfree this may be partially torn
	 * down but no further synchronization is required as the destroying
	 * thread will wait for receive to shutdown before discarding the
	 * socket.
	 */
	if (sk == NULL) {
		m_freem(mb);
		return 0;
	}

	SDPSTATS_HIST_LINEAR(credits_before_update, tx_credits(ssk));

	/* Recompute our TX credits from the peer's acknowledgement. */
	mseq_ack = ntohl(h->mseq_ack);
	credits_before = tx_credits(ssk);
	atomic_set(&ssk->tx_ring.credits, mseq_ack - ring_head(ssk->tx_ring) +
	    1 + ntohs(h->bufs));
	if (mseq_ack >= ssk->nagle_last_unacked)
		ssk->nagle_last_unacked = 0;

	sdp_prf1(ssk->socket, mb, "RX %s +%d c:%d->%d mseq:%d ack:%d\n",
	    mid2str(h->mid), ntohs(h->bufs), credits_before,
	    tx_credits(ssk), ntohl(h->mseq), ntohl(h->mseq_ack));

	/* A header-only DATA message is just a credit update. */
	if (unlikely(h->mid == SDP_MID_DATA &&
	    mb->m_pkthdr.len == SDP_HEAD_SIZE)) {
		/* Credit update is valid even after RCV_SHUTDOWN */
		m_freem(mb);
		return 0;
	}

	/*
	 * Control messages (and any message after a received FIN) are
	 * deferred to the control queue, to be handled by
	 * sdp_do_posts() outside interrupt context.
	 */
	if ((h->mid != SDP_MID_DATA && h->mid != SDP_MID_SRCAVAIL) ||
	    TCPS_HAVERCVDFIN(ssk->state)) {
		sdp_prf(sk, NULL, "Control mb - queing to control queue");
#ifdef SDP_ZCOPY
		if (h->mid == SDP_MID_SRCAVAIL_CANCEL) {
			sdp_dbg_data(sk, "Got SrcAvailCancel. "
			    "seq: 0x%d seq_ack: 0x%d\n",
			    ntohl(h->mseq), ntohl(h->mseq_ack));
			ssk->srcavail_cancel_mseq = ntohl(h->mseq);
		}

		if (h->mid == SDP_MID_RDMARDCOMPL) {
			struct sdp_rrch *rrch = (struct sdp_rrch *)(h+1);
			sdp_dbg_data(sk, "RdmaRdCompl message arrived\n");
			sdp_handle_rdma_read_compl(ssk, ntohl(h->mseq_ack),
			    ntohl(rrch->len));
		}
#endif
		if (mbufq_enqueue(&ssk->rxctlq, mb) != 0)
			m_freem(mb);
		return (0);
	}

	/* In-band data: hand it to the socket receive buffer. */
	sdp_prf1(sk, NULL, "queueing %s mb\n", mid2str(h->mid));
	mb = sdp_sock_queue_rcv_mb(sk, mb);

	return 0;
}
467aa0a1e58SJeff Roberson
/* called only from irq */
/*
 * Convert one receive work completion into an mbuf: fetch the posted
 * buffer, abort the connection on completion errors, drop runts that
 * cannot hold a BSDH header, trim the chain to the DMA'd length, and
 * record statistics and the peer's sequence number.  Returns NULL when
 * there is nothing for the caller to process.
 */
static struct mbuf *
sdp_process_rx_wc(struct sdp_sock *ssk, struct ib_wc *wc)
{
	struct mbuf *mb;
	struct sdp_bsdh *h;
	struct socket *sk = ssk->socket;
	int mseq;

	mb = sdp_recv_completion(ssk, wc->wr_id);
	if (unlikely(!mb))
		return NULL;

	if (unlikely(wc->status)) {
		/* Completion in error: tear the connection down once. */
		if (ssk->qp_active && sk) {
			sdp_dbg(sk, "Recv completion with error. "
			    "Status %s (%d), vendor: %d\n",
			    ib_wc_status_msg(wc->status), wc->status,
			    wc->vendor_err);
			sdp_abort(sk);
			ssk->qp_active = 0;
		}
		m_freem(mb);
		return NULL;
	}

	sdp_dbg_data(sk, "Recv completion. ID %d Length %d\n",
	    (int)wc->wr_id, wc->byte_len);
	if (unlikely(wc->byte_len < sizeof(struct sdp_bsdh))) {
		/* Too short to even contain the SDP header: drop it. */
		sdp_warn(sk, "SDP BUG! byte_len %d < %zd\n",
		    wc->byte_len, sizeof(struct sdp_bsdh));
		m_freem(mb);
		return NULL;
	}
	/* Use m_adj to trim the tail of data we didn't use. */
	m_adj(mb, -(mb->m_pkthdr.len - wc->byte_len));
	h = mtod(mb, struct sdp_bsdh *);

	SDP_DUMP_PACKET(ssk->socket, "RX", mb, h);

	ssk->rx_packets++;
	ssk->rx_bytes += mb->m_pkthdr.len;

	/* Remember the peer's sequence number for our next credit ack. */
	mseq = ntohl(h->mseq);
	atomic_set(&ssk->mseq_ack, mseq);
	if (mseq != (int)wc->wr_id)
		sdp_warn(sk, "SDP BUG! mseq %d != wrid %d\n",
		    mseq, (int)wc->wr_id);

	return mb;
}
519aa0a1e58SJeff Roberson
520aa0a1e58SJeff Roberson /* Wakeup writers if we now have credits. */
521aa0a1e58SJeff Roberson static void
sdp_bzcopy_write_space(struct sdp_sock * ssk)522aa0a1e58SJeff Roberson sdp_bzcopy_write_space(struct sdp_sock *ssk)
523aa0a1e58SJeff Roberson {
524aa0a1e58SJeff Roberson struct socket *sk = ssk->socket;
525aa0a1e58SJeff Roberson
526aa0a1e58SJeff Roberson if (tx_credits(ssk) >= ssk->min_bufs && sk)
527aa0a1e58SJeff Roberson sowwakeup(sk);
528aa0a1e58SJeff Roberson }
529aa0a1e58SJeff Roberson
530aa0a1e58SJeff Roberson /* only from interrupt. */
531aa0a1e58SJeff Roberson static int
sdp_poll_rx_cq(struct sdp_sock * ssk)532aa0a1e58SJeff Roberson sdp_poll_rx_cq(struct sdp_sock *ssk)
533aa0a1e58SJeff Roberson {
534aa0a1e58SJeff Roberson struct ib_cq *cq = ssk->rx_ring.cq;
535aa0a1e58SJeff Roberson struct ib_wc ibwc[SDP_NUM_WC];
536aa0a1e58SJeff Roberson int n, i;
537aa0a1e58SJeff Roberson int wc_processed = 0;
538aa0a1e58SJeff Roberson struct mbuf *mb;
539aa0a1e58SJeff Roberson
540aa0a1e58SJeff Roberson do {
541aa0a1e58SJeff Roberson n = ib_poll_cq(cq, SDP_NUM_WC, ibwc);
542aa0a1e58SJeff Roberson for (i = 0; i < n; ++i) {
543aa0a1e58SJeff Roberson struct ib_wc *wc = &ibwc[i];
544aa0a1e58SJeff Roberson
545aa0a1e58SJeff Roberson BUG_ON(!(wc->wr_id & SDP_OP_RECV));
546aa0a1e58SJeff Roberson mb = sdp_process_rx_wc(ssk, wc);
547aa0a1e58SJeff Roberson if (!mb)
548aa0a1e58SJeff Roberson continue;
549aa0a1e58SJeff Roberson
550aa0a1e58SJeff Roberson sdp_process_rx_mb(ssk, mb);
551aa0a1e58SJeff Roberson wc_processed++;
552aa0a1e58SJeff Roberson }
553aa0a1e58SJeff Roberson } while (n == SDP_NUM_WC);
554aa0a1e58SJeff Roberson
555aa0a1e58SJeff Roberson if (wc_processed)
556aa0a1e58SJeff Roberson sdp_bzcopy_write_space(ssk);
557aa0a1e58SJeff Roberson
558aa0a1e58SJeff Roberson return wc_processed;
559aa0a1e58SJeff Roberson }
560aa0a1e58SJeff Roberson
/*
 * Workqueue handler scheduled by sdp_process_rx() (and by the
 * allocation-retry path in sdp_post_recv()): finish receive processing
 * in process context.  Bails out if the QP or RX CQ has been torn
 * down; while poll_cq is still clear it only nudges the CM with
 * IB_EVENT_COMM_EST instead of posting.
 */
static void
sdp_rx_comp_work(struct work_struct *work)
{
	struct sdp_sock *ssk = container_of(work, struct sdp_sock,
	    rx_comp_work);

	sdp_prf(ssk->socket, NULL, "%s", __func__);

	SDP_WLOCK(ssk);
	if (unlikely(!ssk->qp)) {
		sdp_prf(ssk->socket, NULL, "qp was destroyed");
		goto out;
	}
	if (unlikely(!ssk->rx_ring.cq)) {
		sdp_prf(ssk->socket, NULL, "rx_ring.cq is NULL");
		goto out;
	}

	if (unlikely(!ssk->poll_cq)) {
		struct rdma_cm_id *id = ssk->id;
		/* Tell the CM the connection is live so setup completes. */
		if (id && id->qp)
			rdma_notify(id, IB_EVENT_COMM_EST);
		goto out;
	}

	sdp_do_posts(ssk);
out:
	SDP_WUNLOCK(ssk);
}
590aa0a1e58SJeff Roberson
/*
 * Post-interrupt housekeeping, called with the SDP write lock held:
 * drain the deferred control-message queue, replenish the receive
 * ring, reap TX completions and push out pending sends and credit
 * updates.
 */
void
sdp_do_posts(struct sdp_sock *ssk)
{
	struct socket *sk = ssk->socket;
	int xmit_poll_force;
	struct mbuf *mb;

	SDP_WLOCK_ASSERT(ssk);
	if (!ssk->qp_active) {
		sdp_dbg(sk, "QP is deactivated\n");
		return;
	}

	/* Handle control messages deferred by sdp_process_rx_mb(). */
	while ((mb = mbufq_dequeue(&ssk->rxctlq)) != NULL)
		sdp_process_rx_ctl_mb(ssk, mb);

	if (ssk->state == TCPS_TIME_WAIT)
		return;

	if (!ssk->rx_ring.cq || !ssk->tx_ring.cq)
		return;

	sdp_post_recvs(ssk);

	if (tx_ring_posted(ssk))
		sdp_xmit_poll(ssk, 1);

	sdp_post_sends(ssk, M_NOWAIT);

	/* Force a TX completion poll when we are low on credits. */
	xmit_poll_force = tx_credits(ssk) < SDP_MIN_TX_CREDITS;

	if (credit_update_needed(ssk) || xmit_poll_force) {
		/* if has pending tx because run out of tx_credits - xmit it */
		sdp_prf(sk, NULL, "Processing to free pending sends");
		sdp_xmit_poll(ssk, xmit_poll_force);
		sdp_prf(sk, NULL, "Sending credit update");
		sdp_post_sends(ssk, M_NOWAIT);
	}

}
631aa0a1e58SJeff Roberson
632aa0a1e58SJeff Roberson int
sdp_process_rx(struct sdp_sock * ssk)633aa0a1e58SJeff Roberson sdp_process_rx(struct sdp_sock *ssk)
634aa0a1e58SJeff Roberson {
635aa0a1e58SJeff Roberson int wc_processed = 0;
636aa0a1e58SJeff Roberson int credits_before;
637aa0a1e58SJeff Roberson
638aa0a1e58SJeff Roberson if (!rx_ring_trylock(&ssk->rx_ring)) {
639aa0a1e58SJeff Roberson sdp_dbg(ssk->socket, "ring destroyed. not polling it\n");
640aa0a1e58SJeff Roberson return 0;
641aa0a1e58SJeff Roberson }
642aa0a1e58SJeff Roberson
643aa0a1e58SJeff Roberson credits_before = tx_credits(ssk);
644aa0a1e58SJeff Roberson
645aa0a1e58SJeff Roberson wc_processed = sdp_poll_rx_cq(ssk);
646aa0a1e58SJeff Roberson sdp_prf(ssk->socket, NULL, "processed %d", wc_processed);
647aa0a1e58SJeff Roberson
648aa0a1e58SJeff Roberson if (wc_processed) {
649aa0a1e58SJeff Roberson sdp_prf(ssk->socket, NULL, "credits: %d -> %d",
650aa0a1e58SJeff Roberson credits_before, tx_credits(ssk));
651aa0a1e58SJeff Roberson queue_work(rx_comp_wq, &ssk->rx_comp_work);
652aa0a1e58SJeff Roberson }
653aa0a1e58SJeff Roberson sdp_arm_rx_cq(ssk);
654aa0a1e58SJeff Roberson
655aa0a1e58SJeff Roberson rx_ring_unlock(&ssk->rx_ring);
656aa0a1e58SJeff Roberson
657aa0a1e58SJeff Roberson return (wc_processed);
658aa0a1e58SJeff Roberson }
659aa0a1e58SJeff Roberson
660aa0a1e58SJeff Roberson static void
sdp_rx_irq(struct ib_cq * cq,void * cq_context)661aa0a1e58SJeff Roberson sdp_rx_irq(struct ib_cq *cq, void *cq_context)
662aa0a1e58SJeff Roberson {
663a3c0b052SMark Johnston struct sdp_sock *ssk;
664aa0a1e58SJeff Roberson
665a3c0b052SMark Johnston ssk = cq_context;
6663b3e6d88SMark Johnston KASSERT(cq == ssk->rx_ring.cq,
6673b3e6d88SMark Johnston ("%s: mismatched cq on %p", __func__, ssk));
668aa0a1e58SJeff Roberson
669aa0a1e58SJeff Roberson SDPSTATS_COUNTER_INC(rx_int_count);
670aa0a1e58SJeff Roberson
671aa0a1e58SJeff Roberson sdp_prf(sk, NULL, "rx irq");
672aa0a1e58SJeff Roberson
673aa0a1e58SJeff Roberson sdp_process_rx(ssk);
674aa0a1e58SJeff Roberson }
675aa0a1e58SJeff Roberson
676aa0a1e58SJeff Roberson static
sdp_rx_ring_purge(struct sdp_sock * ssk)677aa0a1e58SJeff Roberson void sdp_rx_ring_purge(struct sdp_sock *ssk)
678aa0a1e58SJeff Roberson {
679aa0a1e58SJeff Roberson while (rx_ring_posted(ssk) > 0) {
680aa0a1e58SJeff Roberson struct mbuf *mb;
681aa0a1e58SJeff Roberson mb = sdp_recv_completion(ssk, ring_tail(ssk->rx_ring));
682aa0a1e58SJeff Roberson if (!mb)
683aa0a1e58SJeff Roberson break;
684aa0a1e58SJeff Roberson m_freem(mb);
685aa0a1e58SJeff Roberson }
686aa0a1e58SJeff Roberson }
687aa0a1e58SJeff Roberson
688aa0a1e58SJeff Roberson void
sdp_rx_ring_init(struct sdp_sock * ssk)689aa0a1e58SJeff Roberson sdp_rx_ring_init(struct sdp_sock *ssk)
690aa0a1e58SJeff Roberson {
691aa0a1e58SJeff Roberson ssk->rx_ring.buffer = NULL;
692aa0a1e58SJeff Roberson ssk->rx_ring.destroyed = 0;
693aa0a1e58SJeff Roberson rw_init(&ssk->rx_ring.destroyed_lock, "sdp rx lock");
694aa0a1e58SJeff Roberson }
695aa0a1e58SJeff Roberson
/* RX CQ asynchronous event callback; events are intentionally ignored. */
static void
sdp_rx_cq_event_handler(struct ib_event *event, void *data)
{
}
700aa0a1e58SJeff Roberson
701aa0a1e58SJeff Roberson int
sdp_rx_ring_create(struct sdp_sock * ssk,struct ib_device * device)702aa0a1e58SJeff Roberson sdp_rx_ring_create(struct sdp_sock *ssk, struct ib_device *device)
703aa0a1e58SJeff Roberson {
704c69c74b8SHans Petter Selasky struct ib_cq_init_attr rx_cq_attr = {
705c69c74b8SHans Petter Selasky .cqe = SDP_RX_SIZE,
706c69c74b8SHans Petter Selasky .comp_vector = 0,
707c69c74b8SHans Petter Selasky .flags = 0,
708c69c74b8SHans Petter Selasky };
709aa0a1e58SJeff Roberson struct ib_cq *rx_cq;
710aa0a1e58SJeff Roberson int rc = 0;
711aa0a1e58SJeff Roberson
712aa0a1e58SJeff Roberson sdp_dbg(ssk->socket, "rx ring created");
713aa0a1e58SJeff Roberson INIT_WORK(&ssk->rx_comp_work, sdp_rx_comp_work);
714aa0a1e58SJeff Roberson atomic_set(&ssk->rx_ring.head, 1);
715aa0a1e58SJeff Roberson atomic_set(&ssk->rx_ring.tail, 1);
716aa0a1e58SJeff Roberson
717d3461164SMark Johnston ssk->rx_ring.buffer = malloc(sizeof(*ssk->rx_ring.buffer) * SDP_RX_SIZE,
718d3461164SMark Johnston M_SDP, M_WAITOK);
719aa0a1e58SJeff Roberson
720aa0a1e58SJeff Roberson rx_cq = ib_create_cq(device, sdp_rx_irq, sdp_rx_cq_event_handler,
721c69c74b8SHans Petter Selasky ssk, &rx_cq_attr);
722aa0a1e58SJeff Roberson if (IS_ERR(rx_cq)) {
723aa0a1e58SJeff Roberson rc = PTR_ERR(rx_cq);
724aa0a1e58SJeff Roberson sdp_warn(ssk->socket, "Unable to allocate RX CQ: %d.\n", rc);
725aa0a1e58SJeff Roberson goto err_cq;
726aa0a1e58SJeff Roberson }
727aa0a1e58SJeff Roberson
728aa0a1e58SJeff Roberson sdp_sk(ssk->socket)->rx_ring.cq = rx_cq;
729aa0a1e58SJeff Roberson sdp_arm_rx_cq(ssk);
730aa0a1e58SJeff Roberson
731aa0a1e58SJeff Roberson return 0;
732aa0a1e58SJeff Roberson
733aa0a1e58SJeff Roberson err_cq:
734d3461164SMark Johnston free(ssk->rx_ring.buffer, M_SDP);
735aa0a1e58SJeff Roberson ssk->rx_ring.buffer = NULL;
736aa0a1e58SJeff Roberson return rc;
737aa0a1e58SJeff Roberson }
738aa0a1e58SJeff Roberson
/*
 * Tear down the RX ring: stop deferred completion work, mark the ring
 * destroyed, drain and free posted mbufs and the ring buffer, then
 * destroy the completion queue.  Order matters: the work item and
 * pollers must be quiesced before the buffer and CQ go away.
 */
void
sdp_rx_ring_destroy(struct sdp_sock *ssk)
{

	/* Wait for any in-flight RX completion work to finish. */
	cancel_work_sync(&ssk->rx_comp_work);
	/* Mark destroyed so rx_ring_trylock() in pollers fails from now on. */
	rx_ring_destroy_lock(&ssk->rx_ring);

	if (ssk->rx_ring.buffer) {
		/* Reap and free any mbufs still posted on the ring. */
		sdp_rx_ring_purge(ssk);
		free(ssk->rx_ring.buffer, M_SDP);
		ssk->rx_ring.buffer = NULL;
	}

	if (ssk->rx_ring.cq) {
		ib_destroy_cq(ssk->rx_ring.cq);
		ssk->rx_ring.cq = NULL;
	}

	/* After the purge the ring must be empty (head == tail). */
	WARN_ON(ring_head(ssk->rx_ring) != ring_tail(ssk->rx_ring));
}
759