xref: /linux/fs/smb/smbdirect/accept.c (revision 0fc8f6200d2313278fbf4539bbab74677c685531)
1*1249c01aSStefan Metzmacher // SPDX-License-Identifier: GPL-2.0-or-later
2*1249c01aSStefan Metzmacher /*
3*1249c01aSStefan Metzmacher  *   Copyright (C) 2017, Microsoft Corporation.
4*1249c01aSStefan Metzmacher  *   Copyright (C) 2018, LG Electronics.
5*1249c01aSStefan Metzmacher  *   Copyright (c) 2025, Stefan Metzmacher
6*1249c01aSStefan Metzmacher  */
7*1249c01aSStefan Metzmacher 
8*1249c01aSStefan Metzmacher #include "internal.h"
9*1249c01aSStefan Metzmacher #include <net/sock.h>
10*1249c01aSStefan Metzmacher #include "../common/smb2status.h"
11*1249c01aSStefan Metzmacher 
12*1249c01aSStefan Metzmacher static int smbdirect_accept_rdma_event_handler(struct rdma_cm_id *id,
13*1249c01aSStefan Metzmacher 					       struct rdma_cm_event *event);
14*1249c01aSStefan Metzmacher static int smbdirect_accept_init_params(struct smbdirect_socket *sc);
15*1249c01aSStefan Metzmacher static void smbdirect_accept_negotiate_recv_done(struct ib_cq *cq, struct ib_wc *wc);
16*1249c01aSStefan Metzmacher static void smbdirect_accept_negotiate_send_done(struct ib_cq *cq, struct ib_wc *wc);
17*1249c01aSStefan Metzmacher 
18*1249c01aSStefan Metzmacher int smbdirect_accept_connect_request(struct smbdirect_socket *sc,
19*1249c01aSStefan Metzmacher 				     const struct rdma_conn_param *param)
20*1249c01aSStefan Metzmacher {
21*1249c01aSStefan Metzmacher 	struct smbdirect_socket_parameters *sp = &sc->parameters;
22*1249c01aSStefan Metzmacher 	struct smbdirect_recv_io *recv_io;
23*1249c01aSStefan Metzmacher 	u8 peer_initiator_depth;
24*1249c01aSStefan Metzmacher 	u8 peer_responder_resources;
25*1249c01aSStefan Metzmacher 	struct rdma_conn_param conn_param;
26*1249c01aSStefan Metzmacher 	__be32 ird_ord_hdr[2];
27*1249c01aSStefan Metzmacher 	int ret;
28*1249c01aSStefan Metzmacher 
29*1249c01aSStefan Metzmacher 	if (SMBDIRECT_CHECK_STATUS_WARN(sc, SMBDIRECT_SOCKET_CREATED))
30*1249c01aSStefan Metzmacher 		return -EINVAL;
31*1249c01aSStefan Metzmacher 
32*1249c01aSStefan Metzmacher 	/*
33*1249c01aSStefan Metzmacher 	 * First set what the we as server are able to support
34*1249c01aSStefan Metzmacher 	 */
35*1249c01aSStefan Metzmacher 	sp->initiator_depth = min_t(u8, sp->initiator_depth,
36*1249c01aSStefan Metzmacher 				    sc->ib.dev->attrs.max_qp_rd_atom);
37*1249c01aSStefan Metzmacher 
38*1249c01aSStefan Metzmacher 	peer_initiator_depth = param->initiator_depth;
39*1249c01aSStefan Metzmacher 	peer_responder_resources = param->responder_resources;
40*1249c01aSStefan Metzmacher 	smbdirect_connection_negotiate_rdma_resources(sc,
41*1249c01aSStefan Metzmacher 						      peer_initiator_depth,
42*1249c01aSStefan Metzmacher 						      peer_responder_resources,
43*1249c01aSStefan Metzmacher 						      param);
44*1249c01aSStefan Metzmacher 
45*1249c01aSStefan Metzmacher 	ret = smbdirect_accept_init_params(sc);
46*1249c01aSStefan Metzmacher 	if (ret) {
47*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
48*1249c01aSStefan Metzmacher 			"smbdirect_accept_init_params() failed %1pe\n",
49*1249c01aSStefan Metzmacher 			SMBDIRECT_DEBUG_ERR_PTR(ret));
50*1249c01aSStefan Metzmacher 		goto init_params_failed;
51*1249c01aSStefan Metzmacher 	}
52*1249c01aSStefan Metzmacher 
53*1249c01aSStefan Metzmacher 	ret = smbdirect_connection_create_qp(sc);
54*1249c01aSStefan Metzmacher 	if (ret) {
55*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
56*1249c01aSStefan Metzmacher 			"smbdirect_connection_create_qp() failed %1pe\n",
57*1249c01aSStefan Metzmacher 			SMBDIRECT_DEBUG_ERR_PTR(ret));
58*1249c01aSStefan Metzmacher 		goto create_qp_failed;
59*1249c01aSStefan Metzmacher 	}
60*1249c01aSStefan Metzmacher 
61*1249c01aSStefan Metzmacher 	ret = smbdirect_connection_create_mem_pools(sc);
62*1249c01aSStefan Metzmacher 	if (ret) {
63*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
64*1249c01aSStefan Metzmacher 			"smbdirect_connection_create_mem_pools() failed %1pe\n",
65*1249c01aSStefan Metzmacher 			SMBDIRECT_DEBUG_ERR_PTR(ret));
66*1249c01aSStefan Metzmacher 		goto create_mem_failed;
67*1249c01aSStefan Metzmacher 	}
68*1249c01aSStefan Metzmacher 
69*1249c01aSStefan Metzmacher 	recv_io = smbdirect_connection_get_recv_io(sc);
70*1249c01aSStefan Metzmacher 	if (WARN_ON_ONCE(!recv_io)) {
71*1249c01aSStefan Metzmacher 		ret = -EINVAL;
72*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
73*1249c01aSStefan Metzmacher 			"smbdirect_connection_get_recv_io() failed %1pe\n",
74*1249c01aSStefan Metzmacher 			SMBDIRECT_DEBUG_ERR_PTR(ret));
75*1249c01aSStefan Metzmacher 		goto get_recv_io_failed;
76*1249c01aSStefan Metzmacher 	}
77*1249c01aSStefan Metzmacher 	recv_io->cqe.done = smbdirect_accept_negotiate_recv_done;
78*1249c01aSStefan Metzmacher 
79*1249c01aSStefan Metzmacher 	/*
80*1249c01aSStefan Metzmacher 	 * Now post the recv_io buffer in order to get
81*1249c01aSStefan Metzmacher 	 * the negotiate request
82*1249c01aSStefan Metzmacher 	 */
83*1249c01aSStefan Metzmacher 	sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REQ;
84*1249c01aSStefan Metzmacher 	ret = smbdirect_connection_post_recv_io(recv_io);
85*1249c01aSStefan Metzmacher 	if (ret) {
86*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
87*1249c01aSStefan Metzmacher 			"smbdirect_connection_post_recv_io() failed %1pe\n",
88*1249c01aSStefan Metzmacher 			SMBDIRECT_DEBUG_ERR_PTR(ret));
89*1249c01aSStefan Metzmacher 		goto post_recv_io_failed;
90*1249c01aSStefan Metzmacher 	}
91*1249c01aSStefan Metzmacher 	/*
92*1249c01aSStefan Metzmacher 	 * From here recv_io is known to the RDMA QP and needs ib_drain_qp and
93*1249c01aSStefan Metzmacher 	 * smbdirect_accept_negotiate_recv_done to cleanup...
94*1249c01aSStefan Metzmacher 	 */
95*1249c01aSStefan Metzmacher 	recv_io = NULL;
96*1249c01aSStefan Metzmacher 
97*1249c01aSStefan Metzmacher 	/* already checked with SMBDIRECT_CHECK_STATUS_WARN above */
98*1249c01aSStefan Metzmacher 	WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_CREATED);
99*1249c01aSStefan Metzmacher 	sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED;
100*1249c01aSStefan Metzmacher 
101*1249c01aSStefan Metzmacher 	/*
102*1249c01aSStefan Metzmacher 	 * We already negotiated sp->initiator_depth
103*1249c01aSStefan Metzmacher 	 * and sp->responder_resources above.
104*1249c01aSStefan Metzmacher 	 */
105*1249c01aSStefan Metzmacher 	memset(&conn_param, 0, sizeof(conn_param));
106*1249c01aSStefan Metzmacher 	conn_param.initiator_depth = sp->initiator_depth;
107*1249c01aSStefan Metzmacher 	conn_param.responder_resources = sp->responder_resources;
108*1249c01aSStefan Metzmacher 
109*1249c01aSStefan Metzmacher 	if (sc->rdma.legacy_iwarp) {
110*1249c01aSStefan Metzmacher 		ird_ord_hdr[0] = cpu_to_be32(conn_param.responder_resources);
111*1249c01aSStefan Metzmacher 		ird_ord_hdr[1] = cpu_to_be32(conn_param.initiator_depth);
112*1249c01aSStefan Metzmacher 		conn_param.private_data = ird_ord_hdr;
113*1249c01aSStefan Metzmacher 		conn_param.private_data_len = sizeof(ird_ord_hdr);
114*1249c01aSStefan Metzmacher 	} else {
115*1249c01aSStefan Metzmacher 		conn_param.private_data = NULL;
116*1249c01aSStefan Metzmacher 		conn_param.private_data_len = 0;
117*1249c01aSStefan Metzmacher 	}
118*1249c01aSStefan Metzmacher 	conn_param.retry_count = SMBDIRECT_RDMA_CM_RETRY;
119*1249c01aSStefan Metzmacher 	conn_param.rnr_retry_count = SMBDIRECT_RDMA_CM_RNR_RETRY;
120*1249c01aSStefan Metzmacher 	conn_param.flow_control = 0;
121*1249c01aSStefan Metzmacher 
122*1249c01aSStefan Metzmacher 	/* explicitly set above */
123*1249c01aSStefan Metzmacher 	WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED);
124*1249c01aSStefan Metzmacher 	sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING;
125*1249c01aSStefan Metzmacher 	sc->rdma.expected_event = RDMA_CM_EVENT_ESTABLISHED;
126*1249c01aSStefan Metzmacher 	sc->rdma.cm_id->event_handler = smbdirect_accept_rdma_event_handler;
127*1249c01aSStefan Metzmacher 	ret = rdma_accept(sc->rdma.cm_id, &conn_param);
128*1249c01aSStefan Metzmacher 	if (ret) {
129*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
130*1249c01aSStefan Metzmacher 			"rdma_accept() failed %1pe\n",
131*1249c01aSStefan Metzmacher 			SMBDIRECT_DEBUG_ERR_PTR(ret));
132*1249c01aSStefan Metzmacher 		goto rdma_accept_failed;
133*1249c01aSStefan Metzmacher 	}
134*1249c01aSStefan Metzmacher 
135*1249c01aSStefan Metzmacher 	/*
136*1249c01aSStefan Metzmacher 	 * start with the negotiate timeout and SMBDIRECT_KEEPALIVE_PENDING
137*1249c01aSStefan Metzmacher 	 * so that the timer will cause a disconnect.
138*1249c01aSStefan Metzmacher 	 */
139*1249c01aSStefan Metzmacher 	INIT_DELAYED_WORK(&sc->idle.timer_work, smbdirect_connection_idle_timer_work);
140*1249c01aSStefan Metzmacher 	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING;
141*1249c01aSStefan Metzmacher 	mod_delayed_work(sc->workqueues.idle, &sc->idle.timer_work,
142*1249c01aSStefan Metzmacher 			 msecs_to_jiffies(sp->negotiate_timeout_msec));
143*1249c01aSStefan Metzmacher 
144*1249c01aSStefan Metzmacher 	return 0;
145*1249c01aSStefan Metzmacher 
146*1249c01aSStefan Metzmacher rdma_accept_failed:
147*1249c01aSStefan Metzmacher 	/*
148*1249c01aSStefan Metzmacher 	 * smbdirect_connection_destroy_qp() calls ib_drain_qp(),
149*1249c01aSStefan Metzmacher 	 * so that smbdirect_accept_negotiate_recv_done() will
150*1249c01aSStefan Metzmacher 	 * call smbdirect_connection_put_recv_io()
151*1249c01aSStefan Metzmacher 	 */
152*1249c01aSStefan Metzmacher post_recv_io_failed:
153*1249c01aSStefan Metzmacher 	if (recv_io)
154*1249c01aSStefan Metzmacher 		smbdirect_connection_put_recv_io(recv_io);
155*1249c01aSStefan Metzmacher get_recv_io_failed:
156*1249c01aSStefan Metzmacher 	smbdirect_connection_destroy_mem_pools(sc);
157*1249c01aSStefan Metzmacher create_mem_failed:
158*1249c01aSStefan Metzmacher 	smbdirect_connection_destroy_qp(sc);
159*1249c01aSStefan Metzmacher create_qp_failed:
160*1249c01aSStefan Metzmacher init_params_failed:
161*1249c01aSStefan Metzmacher 	return ret;
162*1249c01aSStefan Metzmacher }
163*1249c01aSStefan Metzmacher 
164*1249c01aSStefan Metzmacher static int smbdirect_accept_init_params(struct smbdirect_socket *sc)
165*1249c01aSStefan Metzmacher {
166*1249c01aSStefan Metzmacher 	const struct smbdirect_socket_parameters *sp = &sc->parameters;
167*1249c01aSStefan Metzmacher 	int max_send_sges;
168*1249c01aSStefan Metzmacher 	unsigned int maxpages;
169*1249c01aSStefan Metzmacher 
170*1249c01aSStefan Metzmacher 	/* need 3 more sge. because a SMB_DIRECT header, SMB2 header,
171*1249c01aSStefan Metzmacher 	 * SMB2 response could be mapped.
172*1249c01aSStefan Metzmacher 	 */
173*1249c01aSStefan Metzmacher 	max_send_sges = DIV_ROUND_UP(sp->max_send_size, PAGE_SIZE) + 3;
174*1249c01aSStefan Metzmacher 	if (max_send_sges > SMBDIRECT_SEND_IO_MAX_SGE) {
175*1249c01aSStefan Metzmacher 		pr_err("max_send_size %d is too large\n", sp->max_send_size);
176*1249c01aSStefan Metzmacher 		return -EINVAL;
177*1249c01aSStefan Metzmacher 	}
178*1249c01aSStefan Metzmacher 
179*1249c01aSStefan Metzmacher 	/*
180*1249c01aSStefan Metzmacher 	 * There is only a single batch credit
181*1249c01aSStefan Metzmacher 	 */
182*1249c01aSStefan Metzmacher 	atomic_set(&sc->send_io.bcredits.count, 1);
183*1249c01aSStefan Metzmacher 
184*1249c01aSStefan Metzmacher 	/*
185*1249c01aSStefan Metzmacher 	 * Initialize the local credits to post
186*1249c01aSStefan Metzmacher 	 * IB_WR_SEND[_WITH_INV].
187*1249c01aSStefan Metzmacher 	 */
188*1249c01aSStefan Metzmacher 	atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target);
189*1249c01aSStefan Metzmacher 
190*1249c01aSStefan Metzmacher 	if (sp->max_read_write_size) {
191*1249c01aSStefan Metzmacher 		maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE);
192*1249c01aSStefan Metzmacher 		sc->rw_io.credits.max = rdma_rw_mr_factor(sc->ib.dev,
193*1249c01aSStefan Metzmacher 							  sc->rdma.cm_id->port_num,
194*1249c01aSStefan Metzmacher 							  maxpages);
195*1249c01aSStefan Metzmacher 		sc->rw_io.credits.num_pages = DIV_ROUND_UP(maxpages, sc->rw_io.credits.max);
196*1249c01aSStefan Metzmacher 		/* add one extra in order to handle unaligned pages */
197*1249c01aSStefan Metzmacher 		sc->rw_io.credits.max += 1;
198*1249c01aSStefan Metzmacher 	}
199*1249c01aSStefan Metzmacher 
200*1249c01aSStefan Metzmacher 	sc->recv_io.credits.target = 1;
201*1249c01aSStefan Metzmacher 
202*1249c01aSStefan Metzmacher 	atomic_set(&sc->rw_io.credits.count, sc->rw_io.credits.max);
203*1249c01aSStefan Metzmacher 
204*1249c01aSStefan Metzmacher 	return 0;
205*1249c01aSStefan Metzmacher }
206*1249c01aSStefan Metzmacher 
207*1249c01aSStefan Metzmacher static void smbdirect_accept_negotiate_recv_work(struct work_struct *work);
208*1249c01aSStefan Metzmacher 
209*1249c01aSStefan Metzmacher static void smbdirect_accept_negotiate_recv_done(struct ib_cq *cq, struct ib_wc *wc)
210*1249c01aSStefan Metzmacher {
211*1249c01aSStefan Metzmacher 	struct smbdirect_recv_io *recv_io =
212*1249c01aSStefan Metzmacher 		container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
213*1249c01aSStefan Metzmacher 	struct smbdirect_socket *sc = recv_io->socket;
214*1249c01aSStefan Metzmacher 	unsigned long flags;
215*1249c01aSStefan Metzmacher 
216*1249c01aSStefan Metzmacher 	if (unlikely(wc->status != IB_WC_SUCCESS || WARN_ON_ONCE(wc->opcode != IB_WC_RECV))) {
217*1249c01aSStefan Metzmacher 		if (wc->status != IB_WC_WR_FLUSH_ERR)
218*1249c01aSStefan Metzmacher 			smbdirect_log_rdma_recv(sc, SMBDIRECT_LOG_ERR,
219*1249c01aSStefan Metzmacher 				"wc->status=%s (%d) wc->opcode=%d\n",
220*1249c01aSStefan Metzmacher 				ib_wc_status_msg(wc->status), wc->status, wc->opcode);
221*1249c01aSStefan Metzmacher 		goto error;
222*1249c01aSStefan Metzmacher 	}
223*1249c01aSStefan Metzmacher 
224*1249c01aSStefan Metzmacher 	smbdirect_log_rdma_recv(sc, SMBDIRECT_LOG_INFO,
225*1249c01aSStefan Metzmacher 		"smbdirect_recv_io completed. status='%s (%d)', opcode=%d\n",
226*1249c01aSStefan Metzmacher 		ib_wc_status_msg(wc->status), wc->status, wc->opcode);
227*1249c01aSStefan Metzmacher 
228*1249c01aSStefan Metzmacher 	/*
229*1249c01aSStefan Metzmacher 	 * This is an internal error!
230*1249c01aSStefan Metzmacher 	 */
231*1249c01aSStefan Metzmacher 	if (WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_NEGOTIATE_REQ))
232*1249c01aSStefan Metzmacher 		goto error;
233*1249c01aSStefan Metzmacher 
234*1249c01aSStefan Metzmacher 	/*
235*1249c01aSStefan Metzmacher 	 * Don't reset timer to the keepalive interval in
236*1249c01aSStefan Metzmacher 	 * this will be done in smbdirect_accept_direct_negotiate_recv_work.
237*1249c01aSStefan Metzmacher 	 */
238*1249c01aSStefan Metzmacher 
239*1249c01aSStefan Metzmacher 	ib_dma_sync_single_for_cpu(sc->ib.dev,
240*1249c01aSStefan Metzmacher 				   recv_io->sge.addr,
241*1249c01aSStefan Metzmacher 				   recv_io->sge.length,
242*1249c01aSStefan Metzmacher 				   DMA_FROM_DEVICE);
243*1249c01aSStefan Metzmacher 
244*1249c01aSStefan Metzmacher 	/*
245*1249c01aSStefan Metzmacher 	 * Only remember recv_io if it has enough bytes,
246*1249c01aSStefan Metzmacher 	 * this gives smbdirect_accept_negotiate_recv_work enough
247*1249c01aSStefan Metzmacher 	 * information in order to disconnect if it was not
248*1249c01aSStefan Metzmacher 	 * valid.
249*1249c01aSStefan Metzmacher 	 */
250*1249c01aSStefan Metzmacher 	sc->recv_io.reassembly.full_packet_received = true;
251*1249c01aSStefan Metzmacher 	if (wc->byte_len >= sizeof(struct smbdirect_negotiate_req))
252*1249c01aSStefan Metzmacher 		smbdirect_connection_reassembly_append_recv_io(sc, recv_io, 0);
253*1249c01aSStefan Metzmacher 	else
254*1249c01aSStefan Metzmacher 		smbdirect_connection_put_recv_io(recv_io);
255*1249c01aSStefan Metzmacher 
256*1249c01aSStefan Metzmacher 	/*
257*1249c01aSStefan Metzmacher 	 * Some drivers (at least mlx5_ib and irdma) might post a
258*1249c01aSStefan Metzmacher 	 * recv completion before RDMA_CM_EVENT_ESTABLISHED,
259*1249c01aSStefan Metzmacher 	 * we need to adjust our expectation in that case.
260*1249c01aSStefan Metzmacher 	 *
261*1249c01aSStefan Metzmacher 	 * So we defer further processing of the negotiation
262*1249c01aSStefan Metzmacher 	 * to smbdirect_accept_negotiate_recv_work().
263*1249c01aSStefan Metzmacher 	 *
264*1249c01aSStefan Metzmacher 	 * If we are already in SMBDIRECT_SOCKET_NEGOTIATE_NEEDED
265*1249c01aSStefan Metzmacher 	 * we queue the work directly otherwise
266*1249c01aSStefan Metzmacher 	 * smbdirect_accept_rdma_event_handler() will do it, when
267*1249c01aSStefan Metzmacher 	 * RDMA_CM_EVENT_ESTABLISHED arrived.
268*1249c01aSStefan Metzmacher 	 */
269*1249c01aSStefan Metzmacher 	spin_lock_irqsave(&sc->connect.lock, flags);
270*1249c01aSStefan Metzmacher 	if (!sc->first_error) {
271*1249c01aSStefan Metzmacher 		INIT_WORK(&sc->connect.work, smbdirect_accept_negotiate_recv_work);
272*1249c01aSStefan Metzmacher 		if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)
273*1249c01aSStefan Metzmacher 			queue_work(sc->workqueues.accept, &sc->connect.work);
274*1249c01aSStefan Metzmacher 	}
275*1249c01aSStefan Metzmacher 	spin_unlock_irqrestore(&sc->connect.lock, flags);
276*1249c01aSStefan Metzmacher 
277*1249c01aSStefan Metzmacher 	return;
278*1249c01aSStefan Metzmacher 
279*1249c01aSStefan Metzmacher error:
280*1249c01aSStefan Metzmacher 	/*
281*1249c01aSStefan Metzmacher 	 * recv_io.posted.refill_work is still disabled,
282*1249c01aSStefan Metzmacher 	 * so smbdirect_connection_put_recv_io() won't
283*1249c01aSStefan Metzmacher 	 * start it.
284*1249c01aSStefan Metzmacher 	 */
285*1249c01aSStefan Metzmacher 	smbdirect_connection_put_recv_io(recv_io);
286*1249c01aSStefan Metzmacher 	smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
287*1249c01aSStefan Metzmacher }
288*1249c01aSStefan Metzmacher 
289*1249c01aSStefan Metzmacher static void smbdirect_accept_negotiate_recv_work(struct work_struct *work)
290*1249c01aSStefan Metzmacher {
291*1249c01aSStefan Metzmacher 	struct smbdirect_socket *sc =
292*1249c01aSStefan Metzmacher 		container_of(work, struct smbdirect_socket, connect.work);
293*1249c01aSStefan Metzmacher 	struct smbdirect_socket_parameters *sp = &sc->parameters;
294*1249c01aSStefan Metzmacher 	struct smbdirect_recv_io *recv_io;
295*1249c01aSStefan Metzmacher 	struct smbdirect_negotiate_req *nreq;
296*1249c01aSStefan Metzmacher 	unsigned long flags;
297*1249c01aSStefan Metzmacher 	u16 min_version;
298*1249c01aSStefan Metzmacher 	u16 max_version;
299*1249c01aSStefan Metzmacher 	u16 credits_requested;
300*1249c01aSStefan Metzmacher 	u32 preferred_send_size;
301*1249c01aSStefan Metzmacher 	u32 max_receive_size;
302*1249c01aSStefan Metzmacher 	u32 max_fragmented_size;
303*1249c01aSStefan Metzmacher 	u32 ntstatus;
304*1249c01aSStefan Metzmacher 
305*1249c01aSStefan Metzmacher 	if (sc->first_error)
306*1249c01aSStefan Metzmacher 		return;
307*1249c01aSStefan Metzmacher 
308*1249c01aSStefan Metzmacher 	/*
309*1249c01aSStefan Metzmacher 	 * make sure we won't start again...
310*1249c01aSStefan Metzmacher 	 */
311*1249c01aSStefan Metzmacher 	disable_work(work);
312*1249c01aSStefan Metzmacher 
313*1249c01aSStefan Metzmacher 	/*
314*1249c01aSStefan Metzmacher 	 * Reset timer to the keepalive interval in
315*1249c01aSStefan Metzmacher 	 * order to trigger our next keepalive message.
316*1249c01aSStefan Metzmacher 	 */
317*1249c01aSStefan Metzmacher 	sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE;
318*1249c01aSStefan Metzmacher 	mod_delayed_work(sc->workqueues.idle, &sc->idle.timer_work,
319*1249c01aSStefan Metzmacher 			 msecs_to_jiffies(sp->keepalive_interval_msec));
320*1249c01aSStefan Metzmacher 
321*1249c01aSStefan Metzmacher 	/*
322*1249c01aSStefan Metzmacher 	 * If smbdirect_accept_negotiate_recv_done() detected an
323*1249c01aSStefan Metzmacher 	 * invalid request we want to disconnect.
324*1249c01aSStefan Metzmacher 	 */
325*1249c01aSStefan Metzmacher 	recv_io = smbdirect_connection_reassembly_first_recv_io(sc);
326*1249c01aSStefan Metzmacher 	if (!recv_io) {
327*1249c01aSStefan Metzmacher 		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
328*1249c01aSStefan Metzmacher 		return;
329*1249c01aSStefan Metzmacher 	}
330*1249c01aSStefan Metzmacher 	spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
331*1249c01aSStefan Metzmacher 	sc->recv_io.reassembly.queue_length--;
332*1249c01aSStefan Metzmacher 	list_del(&recv_io->list);
333*1249c01aSStefan Metzmacher 	spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
334*1249c01aSStefan Metzmacher 	smbdirect_connection_put_recv_io(recv_io);
335*1249c01aSStefan Metzmacher 
336*1249c01aSStefan Metzmacher 	if (SMBDIRECT_CHECK_STATUS_DISCONNECT(sc, SMBDIRECT_SOCKET_NEGOTIATE_NEEDED))
337*1249c01aSStefan Metzmacher 		return;
338*1249c01aSStefan Metzmacher 	sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;
339*1249c01aSStefan Metzmacher 
340*1249c01aSStefan Metzmacher 	/*
341*1249c01aSStefan Metzmacher 	 * Note recv_io is already part of the free list,
342*1249c01aSStefan Metzmacher 	 * as we just called smbdirect_connection_put_recv_io(),
343*1249c01aSStefan Metzmacher 	 * but it won't be reused before we call
344*1249c01aSStefan Metzmacher 	 * smbdirect_connection_recv_io_refill() below.
345*1249c01aSStefan Metzmacher 	 */
346*1249c01aSStefan Metzmacher 
347*1249c01aSStefan Metzmacher 	nreq = (struct smbdirect_negotiate_req *)recv_io->packet;
348*1249c01aSStefan Metzmacher 	min_version = le16_to_cpu(nreq->min_version);
349*1249c01aSStefan Metzmacher 	max_version = le16_to_cpu(nreq->max_version);
350*1249c01aSStefan Metzmacher 	credits_requested = le16_to_cpu(nreq->credits_requested);
351*1249c01aSStefan Metzmacher 	preferred_send_size = le32_to_cpu(nreq->preferred_send_size);
352*1249c01aSStefan Metzmacher 	max_receive_size = le32_to_cpu(nreq->max_receive_size);
353*1249c01aSStefan Metzmacher 	max_fragmented_size = le32_to_cpu(nreq->max_fragmented_size);
354*1249c01aSStefan Metzmacher 
355*1249c01aSStefan Metzmacher 	smbdirect_log_negotiate(sc, SMBDIRECT_LOG_INFO,
356*1249c01aSStefan Metzmacher 		"ReqIn: %s%x, %s%x, %s%u, %s%u, %s%u, %s%u\n",
357*1249c01aSStefan Metzmacher 		"MinVersion=0x",
358*1249c01aSStefan Metzmacher 		le16_to_cpu(nreq->min_version),
359*1249c01aSStefan Metzmacher 		"MaxVersion=0x",
360*1249c01aSStefan Metzmacher 		le16_to_cpu(nreq->max_version),
361*1249c01aSStefan Metzmacher 		"CreditsRequested=",
362*1249c01aSStefan Metzmacher 		le16_to_cpu(nreq->credits_requested),
363*1249c01aSStefan Metzmacher 		"PreferredSendSize=",
364*1249c01aSStefan Metzmacher 		le32_to_cpu(nreq->preferred_send_size),
365*1249c01aSStefan Metzmacher 		"MaxRecvSize=",
366*1249c01aSStefan Metzmacher 		le32_to_cpu(nreq->max_receive_size),
367*1249c01aSStefan Metzmacher 		"MaxFragmentedSize=",
368*1249c01aSStefan Metzmacher 		le32_to_cpu(nreq->max_fragmented_size));
369*1249c01aSStefan Metzmacher 
370*1249c01aSStefan Metzmacher 	if (!(min_version <= SMBDIRECT_V1 && max_version >= SMBDIRECT_V1)) {
371*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
372*1249c01aSStefan Metzmacher 			"invalid: min_version=0x%x max_version=0x%x\n",
373*1249c01aSStefan Metzmacher 			min_version, max_version);
374*1249c01aSStefan Metzmacher 		ntstatus = le32_to_cpu(STATUS_NOT_SUPPORTED);
375*1249c01aSStefan Metzmacher 		goto not_supported;
376*1249c01aSStefan Metzmacher 	}
377*1249c01aSStefan Metzmacher 
378*1249c01aSStefan Metzmacher 	if (credits_requested == 0) {
379*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
380*1249c01aSStefan Metzmacher 			"invalid: credits_requested == 0\n");
381*1249c01aSStefan Metzmacher 		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
382*1249c01aSStefan Metzmacher 		return;
383*1249c01aSStefan Metzmacher 	}
384*1249c01aSStefan Metzmacher 
385*1249c01aSStefan Metzmacher 	if (max_receive_size < SMBDIRECT_MIN_RECEIVE_SIZE) {
386*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
387*1249c01aSStefan Metzmacher 			"invalid: max_receive_size=%u < %u\n",
388*1249c01aSStefan Metzmacher 			max_receive_size,
389*1249c01aSStefan Metzmacher 			SMBDIRECT_MIN_RECEIVE_SIZE);
390*1249c01aSStefan Metzmacher 		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
391*1249c01aSStefan Metzmacher 		return;
392*1249c01aSStefan Metzmacher 	}
393*1249c01aSStefan Metzmacher 
394*1249c01aSStefan Metzmacher 	if (max_fragmented_size < SMBDIRECT_MIN_FRAGMENTED_SIZE) {
395*1249c01aSStefan Metzmacher 		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
396*1249c01aSStefan Metzmacher 			"invalid: max_fragmented_size=%u < %u\n",
397*1249c01aSStefan Metzmacher 			max_fragmented_size,
398*1249c01aSStefan Metzmacher 			SMBDIRECT_MIN_FRAGMENTED_SIZE);
399*1249c01aSStefan Metzmacher 		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
400*1249c01aSStefan Metzmacher 		return;
401*1249c01aSStefan Metzmacher 	}
402*1249c01aSStefan Metzmacher 
403*1249c01aSStefan Metzmacher 	/*
404*1249c01aSStefan Metzmacher 	 * At least the value of SMBDIRECT_MIN_RECEIVE_SIZE is used.
405*1249c01aSStefan Metzmacher 	 */
406*1249c01aSStefan Metzmacher 	sp->max_recv_size = min_t(u32, sp->max_recv_size, preferred_send_size);
407*1249c01aSStefan Metzmacher 	sp->max_recv_size = max_t(u32, sp->max_recv_size, SMBDIRECT_MIN_RECEIVE_SIZE);
408*1249c01aSStefan Metzmacher 
409*1249c01aSStefan Metzmacher 	/*
410*1249c01aSStefan Metzmacher 	 * The maximum fragmented upper-layer payload receive size supported
411*1249c01aSStefan Metzmacher 	 *
412*1249c01aSStefan Metzmacher 	 * Assume max_payload_per_credit is
413*1249c01aSStefan Metzmacher 	 * smb_direct_receive_credit_max - 24 = 1340
414*1249c01aSStefan Metzmacher 	 *
415*1249c01aSStefan Metzmacher 	 * The maximum number would be
416*1249c01aSStefan Metzmacher 	 * smb_direct_receive_credit_max * max_payload_per_credit
417*1249c01aSStefan Metzmacher 	 *
418*1249c01aSStefan Metzmacher 	 *                       1340 * 255 = 341700 (0x536C4)
419*1249c01aSStefan Metzmacher 	 *
420*1249c01aSStefan Metzmacher 	 * The minimum value from the spec is 131072 (0x20000)
421*1249c01aSStefan Metzmacher 	 *
422*1249c01aSStefan Metzmacher 	 * For now we use the logic we used in ksmbd before:
423*1249c01aSStefan Metzmacher 	 *                 (1364 * 255) / 2 = 173910 (0x2A756)
424*1249c01aSStefan Metzmacher 	 *
425*1249c01aSStefan Metzmacher 	 * We need to adjust this here in case the peer
426*1249c01aSStefan Metzmacher 	 * lowered sp->max_recv_size.
427*1249c01aSStefan Metzmacher 	 *
428*1249c01aSStefan Metzmacher 	 * TODO: instead of adjusting max_fragmented_recv_size
429*1249c01aSStefan Metzmacher 	 * we should adjust the number of available buffers,
430*1249c01aSStefan Metzmacher 	 * but for now we keep the logic as it was used
431*1249c01aSStefan Metzmacher 	 * in ksmbd before.
432*1249c01aSStefan Metzmacher 	 */
433*1249c01aSStefan Metzmacher 	sp->max_fragmented_recv_size = (sp->recv_credit_max * sp->max_recv_size) / 2;
434*1249c01aSStefan Metzmacher 
435*1249c01aSStefan Metzmacher 	/*
436*1249c01aSStefan Metzmacher 	 * We take the value from the peer, which is checked to be higher than 0,
437*1249c01aSStefan Metzmacher 	 * but we limit it to the max value we support in order to have
438*1249c01aSStefan Metzmacher 	 * the main logic simpler.
439*1249c01aSStefan Metzmacher 	 */
440*1249c01aSStefan Metzmacher 	sc->recv_io.credits.target = credits_requested;
441*1249c01aSStefan Metzmacher 	sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target,
442*1249c01aSStefan Metzmacher 					   sp->recv_credit_max);
443*1249c01aSStefan Metzmacher 
444*1249c01aSStefan Metzmacher 	/*
445*1249c01aSStefan Metzmacher 	 * Note nreq->max_receive_size was already checked against
446*1249c01aSStefan Metzmacher 	 * SMBDIRECT_MIN_RECEIVE_SIZE above.
447*1249c01aSStefan Metzmacher 	 */
448*1249c01aSStefan Metzmacher 	sp->max_send_size = min_t(u32, sp->max_send_size, max_receive_size);
449*1249c01aSStefan Metzmacher 
450*1249c01aSStefan Metzmacher 	/*
451*1249c01aSStefan Metzmacher 	 * Note nreq->max_fragmented_size was already checked against
452*1249c01aSStefan Metzmacher 	 * SMBDIRECT_MIN_FRAGMENTED_SIZE above.
453*1249c01aSStefan Metzmacher 	 */
454*1249c01aSStefan Metzmacher 	sp->max_fragmented_send_size = max_fragmented_size;
455*1249c01aSStefan Metzmacher 
456*1249c01aSStefan Metzmacher 	if (sc->accept.listener) {
457*1249c01aSStefan Metzmacher 		struct smbdirect_socket *lsc = sc->accept.listener;
458*1249c01aSStefan Metzmacher 		unsigned long flags;
459*1249c01aSStefan Metzmacher 
460*1249c01aSStefan Metzmacher 		spin_lock_irqsave(&lsc->listen.lock, flags);
461*1249c01aSStefan Metzmacher 		list_del(&sc->accept.list);
462*1249c01aSStefan Metzmacher 		list_add_tail(&sc->accept.list, &lsc->listen.ready);
463*1249c01aSStefan Metzmacher 		wake_up(&lsc->listen.wait_queue);
464*1249c01aSStefan Metzmacher 		spin_unlock_irqrestore(&lsc->listen.lock, flags);
465*1249c01aSStefan Metzmacher 
466*1249c01aSStefan Metzmacher 		/*
467*1249c01aSStefan Metzmacher 		 * smbdirect_socket_accept() will call
468*1249c01aSStefan Metzmacher 		 * smbdirect_accept_negotiate_finish(nsc, 0);
469*1249c01aSStefan Metzmacher 		 *
470*1249c01aSStefan Metzmacher 		 * So that we don't send the negotiation
471*1249c01aSStefan Metzmacher 		 * response that grants credits to the peer
472*1249c01aSStefan Metzmacher 		 * before the socket is accepted by the
473*1249c01aSStefan Metzmacher 		 * application.
474*1249c01aSStefan Metzmacher 		 */
475*1249c01aSStefan Metzmacher 		return;
476*1249c01aSStefan Metzmacher 	}
477*1249c01aSStefan Metzmacher 
478*1249c01aSStefan Metzmacher 	ntstatus = le32_to_cpu(STATUS_SUCCESS);
479*1249c01aSStefan Metzmacher 
480*1249c01aSStefan Metzmacher not_supported:
481*1249c01aSStefan Metzmacher 	smbdirect_accept_negotiate_finish(sc, ntstatus);
482*1249c01aSStefan Metzmacher }
483*1249c01aSStefan Metzmacher 
/*
 * smbdirect_accept_negotiate_finish() - build and post the SMB-Direct
 * negotiate response on an accepted connection.
 * @sc:       accepted (server-side) smbdirect socket
 * @ntstatus: NT status to report to the peer; 0 means success
 *
 * On success the receive path is switched to expect data_transfer
 * messages, receive buffers are posted and the matching credits are
 * granted in the response. On failure only a response carrying
 * @ntstatus (with all other fields zeroed) is sent.
 *
 * The send completion is handled asynchronously by
 * smbdirect_accept_negotiate_send_done(); any local failure schedules
 * connection cleanup via smbdirect_socket_schedule_cleanup() instead.
 */
void smbdirect_accept_negotiate_finish(struct smbdirect_socket *sc, u32 ntstatus)
{
	const struct smbdirect_socket_parameters *sp = &sc->parameters;
	struct smbdirect_recv_io *recv_io;
	struct smbdirect_send_io *send_io;
	struct smbdirect_negotiate_resp *nrep;
	int posted;
	u16 new_credits;
	int ret;

	/*
	 * With a failure status we skip all receive setup and credit
	 * granting and only send the error response below.
	 * new_credits is never read on this path.
	 */
	if (ntstatus)
		goto not_supported;

	/*
	 * Prepare for receiving data_transfer messages
	 */
	sc->recv_io.reassembly.full_packet_received = true;
	sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER;
	list_for_each_entry(recv_io, &sc->recv_io.free.list, list)
		recv_io->cqe.done = smbdirect_connection_recv_io_done;
	recv_io = NULL;

	/*
	 * We should at least post 1 smbdirect_recv_io!
	 */
	posted = smbdirect_connection_recv_io_refill(sc);
	if (posted < 1) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
			"smbdirect_connection_recv_io_refill() failed %1pe\n",
			SMBDIRECT_DEBUG_ERR_PTR(posted));
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}

	/*
	 * The response will grant credits for all posted
	 * smbdirect_recv_io messages.
	 */
	new_credits = smbdirect_connection_grant_recv_credits(sc);

not_supported:
	send_io = smbdirect_connection_alloc_send_io(sc);
	if (IS_ERR(send_io)) {
		ret = PTR_ERR(send_io);
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
			"smbdirect_connection_alloc_send_io() failed %1pe\n",
			SMBDIRECT_DEBUG_ERR_PTR(ret));
		smbdirect_socket_schedule_cleanup(sc, ret);
		return;
	}
	send_io->cqe.done = smbdirect_accept_negotiate_send_done;

	/*
	 * Fill in the negotiate response. On error every field except
	 * the version range and status is zeroed, so the peer learns
	 * nothing about our limits and gets no credits.
	 */
	nrep = (struct smbdirect_negotiate_resp *)send_io->packet;
	nrep->min_version = cpu_to_le16(SMBDIRECT_V1);
	nrep->max_version = cpu_to_le16(SMBDIRECT_V1);
	if (ntstatus == 0) {
		nrep->negotiated_version = cpu_to_le16(SMBDIRECT_V1);
		nrep->reserved = 0;
		nrep->credits_requested = cpu_to_le16(sp->send_credit_target);
		nrep->credits_granted = cpu_to_le16(new_credits);
		nrep->status = cpu_to_le32(ntstatus);
		nrep->max_readwrite_size = cpu_to_le32(sp->max_read_write_size);
		nrep->preferred_send_size = cpu_to_le32(sp->max_send_size);
		nrep->max_receive_size = cpu_to_le32(sp->max_recv_size);
		nrep->max_fragmented_size = cpu_to_le32(sp->max_fragmented_recv_size);
	} else {
		nrep->negotiated_version = 0;
		nrep->reserved = 0;
		nrep->credits_requested = 0;
		nrep->credits_granted = 0;
		nrep->status = cpu_to_le32(ntstatus);
		nrep->max_readwrite_size = 0;
		nrep->preferred_send_size = 0;
		nrep->max_receive_size = 0;
		nrep->max_fragmented_size = 0;
	}

	smbdirect_log_negotiate(sc, SMBDIRECT_LOG_INFO,
		"RepOut: %s%x, %s%x, %s%x, %s%u, %s%u, %s%x, %s%u, %s%u, %s%u, %s%u\n",
		"MinVersion=0x",
		le16_to_cpu(nrep->min_version),
		"MaxVersion=0x",
		le16_to_cpu(nrep->max_version),
		"NegotiatedVersion=0x",
		le16_to_cpu(nrep->negotiated_version),
		"CreditsRequested=",
		le16_to_cpu(nrep->credits_requested),
		"CreditsGranted=",
		le16_to_cpu(nrep->credits_granted),
		"Status=0x",
		le32_to_cpu(nrep->status),
		"MaxReadWriteSize=",
		le32_to_cpu(nrep->max_readwrite_size),
		"PreferredSendSize=",
		le32_to_cpu(nrep->preferred_send_size),
		"MaxRecvSize=",
		le32_to_cpu(nrep->max_receive_size),
		"MaxFragmentedSize=",
		le32_to_cpu(nrep->max_fragmented_size));

	/* Map the response for the device before posting the send WR. */
	send_io->sge[0].addr = ib_dma_map_single(sc->ib.dev,
						 nrep,
						 sizeof(*nrep),
						 DMA_TO_DEVICE);
	ret = ib_dma_mapping_error(sc->ib.dev, send_io->sge[0].addr);
	if (ret) {
		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
			"ib_dma_mapping_error() failed %1pe\n",
			SMBDIRECT_DEBUG_ERR_PTR(ret));
		smbdirect_connection_free_send_io(send_io);
		smbdirect_socket_schedule_cleanup(sc, ret);
		return;
	}

	send_io->sge[0].length = sizeof(*nrep);
	send_io->sge[0].lkey = sc->ib.pd->local_dma_lkey;
	send_io->num_sge = 1;

	ib_dma_sync_single_for_device(sc->ib.dev,
				      send_io->sge[0].addr,
				      send_io->sge[0].length,
				      DMA_TO_DEVICE);

	/* Single signaled SEND; completion runs our cqe.done callback. */
	send_io->wr.next = NULL;
	send_io->wr.wr_cqe = &send_io->cqe;
	send_io->wr.sg_list = send_io->sge;
	send_io->wr.num_sge = send_io->num_sge;
	send_io->wr.opcode = IB_WR_SEND;
	send_io->wr.send_flags = IB_SEND_SIGNALED;

	ret = smbdirect_connection_post_send_wr(sc, &send_io->wr);
	if (ret) {
		/* if we reach here, post send failed */
		smbdirect_log_rdma_send(sc, SMBDIRECT_LOG_ERR,
			"smbdirect_connection_post_send_wr() failed %1pe\n",
			SMBDIRECT_DEBUG_ERR_PTR(ret));
		/*
		 * Note smbdirect_connection_free_send_io()
		 * does ib_dma_unmap_page()
		 */
		smbdirect_connection_free_send_io(send_io);
		smbdirect_socket_schedule_cleanup(sc, ret);
		return;
	}

	/*
	 * smbdirect_accept_negotiate_send_done
	 * will do all remaining work...
	 */
}
634*1249c01aSStefan Metzmacher 
/*
 * Send completion handler for the negotiate response posted by
 * smbdirect_accept_negotiate_finish().
 *
 * Frees the smbdirect_send_io in all cases. On a send failure the
 * connection is torn down. On send success the connection is either
 * completed (NT_STATUS_OK was sent) or torn down (we reported an
 * error status to the peer, so we must disconnect now).
 */
static void smbdirect_accept_negotiate_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbdirect_send_io *send_io =
		container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
	struct smbdirect_socket *sc = send_io->socket;
	struct smbdirect_negotiate_resp *nrep;
	u32 ntstatus;

	smbdirect_log_rdma_send(sc, SMBDIRECT_LOG_INFO,
		"smbdirect_send_io completed. status='%s (%d)', opcode=%d\n",
		ib_wc_status_msg(wc->status), wc->status, wc->opcode);

	/* Remember the status we sent before send_io is freed below. */
	nrep = (struct smbdirect_negotiate_resp *)send_io->packet;
	ntstatus = le32_to_cpu(nrep->status);

	/* Note this frees wc->wr_cqe, but not wc */
	smbdirect_connection_free_send_io(send_io);
	atomic_dec(&sc->send_io.pending.count);

	if (unlikely(wc->status != IB_WC_SUCCESS || WARN_ON_ONCE(wc->opcode != IB_WC_SEND))) {
		/* Flush errors are expected during teardown; don't log them. */
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			smbdirect_log_rdma_send(sc, SMBDIRECT_LOG_ERR,
				"wc->status=%s (%d) wc->opcode=%d\n",
				ib_wc_status_msg(wc->status), wc->status, wc->opcode);
		smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
		return;
	}

	/*
	 * If we send a smbdirect_negotiate_resp without NT_STATUS_OK (0)
	 * we need to disconnect now.
	 *
	 * Otherwise smbdirect_connection_negotiation_done()
	 * will setup all required things and wake up
	 * the waiter.
	 */
	if (ntstatus)
		smbdirect_socket_schedule_cleanup(sc, -EOPNOTSUPP);
	else
		smbdirect_connection_negotiation_done(sc);
}
676*1249c01aSStefan Metzmacher 
/*
 * RDMA CM event handler used while accepting a connection.
 * @id:    rdma_cm_id of the connection; id->context holds the
 *         smbdirect socket.
 * @event: the RDMA CM event to handle.
 *
 * Called by the RDMA CM with id_priv->handler_mutex held, never from
 * interrupt context. Any unexpected event (or a non-zero event
 * status) aborts the connection via
 * smbdirect_socket_schedule_cleanup().
 *
 * Always returns 0 so the CM core does not destroy the id itself.
 */
static int smbdirect_accept_rdma_event_handler(struct rdma_cm_id *id,
					       struct rdma_cm_event *event)
{
	struct smbdirect_socket *sc = id->context;
	unsigned long flags;

	/*
	 * cma_cm_event_handler() has
	 * lockdep_assert_held(&id_priv->handler_mutex);
	 *
	 * Mutexes are not allowed in interrupts,
	 * and we rely on not being in an interrupt here,
	 * as we might sleep.
	 *
	 * We didn't timeout so we cancel our idle timer,
	 * it will be scheduled again if needed.
	 */
	WARN_ON_ONCE(in_interrupt());

	if (event->status || event->event != sc->rdma.expected_event) {
		int ret = -ECONNABORTED;

		/* Map well-known events to more specific errors. */
		if (event->event == RDMA_CM_EVENT_REJECTED)
			ret = -ECONNREFUSED;
		if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
			ret = -ENETDOWN;
		/* Prefer the event's own status if it is a valid errno. */
		if (IS_ERR(SMBDIRECT_DEBUG_ERR_PTR(event->status)))
			ret = event->status;

		smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_ERR,
			"%s (first_error=%1pe, expected=%s) => event=%s status=%d => ret=%1pe\n",
			smbdirect_socket_status_string(sc->status),
			SMBDIRECT_DEBUG_ERR_PTR(sc->first_error),
			rdma_event_msg(sc->rdma.expected_event),
			rdma_event_msg(event->event),
			event->status,
			SMBDIRECT_DEBUG_ERR_PTR(ret));

		smbdirect_socket_schedule_cleanup(sc, ret);
		return 0;
	}

	smbdirect_log_rdma_event(sc, SMBDIRECT_LOG_INFO,
		"%s (first_error=%1pe) event=%s\n",
		smbdirect_socket_status_string(sc->status),
		SMBDIRECT_DEBUG_ERR_PTR(sc->first_error),
		rdma_event_msg(event->event));

	/* A fatal error was already recorded; cleanup is on its way. */
	if (sc->first_error)
		return 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		smbdirect_connection_rdma_established(sc);

		/*
		 * Some drivers (at least mlx5_ib and irdma) might post a
		 * recv completion before RDMA_CM_EVENT_ESTABLISHED,
		 * we need to adjust our expectation in that case.
		 *
		 * If smbdirect_accept_negotiate_recv_done was called first
		 * it initialized sc->connect.work only for us to
		 * start, so that we turned into
		 * SMBDIRECT_SOCKET_NEGOTIATE_NEEDED, before
		 * smbdirect_accept_negotiate_recv_work() runs.
		 *
		 * If smbdirect_accept_negotiate_recv_done didn't happen
		 * yet. sc->connect.work is still be disabled and
		 * queue_work() is a no-op.
		 */
		if (SMBDIRECT_CHECK_STATUS_DISCONNECT(sc, SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING))
			return 0;
		sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
		/* first_error and the work queueing must be checked atomically. */
		spin_lock_irqsave(&sc->connect.lock, flags);
		if (!sc->first_error)
			queue_work(sc->workqueues.accept, &sc->connect.work);
		spin_unlock_irqrestore(&sc->connect.lock, flags);

		/*
		 * wait for smbdirect_accept_negotiate_recv_done()
		 * to get the negotiate request.
		 */
		return 0;

	default:
		break;
	}

	/*
	 * This is an internal error
	 */
	WARN_ON_ONCE(sc->rdma.expected_event != RDMA_CM_EVENT_ESTABLISHED);
	smbdirect_socket_schedule_cleanup(sc, -ECONNABORTED);
	return 0;
}
772*1249c01aSStefan Metzmacher 
773*1249c01aSStefan Metzmacher static long smbdirect_socket_wait_for_accept(struct smbdirect_socket *lsc, long timeo)
774*1249c01aSStefan Metzmacher {
775*1249c01aSStefan Metzmacher 	long ret;
776*1249c01aSStefan Metzmacher 
777*1249c01aSStefan Metzmacher 	ret = wait_event_interruptible_timeout(lsc->listen.wait_queue,
778*1249c01aSStefan Metzmacher 					       !list_empty_careful(&lsc->listen.ready) ||
779*1249c01aSStefan Metzmacher 					       lsc->status != SMBDIRECT_SOCKET_LISTENING ||
780*1249c01aSStefan Metzmacher 					       lsc->first_error,
781*1249c01aSStefan Metzmacher 					       timeo);
782*1249c01aSStefan Metzmacher 	if (lsc->status != SMBDIRECT_SOCKET_LISTENING)
783*1249c01aSStefan Metzmacher 		return -EINVAL;
784*1249c01aSStefan Metzmacher 	if (lsc->first_error)
785*1249c01aSStefan Metzmacher 		return lsc->first_error;
786*1249c01aSStefan Metzmacher 	if (!ret)
787*1249c01aSStefan Metzmacher 		ret = -ETIMEDOUT;
788*1249c01aSStefan Metzmacher 	if (ret < 0)
789*1249c01aSStefan Metzmacher 		return ret;
790*1249c01aSStefan Metzmacher 
791*1249c01aSStefan Metzmacher 	return 0;
792*1249c01aSStefan Metzmacher }
793*1249c01aSStefan Metzmacher 
794*1249c01aSStefan Metzmacher struct smbdirect_socket *smbdirect_socket_accept(struct smbdirect_socket *lsc,
795*1249c01aSStefan Metzmacher 						 long timeo,
796*1249c01aSStefan Metzmacher 						 struct proto_accept_arg *arg)
797*1249c01aSStefan Metzmacher {
798*1249c01aSStefan Metzmacher 	struct smbdirect_socket *nsc;
799*1249c01aSStefan Metzmacher 	unsigned long flags;
800*1249c01aSStefan Metzmacher 
801*1249c01aSStefan Metzmacher 	if (lsc->status != SMBDIRECT_SOCKET_LISTENING) {
802*1249c01aSStefan Metzmacher 		arg->err = -EINVAL;
803*1249c01aSStefan Metzmacher 		return NULL;
804*1249c01aSStefan Metzmacher 	}
805*1249c01aSStefan Metzmacher 
806*1249c01aSStefan Metzmacher 	if (lsc->first_error) {
807*1249c01aSStefan Metzmacher 		arg->err = lsc->first_error;
808*1249c01aSStefan Metzmacher 		return NULL;
809*1249c01aSStefan Metzmacher 	}
810*1249c01aSStefan Metzmacher 
811*1249c01aSStefan Metzmacher 	if (list_empty_careful(&lsc->listen.ready)) {
812*1249c01aSStefan Metzmacher 		int ret;
813*1249c01aSStefan Metzmacher 
814*1249c01aSStefan Metzmacher 		if (timeo == 0) {
815*1249c01aSStefan Metzmacher 			arg->err = -EAGAIN;
816*1249c01aSStefan Metzmacher 			return NULL;
817*1249c01aSStefan Metzmacher 		}
818*1249c01aSStefan Metzmacher 
819*1249c01aSStefan Metzmacher 		ret = smbdirect_socket_wait_for_accept(lsc, timeo);
820*1249c01aSStefan Metzmacher 		if (ret) {
821*1249c01aSStefan Metzmacher 			arg->err = ret;
822*1249c01aSStefan Metzmacher 			return NULL;
823*1249c01aSStefan Metzmacher 		}
824*1249c01aSStefan Metzmacher 	}
825*1249c01aSStefan Metzmacher 
826*1249c01aSStefan Metzmacher 	spin_lock_irqsave(&lsc->listen.lock, flags);
827*1249c01aSStefan Metzmacher 	nsc = list_first_entry_or_null(&lsc->listen.ready,
828*1249c01aSStefan Metzmacher 				       struct smbdirect_socket,
829*1249c01aSStefan Metzmacher 				       accept.list);
830*1249c01aSStefan Metzmacher 	if (nsc) {
831*1249c01aSStefan Metzmacher 		nsc->accept.listener = NULL;
832*1249c01aSStefan Metzmacher 		list_del_init_careful(&nsc->accept.list);
833*1249c01aSStefan Metzmacher 		arg->is_empty = list_empty_careful(&lsc->listen.ready);
834*1249c01aSStefan Metzmacher 	}
835*1249c01aSStefan Metzmacher 	spin_unlock_irqrestore(&lsc->listen.lock, flags);
836*1249c01aSStefan Metzmacher 	if (!nsc) {
837*1249c01aSStefan Metzmacher 		arg->err = -EAGAIN;
838*1249c01aSStefan Metzmacher 		return NULL;
839*1249c01aSStefan Metzmacher 	}
840*1249c01aSStefan Metzmacher 
841*1249c01aSStefan Metzmacher 	/*
842*1249c01aSStefan Metzmacher 	 * We did not send the negotiation response
843*1249c01aSStefan Metzmacher 	 * yet, so we did not grant any credits to the client,
844*1249c01aSStefan Metzmacher 	 * so it didn't grant any credits to us.
845*1249c01aSStefan Metzmacher 	 *
846*1249c01aSStefan Metzmacher 	 * The caller expects a connected socket
847*1249c01aSStefan Metzmacher 	 * now as there are no credits anyway.
848*1249c01aSStefan Metzmacher 	 *
849*1249c01aSStefan Metzmacher 	 * Then we send the negotiation response in
850*1249c01aSStefan Metzmacher 	 * order to grant credits to the peer.
851*1249c01aSStefan Metzmacher 	 */
852*1249c01aSStefan Metzmacher 	nsc->status = SMBDIRECT_SOCKET_CONNECTED;
853*1249c01aSStefan Metzmacher 	smbdirect_accept_negotiate_finish(nsc, 0);
854*1249c01aSStefan Metzmacher 
855*1249c01aSStefan Metzmacher 	return nsc;
856*1249c01aSStefan Metzmacher }
857*1249c01aSStefan Metzmacher __SMBDIRECT_EXPORT_SYMBOL__(smbdirect_socket_accept);
858