/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by:	Dimitris Michailidis (dm@chelsio.com)
 *		Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"
static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME		"cxgb3i"
#define DRV_MODULE_DESC		"Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION	"2.0.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");
66
67 static unsigned int cxgb3i_sport_base = 20000;
68 module_param(cxgb3i_sport_base, uint, 0644);
69 MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");
70
71 static void cxgb3i_dev_open(struct t3cdev *);
72 static void cxgb3i_dev_close(struct t3cdev *);
73 static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);
74
75 static struct cxgb3_client t3_client = {
76 .name = DRV_MODULE_NAME,
77 .handlers = cxgb3i_cpl_handlers,
78 .add = cxgb3i_dev_open,
79 .remove = cxgb3i_dev_close,
80 .event_handler = cxgb3i_dev_event_handler,
81 };
82
83 static const struct scsi_host_template cxgb3i_host_template = {
84 .module = THIS_MODULE,
85 .name = DRV_MODULE_NAME,
86 .proc_name = DRV_MODULE_NAME,
87 .can_queue = CXGB3I_SCSI_HOST_QDEPTH,
88 .queuecommand = iscsi_queuecommand,
89 .change_queue_depth = scsi_change_queue_depth,
90 .sg_tablesize = SG_ALL,
91 .max_sectors = 0xFFFF,
92 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
93 .eh_timed_out = iscsi_eh_cmd_timed_out,
94 .eh_abort_handler = iscsi_eh_abort,
95 .eh_device_reset_handler = iscsi_eh_device_reset,
96 .eh_target_reset_handler = iscsi_eh_recover_target,
97 .target_alloc = iscsi_target_alloc,
98 .dma_boundary = PAGE_SIZE - 1,
99 .this_id = -1,
100 .track_queue_depth = 1,
101 .cmd_size = sizeof(struct iscsi_cmd),
102 };
103
104 static struct iscsi_transport cxgb3i_iscsi_transport = {
105 .owner = THIS_MODULE,
106 .name = DRV_MODULE_NAME,
107 /* owner and name should be set already */
108 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
109 | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
110 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
111 .attr_is_visible = cxgbi_attr_is_visible,
112 .get_host_param = cxgbi_get_host_param,
113 .set_host_param = cxgbi_set_host_param,
114 /* session management */
115 .create_session = cxgbi_create_session,
116 .destroy_session = cxgbi_destroy_session,
117 .get_session_param = iscsi_session_get_param,
118 /* connection management */
119 .create_conn = cxgbi_create_conn,
120 .bind_conn = cxgbi_bind_conn,
121 .unbind_conn = iscsi_conn_unbind,
122 .destroy_conn = iscsi_tcp_conn_teardown,
123 .start_conn = iscsi_conn_start,
124 .stop_conn = iscsi_conn_stop,
125 .get_conn_param = iscsi_conn_get_param,
126 .set_param = cxgbi_set_conn_param,
127 .get_stats = cxgbi_get_conn_stats,
128 /* pdu xmit req from user space */
129 .send_pdu = iscsi_conn_send_pdu,
130 /* task */
131 .init_task = iscsi_tcp_task_init,
132 .xmit_task = iscsi_tcp_task_xmit,
133 .cleanup_task = cxgbi_cleanup_task,
134 /* pdu */
135 .alloc_pdu = cxgbi_conn_alloc_pdu,
136 .init_pdu = cxgbi_conn_init_pdu,
137 .xmit_pdu = cxgbi_conn_xmit_pdu,
138 .parse_pdu_itt = cxgbi_parse_pdu_itt,
139 /* TCP connect/disconnect */
140 .get_ep_param = cxgbi_get_ep_param,
141 .ep_connect = cxgbi_ep_connect,
142 .ep_poll = cxgbi_ep_poll,
143 .ep_disconnect = cxgbi_ep_disconnect,
144 /* Error recovery timeout call */
145 .session_recovery_timedout = iscsi_session_recovery_timedout,
146 };
147
148 static struct scsi_transport_template *cxgb3i_stt;
149
150 /*
151 * CPL (Chelsio Protocol Language) defines a message passing interface between
152 * the host driver and Chelsio asic.
153 * The section below implments CPLs that related to iscsi tcp connection
154 * open/close/abort and data send/receive.
155 */
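
/*
 * In the per-CPL comments below, "host ->" marks messages this driver sends
 * to the adapter, and "-> host" marks messages the adapter sends to us.
 */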

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      const struct l2t_entry *e)
{
	unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

	skb->priority = CPL_PRIORITY_SETUP;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;

	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
			V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
			V_RCV_BUFSIZ(csk->rcv_win >> 10));

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
		csk, csk->state, csk->flags, csk->atid,
		&req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->mss_idx, e->idx, e->smt_idx);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
 * the write queue (i.e., after any unsent tx data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->cpl_close = NULL;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(csk->write_seq);

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"t3dev 0x%p, tid %u, skb 0x%p.\n",
		tdev, GET_TID(req), skb);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;
	struct cpl_abort_req *req;

	if (unlikely(csk->state == CTP_ABORTING || !skb))
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* Purge the send queue so we don't send anything after an abort. */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the received ABORT_REQ.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;
	u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits, dack);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
	req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return credits;
}

/*
 * CPL connection tx data: host ->
 *
 * Send iscsi PDUs via TX_DATA CPL messages.
 * Each TX_DATA consumes work request credits (wrs), so we need to keep track
 * of how many we've used so far and how many are pending (i.e., not yet acked
 * by T3).
 */

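/*
 * wrlen is the maximum size in bytes of a single TX_DATA work request
 * (the hardware reports wr_len in 8-byte flits); skb_wrs[i] is the number
 * of work requests needed to send an skb with i fragments.  init_wr_tab()
 * sizes the gather list at 3 header flits plus ceil(3*i/2) flits for i
 * entries.  E.g., with a (hypothetical) wr_len of 9 flits, 4 frags fit in
 * one WR (3 + 6 = 9 flits) while 5 frags (3 + 8 = 11 flits) need two.
 */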
static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

static void init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])		/* already initialized */
		return;
	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}
	wrlen = wr_len * 8;
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;
	struct l2t_entry *l2t = csk->l2t;

	skb_reset_transport_header(skb);
	req = __skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(csk->tid));
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) |
			V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
	req->sndseq = htonl(csk->snd_nxt);
	req->param = htonl(V_TX_PORT(l2t->smt_idx));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(csk->rss_qid));
		/* sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}
}

/*
 * push_tx_frames -- start transmit
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */

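/* ARP failure handler for data skbs: just drop the skb. */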
static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

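	/*
	 * Drain the write queue while WR credits remain.  Each skb consumes
	 * skb_wrs[frags] credits (collapsed to a single WR when the whole
	 * skb fits in one work request); credits come back through the
	 * TX_DATA_ACK path in do_wr_ack() below.
	 */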
	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (csk->wr_cred < wrs_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
				csk, skb->len, skb->data_len, frags,
				wrs_needed, csk->wr_cred);
			break;
		}

		__skb_unlink(skb, &csk->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		csk->wr_cred -= wrs_needed;
		csk->wr_una_cred += wrs_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
			"left %u, unack %u.\n",
			csk, skb->len, skb->data_len, frags, skb->csum,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if ((req_completion &&
				csk->wr_una_cred == wrs_needed) ||
			    csk->wr_una_cred >= csk->wr_max_cred / 2) {
				req_completion = 1;
				csk->wr_una_cred = 0;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
			make_tx_data_wr(csk, skb, len, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, tid 0x%x, send skb 0x%p.\n",
			csk, csk->tid, skb);
		set_arp_failure_handler(skb, arp_failure_skb_discard);
		l2t_send(csk->cdev->lldev, skb, csk->l2t);
	}
	return total_size;
}

/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message. Runs with
 * the connection lock held.
 */

static inline void free_atid(struct cxgbi_sock *csk)
{
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb3_free_atid(csk->cdev->lldev, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		atid, tid, csk, csk->state, csk->flags, rcv_isn);

	cxgbi_sock_get(csk);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
	csk->tid = tid;
	cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

	free_atid(csk);

	csk->rss_qid = G_QNUM(ntohs(skb->csum));

	spin_lock_bh(&csk->lock);
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
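	/*
	 * If the requested receive window exceeds what the RCV_BUFSIZ field
	 * can encode (M_RCV_BUFSIZ, in 1KB units), back rcv_wup off by the
	 * excess so the RX credit accounting matches what was actually
	 * advertised to the hardware.
	 */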
	if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
		csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 1);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

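/*
 * CPL_ERR_CONN_EXIST (the 4-tuple is still in use, presumably by an old
 * connection in TIME_WAIT) is not failed immediately; instead
 * do_act_open_rpl() arms this timer to retry the active open after HZ/2.
 */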
static void act_open_retry_timer(struct timer_list *t)
{
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct sk_buff *skb;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		set_arp_failure_handler(skb, act_open_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags, csk->atid, rpl->status,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(tdev, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != act_open_retry_timer) {
		csk->retry_timer.function = act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
				act_open_rpl_status_to_errno(rpl->status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_rcv_peer_close(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process CLOSE_CON_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);
	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late. These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;
	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS: the latter frees the connection
	 * expecting the ABORT_REQ to fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one, so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (csk)
		cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs. The payload may have been DDP'ed; if not, it
 * follows the BHS.
 */
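/*
 * For a coalesced message, once the leading cpl_iscsi_hdr is pulled the
 * skb is laid out roughly as
 *	[ iscsi BHS (hdr_len) ][ cpl_iscsi_hdr_norss ][ payload ]
 * with a cpl_rx_data_ddp_norss status block at the tail, which is what
 * the skb_copy_bits() offsets below assume.
 */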
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u <= %u.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, hdr_len);
		goto abort_conn;
	}
	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);

	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0) {
		pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, sizeof(ddp_cpl), err);
		goto abort_conn;
	}

	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0) {
			pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
				csk->cdev->ports[csk->port_id]->name,
				csk->tid, sizeof(data_cpl), skb->len, err);
			goto abort_conn;
		}
		data_len = ntohs(data_cpl.len);
		log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&csk->receive_queue, skb);
	cxgbi_conn_pdu_ready(csk);

	spin_unlock_bh(&csk->lock);
	return 0;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, cr %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

	cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
	__kfree_skb(skb);
	return 0;
}

/*
 * For each connection, pre-allocate the skbs needed for close/abort requests,
 * so that we can service those requests right away.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
				  GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;
	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
				      GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpl_skbs;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
				      GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpl_skbs;

	return 0;

free_cpl_skbs:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static void l2t_put(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	if (csk->l2t) {
		l2t_release(t3dev, csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

/*
 * release_offload_resources - release offload resources
 * Release the resources held by an offload connection (TID, L2T entry, etc.)
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

static void update_address(struct cxgbi_hba *chba)
{
	if (chba->ipv4addr) {
		if (chba->vdev &&
		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
			pr_info("%s set %pI4.\n",
				chba->vdev->name, &chba->ipv4addr);
		} else if (chba->ipv4addr !=
			   cxgb3i_get_private_ipv4addr(chba->ndev)) {
			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
			pr_info("%s set %pI4.\n",
				chba->ndev->name, &chba->ipv4addr);
		}
	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
		if (chba->vdev)
			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
	}
}

static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
	struct sk_buff *skb = NULL;
	int ret;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	update_address(chba);
	if (chba->ipv4addr)
		csk->saddr.sin_addr.s_addr = chba->ipv4addr;

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst, ndev,
			      &csk->daddr.sin_addr.s_addr);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	cxgbi_sock_get(csk);

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (csk->atid < 0) {
		pr_err("NO atid available.\n");
		ret = -EINVAL;
		goto put_sock;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_atid;
	}
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);
	csk->snd_win = cxgb3i_snd_win;
	csk->rcv_win = cxgb3i_rcv_win;

	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

free_atid:
	cxgb3_free_atid(t3dev, csk->atid);
put_sock:
	cxgbi_sock_put(csk);
	l2t_release(t3dev, csk->l2t);
	csk->l2t = NULL;

	return ret;
}

cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};

/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev: cxgbi adapter
 */
static int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct adap_ports port;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	int rc;

	if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
	    t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
	    t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
		return -EINVAL;
	}

	if (cxgb3i_max_connect > CXGBI_MAX_CONN)
		cxgb3i_max_connect = CXGBI_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
					 cxgb3i_max_connect);
	if (rc < 0)
		return rc;

	init_wr_tab(wr_len);
	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

/*
 * functions to program the pagepods in h/w
 */
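/*
 * Each pagepod is written with a single ULP_MEM_WRITE work request; the
 * target address and data length are encoded in 32-byte units (hence the
 * ">> 5" shifts) and the flit count in 8-byte units (">> 3", plus one
 * extra flit, presumably for the command header).
 */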
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	memset(req, 0, sizeof(*req));

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1));
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return ((struct t3cdev *)cdev->lldev)->ulp_iscsi;
}

static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	struct scatterlist *sg = ttinfo->sgl;
	struct cxgbi_pagepod *ppod;
	struct ulp_mem_io *req;
	unsigned int sg_off;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb)
			return -ENOMEM;
		ulp_mem_io_set_hdr(skb, pm_addr);
		req = (struct ulp_mem_io *)skb->head;
		ppod = (struct cxgbi_pagepod *)(req + 1);
		sg_off = i * PPOD_PAGES_MAX;
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg,
				       &sg_off);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
	return 0;
}

static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
			  struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int npods = ttinfo->npods;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, clear idx %u, npods %u.\n",
		cdev, idx, npods);

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb) {
			pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n",
			       cdev, idx, i, npods);
			continue;
		}
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
}

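/*
 * The two connection settings below live in TCB word 31: the DDP page-size
 * index in bits 31:28 (mask 0xF0000000) and the iscsi header/data digest
 * enables in bits 27:24 (mask 0x0F000000), both set via CPL_SET_TCB_FIELD.
 */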
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
				       GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up the ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * ddp_setup_conn_digest - set up connection digest settings
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 *
 * Set up the iscsi digest settings for a connection identified by tid.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
				       GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
	if (!skb)
		return -ENOMEM;

	/* set up the iscsi digest submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 *
 * Initialize the ddp pagepod manager for a given adapter.
 */
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax, tagmask = 0;
	struct ulp_iscsi_info uinfo;
	int i, err;

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		pr_err("%s, failed to get iscsi param %d.\n",
		       ndev->name, err);
		return err;
	}
	if (uinfo.llimit >= uinfo.ulimit) {
		pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n",
			ndev->name, uinfo.llimit, uinfo.ulimit);
		return -EACCES;
	}

	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	tagmask = cxgbi_tagmask_set(ppmax);

	pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n",
		ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask,
		tagmask);

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
	cxgbi_tagmask_check(tagmask, &tformat);

	err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat,
				  (uinfo.ulimit - uinfo.llimit + 1),
				  uinfo.llimit, uinfo.llimit, 0, 0, 0);
	if (err)
		return err;

	if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
		uinfo.tagmask = tagmask;
		uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

		err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
		if (err < 0) {
			pr_err("T3 %s failed to set iscsi param %d.\n",
			       ndev->name, err);
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		}
		err = 0;
	}

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->csk_ddp_clear_map = ddp_clear_map;
	cdev->cdev2ppm = cdev2ppm;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	return 0;
}

static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
		pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
		return;
	}

	cxgbi_device_unregister(cdev);
}

/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
	struct adapter *adapter = tdev2adap(t3dev);
	int i, err;

	if (cdev) {
		pr_info("0x%p, updating.\n", cdev);
		return;
	}

	cdev = cxgbi_device_register(0, adapter->params.nports);
	if (!cdev) {
		pr_warn("device 0x%p register failed.\n", t3dev);
		return;
	}

	cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
	cdev->lldev = t3dev;
	cdev->pdev = adapter->pdev;
	cdev->ports = adapter->port;
	cdev->nports = adapter->params.nports;
	cdev->mtus = adapter->params.mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
	cdev->itp = &cxgb3i_iscsi_transport;

	err = cxgb3i_ddp_init(cdev);
	if (err) {
		pr_info("0x%p ddp init failed %d\n", cdev, err);
		goto err_out;
	}

	err = cxgb3i_ofld_init(cdev);
	if (err) {
		pr_info("0x%p offload init failed\n", cdev);
		goto err_out;
	}

	err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
			     &cxgb3i_host_template, cxgb3i_stt);
	if (err)
		goto err_out;

	for (i = 0; i < cdev->nports; i++)
		cdev->hbas[i]->ipv4addr =
			cxgb3i_get_private_ipv4addr(cdev->ports[i]);

	pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
		cdev, cdev->flags, t3dev, err);
	return;

err_out:
	cxgbi_device_unregister(cdev);
}

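/*
 * OFFLOAD_STATUS_DOWN is reported while the adapter is resetting; setting
 * CXGBI_FLAG_ADAPTER_RESET turns cxgb3i_dev_close() into a no-op until
 * OFFLOAD_STATUS_UP clears the flag again.
 */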
static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	log_debug(1 << CXGBI_DBG_TOE,
		"0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
		t3dev, cdev, event, port);
	if (!cdev)
		return;

	switch (event) {
	case OFFLOAD_STATUS_DOWN:
		cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
		break;
	case OFFLOAD_STATUS_UP:
		cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
		break;
	}
}

/**
 * cxgb3i_init_module - module init entry point
 *
 * Initialize any driver-wide global data structures and register the driver
 * with the cxgb3 module.
 */
static int __init cxgb3i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
	if (rc < 0)
		return rc;

	cxgb3_register_client(&t3_client);
	return 0;
}

/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * Go through the driver's hba list, releasing the resources held by each hba,
 * then unregister the iscsi transport and detach from the cxgb3 module.
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3_client);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);