/*-
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_TOM_H__
#define __T4_TOM_H__
#include <sys/vmem.h>

#define LISTEN_HASH_SIZE 32

/*
 * Min receive window.  We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)

/*
 * Max receive window supported by HW in bytes.  Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)
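
/*
 * Illustrative sketch only (assumption; the actual policy lives in
 * select_rcv_wnd(), declared later in this header): a receive window derived
 * from the socket buffer would be clamped into the range above roughly as
 *
 *	wnd = max(sbspace(&so->so_rcv), MIN_RCV_WND);
 *	wnd = min(wnd, MAX_RCV_WND);
 */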

#define DDP_RSVD_WIN (16 * 1024U)
#define SB_DDP_INDICATE	SB_IN_TOE	/* soreceive must respond to indicate */

#define USE_DDP_RX_FLOW_CONTROL

#define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
#define PPOD_SIZE	(PPOD_SZ(1))

/* TOE PCB flags */
enum {
	TPF_ATTACHED	   = (1 << 0),	/* a tcpcb refers to this toepcb */
	TPF_FLOWC_WR_SENT  = (1 << 1),	/* firmware flow context WR sent */
	TPF_TX_DATA_SENT   = (1 << 2),	/* some data sent */
	TPF_TX_SUSPENDED   = (1 << 3),	/* tx suspended for lack of resources */
	TPF_SEND_FIN	   = (1 << 4),	/* send FIN after all pending data */
	TPF_FIN_SENT	   = (1 << 5),	/* FIN has been sent */
	TPF_ABORT_SHUTDOWN = (1 << 6),	/* connection abort is in progress */
	TPF_CPL_PENDING    = (1 << 7),	/* haven't received the last CPL */
	TPF_SYNQE	   = (1 << 8),	/* synq_entry, not really a toepcb */
	TPF_SYNQE_NEEDFREE = (1 << 9),	/* synq_entry was malloc'd separately */
	TPF_SYNQE_TCPDDP   = (1 << 10),	/* ulp_mode TCPDDP in toepcb */
	TPF_SYNQE_EXPANDED = (1 << 11),	/* toepcb ready, tid context updated */
	TPF_SYNQE_HAS_L2TE = (1 << 12),	/* we've replied to PASS_ACCEPT_REQ */
};

enum {
	DDP_OK		= (1 << 0),	/* OK to turn on DDP */
	DDP_SC_REQ	= (1 << 1),	/* state change (on/off) requested */
	DDP_ON		= (1 << 2),	/* DDP is turned on */
	DDP_BUF0_ACTIVE	= (1 << 3),	/* buffer 0 in use (not invalidated) */
	DDP_BUF1_ACTIVE	= (1 << 4),	/* buffer 1 in use (not invalidated) */
	DDP_TASK_ACTIVE	= (1 << 5),	/* requeue task is queued / running */
	DDP_DEAD	= (1 << 6),	/* toepcb is shutting down */
};

struct ofld_tx_sdesc {
	uint32_t plen;		/* payload length */
	uint8_t tx_credits;	/* firmware tx credits (unit is 16B) */
};

struct ppod_region {
	u_int pr_start;
	u_int pr_len;
	u_int pr_page_shift[4];
	uint32_t pr_tag_mask;		/* hardware tagmask for this region. */
	uint32_t pr_invalid_bit;	/* OR with this to invalidate tag. */
	uint32_t pr_alias_mask;		/* AND with tag to get alias bits. */
	u_int pr_alias_shift;		/* shift this much for first alias bit. */
	vmem_t *pr_arena;
};

struct ppod_reservation {
	struct ppod_region *prsv_pr;
	uint32_t prsv_tag;		/* Full tag: pgsz, alias, tag, color */
	u_int prsv_nppods;
};

struct pageset {
	TAILQ_ENTRY(pageset) link;
	vm_page_t *pages;
	int npages;
	int flags;
	int offset;		/* offset in first page */
	int len;
	struct ppod_reservation prsv;
	struct vmspace *vm;
	u_int vm_timestamp;
};

TAILQ_HEAD(pagesetq, pageset);

#define	PS_WIRED		0x0001	/* Pages wired rather than held. */
#define	PS_PPODS_WRITTEN	0x0002	/* Page pods written to the card. */
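
/*
 * Illustrative sketch only (assumption; the authoritative sequencing and
 * error handling live in t4_ddp.c): before a pageset is handed to the card
 * for DDP it is backed by page pods reserved from the adapter's ppod_region
 * and those pods are written to the card, roughly:
 *
 *	t4_alloc_page_pods_for_ps(&td->pr, ps);
 *	t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps);
 *
 * PS_PPODS_WRITTEN then records that the pods are live on the card.  Both
 * functions are declared in the t4_ddp.c section at the end of this header.
 */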

#define EXT_FLAG_AIOTX	EXT_FLAG_VENDOR1

struct ddp_buffer {
	struct pageset *ps;

	struct kaiocb *job;
	int cancel_pending;
};

struct aiotx_buffer {
	struct pageset ps;
	struct kaiocb *job;
	int refcount;
};

struct toepcb {
	TAILQ_ENTRY(toepcb) link; /* toep_list */
	u_int flags;		/* miscellaneous flags */
	int refcount;
	struct tom_data *td;
	struct inpcb *inp;	/* backpointer to host stack's PCB */
	struct vnet *vnet;
	struct vi_info *vi;	/* virtual interface */
	struct sge_wrq *ofld_txq;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ctrlq;
	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
	struct clip_entry *ce;	/* CLIP table entry used by this tid */
	int tid;		/* Connection identifier */

	/* tx credit handling */
	u_int tx_total;		/* total tx WR credits (in 16B units) */
	u_int tx_credits;	/* tx WR credits (in 16B units) available */
	u_int tx_nocompl;	/* tx WR credits since last compl request */
	u_int plen_nocompl;	/* payload since last compl request */

	/* rx credit handling */
	u_int sb_cc;		/* last noted value of so_rcv->sb_cc */
	int rx_credits;		/* rx credits (in bytes) to be returned to hw */

	u_int ulp_mode;		/* ULP mode */
	void *ulpcb;
	void *ulpcb2;
	struct mbufq ulp_pduq;	/* PDUs waiting to be sent out. */
	struct mbufq ulp_pdu_reclaimq;

	u_int ddp_flags;
	struct ddp_buffer db[2];
	TAILQ_HEAD(, pageset) ddp_cached_pagesets;
	TAILQ_HEAD(, kaiocb) ddp_aiojobq;
	u_int ddp_waiting_count;
	u_int ddp_active_count;
	u_int ddp_cached_count;
	int ddp_active_id;	/* the currently active DDP buffer */
	struct task ddp_requeue_task;
	struct kaiocb *ddp_queueing;
	struct mtx ddp_lock;

	TAILQ_HEAD(, kaiocb) aiotx_jobq;
	struct task aiotx_task;
	bool aiotx_task_active;

	/* Tx software descriptor */
	uint8_t txsd_total;
	uint8_t txsd_pidx;
	uint8_t txsd_cidx;
	uint8_t txsd_avail;
	struct ofld_tx_sdesc txsd[];
};

#define	DDP_LOCK(toep)		mtx_lock(&(toep)->ddp_lock)
#define	DDP_UNLOCK(toep)	mtx_unlock(&(toep)->ddp_lock)
#define	DDP_ASSERT_LOCKED(toep)	mtx_assert(&(toep)->ddp_lock, MA_OWNED)
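
/*
 * Usage sketch (assumption, not lifted verbatim from the driver): the DDP
 * related fields in the toepcb (ddp_flags, db[], the pageset and aio job
 * queues) are intended to be manipulated with ddp_lock held, e.g.
 *
 *	DDP_LOCK(toep);
 *	toep->ddp_flags |= DDP_SC_REQ;
 *	DDP_UNLOCK(toep);
 */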

struct flowc_tx_params {
	uint32_t snd_nxt;
	uint32_t rcv_nxt;
	unsigned int snd_space;
	unsigned int mss;
};

#define	DDP_RETRY_WAIT	5	/* seconds to wait before re-enabling DDP */
#define	DDP_LOW_SCORE	1
#define	DDP_HIGH_SCORE	3

/*
 * Compressed state for embryonic connections for a listener.  Barely fits in
 * 64B, try not to grow it further.
 */
struct synq_entry {
	TAILQ_ENTRY(synq_entry) link;	/* listen_ctx's synq link */
	int flags;			/* same TPF_* namespace as toepcb's flags */
	int tid;
	struct listen_ctx *lctx;	/* backpointer to listen ctx */
	struct mbuf *syn;
	uint32_t iss;
	uint32_t ts;
	volatile uintptr_t wr;
	volatile u_int refcnt;
	uint16_t l2e_idx;
	uint16_t rcv_bufsize;
};

/* listen_ctx flags */
#define	LCTX_RPL_PENDING 1	/* waiting for a CPL_PASS_OPEN_RPL */

struct listen_ctx {
	LIST_ENTRY(listen_ctx) link;	/* listen hash linkage */
	volatile int refcount;
	int stid;
	struct stid_region stid_region;
	int flags;
	struct inpcb *inp;		/* listening socket's inp */
	struct vnet *vnet;
	struct sge_wrq *ctrlq;
	struct sge_ofld_rxq *ofld_rxq;
	struct clip_entry *ce;
	TAILQ_HEAD(, synq_entry) synq;
};

struct clip_entry {
	TAILQ_ENTRY(clip_entry) link;
	struct in6_addr lip;	/* local IPv6 address */
	u_int refcount;
};

TAILQ_HEAD(clip_head, clip_entry);
struct tom_data {
	struct toedev tod;

	/* toepcb's associated with this TOE device */
	struct mtx toep_list_lock;
	TAILQ_HEAD(, toepcb) toep_list;

	struct mtx lctx_hash_lock;
	LIST_HEAD(, listen_ctx) *listen_hash;
	u_long listen_mask;
	int lctx_count;		/* # of lctx in the hash table */

	struct ppod_region pr;

	struct mtx clip_table_lock;
	struct clip_head clip_table;
	int clip_gen;

	/* WRs that will not be sent to the chip because L2 resolution failed */
	struct mtx unsent_wr_lock;
	STAILQ_HEAD(, wrqe) unsent_wr_list;
	struct task reclaim_wr_resources;
};

static inline struct tom_data *
tod_td(struct toedev *tod)
{

	return (__containerof(tod, struct tom_data, tod));
}

static inline struct adapter *
td_adapter(struct tom_data *td)
{

	return (td->tod.tod_softc);
}

static inline void
set_mbuf_ulp_submode(struct mbuf *m, uint8_t ulp_submode)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.eight[0] = ulp_submode;
}

static inline uint8_t
mbuf_ulp_submode(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.eight[0]);
}
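
/*
 * Accessor sketch (illustrative; these are the inline helpers defined just
 * above): code that is handed a struct toedev * typically recovers the
 * per-device and per-adapter state with
 *
 *	struct tom_data *td = tod_td(tod);
 *	struct adapter *sc = td_adapter(td);
 *
 * and tx-side code records an mbuf's ULP submode with
 * set_mbuf_ulp_submode(m, submode), reading it back later with
 * mbuf_ulp_submode(m).
 */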

/* t4_tom.c */
struct toepcb *alloc_toepcb(struct vi_info *, int, int, int);
struct toepcb *hold_toepcb(struct toepcb *);
void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
void final_cpl_received(struct toepcb *);
void insert_tid(struct adapter *, int, void *, int);
void *lookup_tid(struct adapter *, int);
void update_tid(struct adapter *, int, void *);
void remove_tid(struct adapter *, int, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
u_long select_rcv_wnd(struct socket *);
int select_rcv_wscale(void);
uint64_t calc_opt0(struct socket *, struct vi_info *, struct l2t_entry *,
    int, int, int, int);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
void set_tcpddp_ulp_mode(struct toepcb *);
int negative_advice(int);
struct clip_entry *hold_lip(struct tom_data *, struct in6_addr *,
    struct clip_entry *);
void release_lip(struct tom_data *, struct clip_entry *);

/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct rtentry *,
    struct sockaddr *);
void act_open_failure_cleanup(struct adapter *, u_int, u_int);

/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
void t4_uninit_listen_cpl_handlers(void);
int t4_listen_start(struct toedev *, struct tcpcb *);
int t4_listen_stop(struct toedev *, struct tcpcb *);
void t4_syncache_added(struct toedev *, void *);
void t4_syncache_removed(struct toedev *, void *);
int t4_syncache_respond(struct toedev *, void *, struct mbuf *);
int do_abort_req_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
void t4_offload_socket(struct toedev *, void *, struct socket *);

/* t4_cpl_io.c */
void aiotx_init_toep(struct toepcb *);
int t4_aio_queue_aiotx(struct socket *, struct kaiocb *);
void t4_init_cpl_io_handlers(void);
void t4_uninit_cpl_io_handlers(void);
void send_abort_rpl(struct adapter *, struct sge_wrq *, int, int);
void send_flowc_wr(struct toepcb *, struct flowc_tx_params *);
void send_reset(struct adapter *, struct toepcb *, uint32_t);
void make_established(struct toepcb *, uint32_t, uint32_t, uint16_t);
void t4_rcvd(struct toedev *, struct tcpcb *);
void t4_rcvd_locked(struct toedev *, struct tcpcb *);
int t4_tod_output(struct toedev *, struct tcpcb *);
int t4_send_fin(struct toedev *, struct tcpcb *);
int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, int, uint16_t,
    uint64_t, uint64_t, int, int, int);
void t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop);
void t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop);
int do_set_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);

/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
    const char *);
void t4_free_ppod_region(struct ppod_region *);
int t4_alloc_page_pods_for_ps(struct ppod_region *, struct pageset *);
int t4_alloc_page_pods_for_buf(struct ppod_region *, vm_offset_t, int,
    struct ppod_reservation *);
int t4_write_page_pods_for_ps(struct adapter *, struct sge_wrq *, int,
    struct pageset *);
int t4_write_page_pods_for_buf(struct adapter *, struct sge_wrq *, int tid,
    struct ppod_reservation *, vm_offset_t, int);
void t4_free_page_pods(struct ppod_reservation *);
int t4_soreceive_ddp(struct socket *, struct sockaddr **, struct uio *,
    struct mbuf **, struct mbuf **, int *);
int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
int t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
void ddp_assert_empty(struct toepcb *);
void ddp_init_toep(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
void release_ddp_resources(struct toepcb *toep);
void handle_ddp_close(struct toepcb *, struct tcpcb *, uint32_t);
void handle_ddp_indicate(struct toepcb *);
void handle_ddp_tcb_rpl(struct toepcb *, const struct cpl_set_tcb_rpl *);
void insert_ddp_data(struct toepcb *, uint32_t);

#endif