/*-
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_TOM_H__
#define __T4_TOM_H__
#include <sys/vmem.h>

#define LISTEN_HASH_SIZE 32

/*
 * Min receive window.  We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)

/*
 * Max receive window supported by HW in bytes.  Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)
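
/*
 * Illustrative sketch only, not part of the driver: any receive window the
 * TOE advertises is expected to fall within [MIN_RCV_WND, MAX_RCV_WND].  A
 * hypothetical clamp along these lines is one way to enforce that; the
 * driver's actual policy is in select_rcv_wnd() (declared below, defined in
 * t4_tom.c) and may differ.
 */
static inline u_long
example_clamp_rcv_wnd(u_long wnd)
{

	if (wnd < MIN_RCV_WND)
		return (MIN_RCV_WND);
	if (wnd > MAX_RCV_WND)
		return (MAX_RCV_WND);
	return (wnd);
}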

#define DDP_RSVD_WIN (16 * 1024U)
#define SB_DDP_INDICATE SB_IN_TOE /* soreceive must respond to indicate */

#define USE_DDP_RX_FLOW_CONTROL

#define PPOD_SZ(n) ((n) * sizeof(struct pagepod))
#define PPOD_SIZE (PPOD_SZ(1))

/* TOE PCB flags */
enum {
	TPF_ATTACHED		= (1 << 0),	/* a tcpcb refers to this toepcb */
	TPF_FLOWC_WR_SENT	= (1 << 1),	/* firmware flow context WR sent */
	TPF_TX_DATA_SENT	= (1 << 2),	/* some data sent */
	TPF_TX_SUSPENDED	= (1 << 3),	/* tx suspended for lack of resources */
	TPF_SEND_FIN		= (1 << 4),	/* send FIN after all pending data */
	TPF_FIN_SENT		= (1 << 5),	/* FIN has been sent */
	TPF_ABORT_SHUTDOWN	= (1 << 6),	/* connection abort is in progress */
	TPF_CPL_PENDING		= (1 << 7),	/* haven't received the last CPL */
	TPF_SYNQE		= (1 << 8),	/* synq_entry, not really a toepcb */
	TPF_SYNQE_NEEDFREE	= (1 << 9),	/* synq_entry was malloc'd separately */
	TPF_SYNQE_TCPDDP	= (1 << 10),	/* ulp_mode TCPDDP in toepcb */
	TPF_SYNQE_EXPANDED	= (1 << 11),	/* toepcb ready, tid context updated */
	TPF_SYNQE_HAS_L2TE	= (1 << 12),	/* we've replied to PASS_ACCEPT_REQ */
};

enum {
	DDP_OK		= (1 << 0),	/* OK to turn on DDP */
	DDP_SC_REQ	= (1 << 1),	/* state change (on/off) requested */
	DDP_ON		= (1 << 2),	/* DDP is turned on */
	DDP_BUF0_ACTIVE	= (1 << 3),	/* buffer 0 in use (not invalidated) */
	DDP_BUF1_ACTIVE	= (1 << 4),	/* buffer 1 in use (not invalidated) */
	DDP_TASK_ACTIVE	= (1 << 5),	/* requeue task is queued / running */
	DDP_DEAD	= (1 << 6),	/* toepcb is shutting down */
};

struct ofld_tx_sdesc {
	uint32_t plen;		/* payload length */
	uint8_t tx_credits;	/* firmware tx credits (unit is 16B) */
};

struct ppod_region {
	u_int pr_start;
	u_int pr_len;
	u_int pr_page_shift[4];
	uint32_t pr_tag_mask;		/* hardware tagmask for this region. */
	uint32_t pr_invalid_bit;	/* OR with this to invalidate tag. */
	uint32_t pr_alias_mask;		/* AND with tag to get alias bits. */
	u_int pr_alias_shift;		/* shift this much for first alias bit. */
	vmem_t *pr_arena;
};

struct ppod_reservation {
	struct ppod_region *prsv_pr;
	uint32_t prsv_tag;		/* Full tag: pgsz, alias, tag, color */
	u_int prsv_nppods;
};
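
/*
 * Illustrative sketch only (hypothetical helpers, not part of the driver):
 * the field comments in struct ppod_region above describe how a DDP tag is
 * interpreted.  Under that reading the alias bits and the "invalid" form of
 * a tag would be derived roughly as follows; t4_ddp.c is the authority on
 * the real encoding.
 */
static inline uint32_t
example_ppod_tag_alias(const struct ppod_region *pr, uint32_t tag)
{

	return ((tag & pr->pr_alias_mask) >> pr->pr_alias_shift);
}

static inline uint32_t
example_ppod_tag_invalidate(const struct ppod_region *pr, uint32_t tag)
{

	return (tag | pr->pr_invalid_bit);
}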

struct pageset {
	TAILQ_ENTRY(pageset) link;
	vm_page_t *pages;
	int npages;
	int flags;
	int offset;		/* offset in first page */
	int len;
	struct ppod_reservation prsv;
	struct vmspace *vm;
	u_int vm_timestamp;
};

TAILQ_HEAD(pagesetq, pageset);

#define PS_WIRED		0x0001	/* Pages wired rather than held. */
#define PS_PPODS_WRITTEN	0x0002	/* Page pods written to the card. */

#define EXT_FLAG_AIOTX		EXT_FLAG_VENDOR1

struct ddp_buffer {
	struct pageset *ps;

	struct kaiocb *job;
	int cancel_pending;
};

struct aiotx_buffer {
	struct pageset ps;
	struct kaiocb *job;
	int refcount;
};

struct toepcb {
	TAILQ_ENTRY(toepcb) link;	/* toep_list */
	u_int flags;		/* miscellaneous flags */
	int refcount;
	struct tom_data *td;
	struct inpcb *inp;	/* backpointer to host stack's PCB */
	struct vnet *vnet;
	struct vi_info *vi;	/* virtual interface */
	struct sge_wrq *ofld_txq;
	struct sge_ofld_rxq *ofld_rxq;
	struct sge_wrq *ctrlq;
	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
	struct clip_entry *ce;	/* CLIP table entry used by this tid */
	int tid;		/* Connection identifier */
	int tc_idx;		/* traffic class that this tid is bound to */

	/* tx credit handling */
	u_int tx_total;		/* total tx WR credits (in 16B units) */
	u_int tx_credits;	/* tx WR credits (in 16B units) available */
	u_int tx_nocompl;	/* tx WR credits since last compl request */
	u_int plen_nocompl;	/* payload since last compl request */

	/* rx credit handling */
	u_int sb_cc;		/* last noted value of so_rcv->sb_cc */
	int rx_credits;		/* rx credits (in bytes) to be returned to hw */

	u_int ulp_mode;		/* ULP mode */
	void *ulpcb;
	void *ulpcb2;
	struct mbufq ulp_pduq;	/* PDUs waiting to be sent out. */
	struct mbufq ulp_pdu_reclaimq;

	u_int ddp_flags;
	struct ddp_buffer db[2];
	TAILQ_HEAD(, pageset) ddp_cached_pagesets;
	TAILQ_HEAD(, kaiocb) ddp_aiojobq;
	u_int ddp_waiting_count;
	u_int ddp_active_count;
	u_int ddp_cached_count;
	int ddp_active_id;	/* the currently active DDP buffer */
	struct task ddp_requeue_task;
	struct kaiocb *ddp_queueing;
	struct mtx ddp_lock;

	TAILQ_HEAD(, kaiocb) aiotx_jobq;
	struct task aiotx_task;
	bool aiotx_task_active;

	/* Tx software descriptor */
	uint8_t txsd_total;
	uint8_t txsd_pidx;
	uint8_t txsd_cidx;
	uint8_t txsd_avail;
	struct ofld_tx_sdesc txsd[];
};

#define DDP_LOCK(toep)		mtx_lock(&(toep)->ddp_lock)
#define DDP_UNLOCK(toep)	mtx_unlock(&(toep)->ddp_lock)
#define DDP_ASSERT_LOCKED(toep)	mtx_assert(&(toep)->ddp_lock, MA_OWNED)
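
/*
 * Illustrative sketch only (hypothetical helper, not driver code): the
 * per-connection DDP state in struct toepcb (ddp_flags, db[], the cached
 * pageset and aio job queues) is protected by ddp_lock, so updates are
 * expected to be bracketed by DDP_LOCK/DDP_UNLOCK roughly like this.
 */
static inline void
example_ddp_mark_dead(struct toepcb *toep)
{

	DDP_LOCK(toep);
	toep->ddp_flags |= DDP_DEAD;
	DDP_UNLOCK(toep);
}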

struct flowc_tx_params {
	uint32_t snd_nxt;
	uint32_t rcv_nxt;
	unsigned int snd_space;
	unsigned int mss;
};

#define DDP_RETRY_WAIT	5	/* seconds to wait before re-enabling DDP */
#define DDP_LOW_SCORE	1
#define DDP_HIGH_SCORE	3

/*
 * Compressed state for embryonic connections for a listener.  Barely fits in
 * 64B, try not to grow it further.
 */
struct synq_entry {
	TAILQ_ENTRY(synq_entry) link;	/* listen_ctx's synq link */
	int flags;		/* same as toepcb's tp_flags */
	int tid;
	struct listen_ctx *lctx;	/* backpointer to listen ctx */
	struct mbuf *syn;
	uint32_t iss;
	uint32_t ts;
	volatile uintptr_t wr;
	volatile u_int refcnt;
	uint16_t l2e_idx;
	uint16_t rcv_bufsize;
};

/* listen_ctx flags */
#define LCTX_RPL_PENDING	1	/* waiting for a CPL_PASS_OPEN_RPL */

struct listen_ctx {
	LIST_ENTRY(listen_ctx) link;	/* listen hash linkage */
	volatile int refcount;
	int stid;
	struct stid_region stid_region;
	int flags;
	struct inpcb *inp;	/* listening socket's inp */
	struct vnet *vnet;
	struct sge_wrq *ctrlq;
	struct sge_ofld_rxq *ofld_rxq;
	struct clip_entry *ce;
	TAILQ_HEAD(, synq_entry) synq;
};

struct clip_entry {
	TAILQ_ENTRY(clip_entry) link;
	struct in6_addr lip;	/* local IPv6 address */
	u_int refcount;
};

TAILQ_HEAD(clip_head, clip_entry);
struct tom_data {
	struct toedev tod;

	/* toepcb's associated with this TOE device */
	struct mtx toep_list_lock;
	TAILQ_HEAD(, toepcb) toep_list;

	struct mtx lctx_hash_lock;
	LIST_HEAD(, listen_ctx) *listen_hash;
	u_long listen_mask;
	int lctx_count;		/* # of lctx in the hash table */

	struct ppod_region pr;

	struct mtx clip_table_lock;
	struct clip_head clip_table;
	int clip_gen;

	/* WRs that will not be sent to the chip because L2 resolution failed */
	struct mtx unsent_wr_lock;
	STAILQ_HEAD(, wrqe) unsent_wr_list;
	struct task reclaim_wr_resources;
};

static inline struct tom_data *
tod_td(struct toedev *tod)
{

	return (__containerof(tod, struct tom_data, tod));
}

static inline struct adapter *
td_adapter(struct tom_data *td)
{

	return (td->tod.tod_softc);
}

static inline void
set_mbuf_ulp_submode(struct mbuf *m, uint8_t ulp_submode)
{

	M_ASSERTPKTHDR(m);
	m->m_pkthdr.PH_per.eight[0] = ulp_submode;
}

static inline uint8_t
mbuf_ulp_submode(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	return (m->m_pkthdr.PH_per.eight[0]);
}
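
/*
 * Illustrative sketch only (hypothetical, not driver code): the pair above
 * stores the ULP submode in the first per-packet byte of the pkthdr and
 * reads it back.  MPASS from the kernel's assertion machinery is assumed to
 * be available here.
 */
static inline void
example_ulp_submode_roundtrip(struct mbuf *m, uint8_t submode)
{

	set_mbuf_ulp_submode(m, submode);
	MPASS(mbuf_ulp_submode(m) == submode);
}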

/* t4_tom.c */
struct toepcb *alloc_toepcb(struct vi_info *, int, int, int);
struct toepcb *hold_toepcb(struct toepcb *);
void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
void final_cpl_received(struct toepcb *);
void insert_tid(struct adapter *, int, void *, int);
void *lookup_tid(struct adapter *, int);
void update_tid(struct adapter *, int, void *);
void remove_tid(struct adapter *, int, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
u_long select_rcv_wnd(struct socket *);
int select_rcv_wscale(void);
uint64_t calc_opt0(struct socket *, struct vi_info *, struct l2t_entry *,
    int, int, int, int);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
void set_tcpddp_ulp_mode(struct toepcb *);
int negative_advice(int);
struct clip_entry *hold_lip(struct tom_data *, struct in6_addr *,
    struct clip_entry *);
void release_lip(struct tom_data *, struct clip_entry *);

/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct rtentry *,
    struct sockaddr *);
void act_open_failure_cleanup(struct adapter *, u_int, u_int);

/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
void t4_uninit_listen_cpl_handlers(void);
int t4_listen_start(struct toedev *, struct tcpcb *);
int t4_listen_stop(struct toedev *, struct tcpcb *);
void t4_syncache_added(struct toedev *, void *);
void t4_syncache_removed(struct toedev *, void *);
int t4_syncache_respond(struct toedev *, void *, struct mbuf *);
int do_abort_req_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
void t4_offload_socket(struct toedev *, void *, struct socket *);

/* t4_cpl_io.c */
void aiotx_init_toep(struct toepcb *);
int t4_aio_queue_aiotx(struct socket *, struct kaiocb *);
void t4_init_cpl_io_handlers(void);
void t4_uninit_cpl_io_handlers(void);
void send_abort_rpl(struct adapter *, struct sge_wrq *, int, int);
void send_flowc_wr(struct toepcb *, struct flowc_tx_params *);
void send_reset(struct adapter *, struct toepcb *, uint32_t);
void make_established(struct toepcb *, uint32_t, uint32_t, uint16_t);
void t4_rcvd(struct toedev *, struct tcpcb *);
void t4_rcvd_locked(struct toedev *, struct tcpcb *);
int t4_tod_output(struct toedev *, struct tcpcb *);
int t4_send_fin(struct toedev *, struct tcpcb *);
int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, int, uint16_t,
    uint64_t, uint64_t, int, int, int);
void t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop);
void t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop);
int do_set_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);

/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
    const char *);
void t4_free_ppod_region(struct ppod_region *);
int t4_alloc_page_pods_for_ps(struct ppod_region *, struct pageset *);
int t4_alloc_page_pods_for_buf(struct ppod_region *, vm_offset_t, int,
    struct ppod_reservation *);
int t4_write_page_pods_for_ps(struct adapter *, struct sge_wrq *, int,
    struct pageset *);
int t4_write_page_pods_for_buf(struct adapter *, struct sge_wrq *, int tid,
    struct ppod_reservation *, vm_offset_t, int);
void t4_free_page_pods(struct ppod_reservation *);
int t4_soreceive_ddp(struct socket *, struct sockaddr **, struct uio *,
    struct mbuf **, struct mbuf **, int *);
int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
int t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
void ddp_assert_empty(struct toepcb *);
void ddp_init_toep(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
void release_ddp_resources(struct toepcb *toep);
void handle_ddp_close(struct toepcb *, struct tcpcb *, uint32_t);
void handle_ddp_indicate(struct toepcb *);
void handle_ddp_tcb_rpl(struct toepcb *, const struct cpl_set_tcb_rpl *);
void insert_ddp_data(struct toepcb *, uint32_t);

#endif