/*-
 * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __T4_TOM_H__
#define __T4_TOM_H__
#include <sys/vmem.h>

#define LISTEN_HASH_SIZE 32

/*
 * Min receive window.  We want it to be large enough to accommodate receive
 * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
 */
#define MIN_RCV_WND (24 * 1024U)

/*
 * Max receive window supported by HW in bytes.  Only a small part of it can
 * be set through option0, the rest needs to be set through RX_DATA_ACK.
 */
#define MAX_RCV_WND ((1U << 27) - 1)
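/*
 * Illustrative sketch (not part of this header's API): the receive window
 * eventually programmed for a connection is assumed to be clamped into
 * [MIN_RCV_WND, MAX_RCV_WND], roughly:
 *
 *	wnd = ulmax(MIN_RCV_WND, ulmin(wnd, MAX_RCV_WND));
 *
 * Per the comment above, only part of that window can be expressed in
 * option0; the remainder is opened up later via RX_DATA_ACK.
 */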

#define DDP_RSVD_WIN (16 * 1024U)
#define SB_DDP_INDICATE SB_IN_TOE	/* soreceive must respond to indicate */

#define USE_DDP_RX_FLOW_CONTROL

#define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
#define PPOD_SIZE	(PPOD_SZ(1))

/* TOE PCB flags */
enum {
        TPF_ATTACHED       = (1 << 0),	/* a tcpcb refers to this toepcb */
        TPF_FLOWC_WR_SENT  = (1 << 1),	/* firmware flow context WR sent */
        TPF_TX_DATA_SENT   = (1 << 2),	/* some data sent */
        TPF_TX_SUSPENDED   = (1 << 3),	/* tx suspended for lack of resources */
        TPF_SEND_FIN       = (1 << 4),	/* send FIN after all pending data */
        TPF_FIN_SENT       = (1 << 5),	/* FIN has been sent */
        TPF_ABORT_SHUTDOWN = (1 << 6),	/* connection abort is in progress */
        TPF_CPL_PENDING    = (1 << 7),	/* haven't received the last CPL */
        TPF_SYNQE          = (1 << 8),	/* synq_entry, not really a toepcb */
        TPF_SYNQE_NEEDFREE = (1 << 9),	/* synq_entry was malloc'd separately */
        TPF_SYNQE_TCPDDP   = (1 << 10),	/* ulp_mode TCPDDP in toepcb */
        TPF_SYNQE_EXPANDED = (1 << 11),	/* toepcb ready, tid context updated */
        TPF_SYNQE_HAS_L2TE = (1 << 12),	/* we've replied to PASS_ACCEPT_REQ */
};

enum {
        DDP_OK          = (1 << 0),	/* OK to turn on DDP */
        DDP_SC_REQ      = (1 << 1),	/* state change (on/off) requested */
        DDP_ON          = (1 << 2),	/* DDP is turned on */
        DDP_BUF0_ACTIVE = (1 << 3),	/* buffer 0 in use (not invalidated) */
        DDP_BUF1_ACTIVE = (1 << 4),	/* buffer 1 in use (not invalidated) */
        DDP_TASK_ACTIVE = (1 << 5),	/* requeue task is queued / running */
        DDP_DEAD        = (1 << 6),	/* toepcb is shutting down */
};

struct ofld_tx_sdesc {
        uint32_t plen;		/* payload length */
        uint8_t tx_credits;	/* firmware tx credits (unit is 16B) */
};

struct ppod_region {
        u_int pr_start;
        u_int pr_len;
        u_int pr_page_shift[4];
        uint32_t pr_tag_mask;		/* hardware tagmask for this region. */
        uint32_t pr_invalid_bit;	/* OR with this to invalidate tag. */
        uint32_t pr_alias_mask;		/* AND with tag to get alias bits. */
        u_int pr_alias_shift;		/* shift this much for first alias bit. */
        vmem_t *pr_arena;
};

struct ppod_reservation {
        struct ppod_region *prsv_pr;
        uint32_t prsv_tag;		/* Full tag: pgsz, alias, tag, color */
        u_int prsv_nppods;
};

struct pageset {
        TAILQ_ENTRY(pageset) link;
        vm_page_t *pages;
        int npages;
        int flags;
        int offset;		/* offset in first page */
        int len;
        struct ppod_reservation prsv;
        struct vmspace *vm;
        vm_offset_t start;
        u_int vm_timestamp;
};

TAILQ_HEAD(pagesetq, pageset);

#define PS_WIRED		0x0001	/* Pages wired rather than held. */
#define PS_PPODS_WRITTEN	0x0002	/* Page pods written to the card. */
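/*
 * Illustrative sketch (not part of this header's API): the flag words above
 * are plain bitmasks.  TPF_* bits live in toepcb->flags, DDP_* bits in
 * toepcb->ddp_flags, and PS_* bits in pageset->flags, and they are tested
 * and updated with the usual idioms, e.g.:
 *
 *	toep->flags |= TPF_SEND_FIN;
 *	if (toep->ddp_flags & DDP_ON)
 *		...;
 *	ps->flags &= ~PS_WIRED;
 */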

#define EXT_FLAG_AIOTX	EXT_FLAG_VENDOR1

struct ddp_buffer {
        struct pageset *ps;

        struct kaiocb *job;
        int cancel_pending;
};

struct aiotx_buffer {
        struct pageset ps;
        struct kaiocb *job;
        int refcount;
};

struct toepcb {
        TAILQ_ENTRY(toepcb) link;	/* toep_list */
        u_int flags;		/* miscellaneous flags */
        int refcount;
        struct tom_data *td;
        struct inpcb *inp;	/* backpointer to host stack's PCB */
        struct vnet *vnet;
        struct vi_info *vi;	/* virtual interface */
        struct sge_wrq *ofld_txq;
        struct sge_ofld_rxq *ofld_rxq;
        struct sge_wrq *ctrlq;
        struct l2t_entry *l2te;	/* L2 table entry used by this connection */
        struct clip_entry *ce;	/* CLIP table entry used by this tid */
        int tid;		/* Connection identifier */
        int tc_idx;		/* traffic class that this tid is bound to */

        /* tx credit handling */
        u_int tx_total;		/* total tx WR credits (in 16B units) */
        u_int tx_credits;	/* tx WR credits (in 16B units) available */
        u_int tx_nocompl;	/* tx WR credits since last compl request */
        u_int plen_nocompl;	/* payload since last compl request */

        /* rx credit handling */
        u_int sb_cc;		/* last noted value of so_rcv->sb_cc */
        int rx_credits;		/* rx credits (in bytes) to be returned to hw */

        u_int ulp_mode;		/* ULP mode */
        void *ulpcb;
        void *ulpcb2;
        struct mbufq ulp_pduq;	/* PDUs waiting to be sent out. */
        struct mbufq ulp_pdu_reclaimq;

        u_int ddp_flags;
        struct ddp_buffer db[2];
        TAILQ_HEAD(, pageset) ddp_cached_pagesets;
        TAILQ_HEAD(, kaiocb) ddp_aiojobq;
        u_int ddp_waiting_count;
        u_int ddp_active_count;
        u_int ddp_cached_count;
        int ddp_active_id;	/* the currently active DDP buffer */
        struct task ddp_requeue_task;
        struct kaiocb *ddp_queueing;
        struct mtx ddp_lock;

        TAILQ_HEAD(, kaiocb) aiotx_jobq;
        struct task aiotx_task;
        bool aiotx_task_active;

        /* Tx software descriptor */
        uint8_t txsd_total;
        uint8_t txsd_pidx;
        uint8_t txsd_cidx;
        uint8_t txsd_avail;
        struct ofld_tx_sdesc txsd[];
};

#define DDP_LOCK(toep)		mtx_lock(&(toep)->ddp_lock)
#define DDP_UNLOCK(toep)	mtx_unlock(&(toep)->ddp_lock)
#define DDP_ASSERT_LOCKED(toep)	mtx_assert(&(toep)->ddp_lock, MA_OWNED)

struct flowc_tx_params {
        uint32_t snd_nxt;
        uint32_t rcv_nxt;
        unsigned int snd_space;
        unsigned int mss;
};

#define DDP_RETRY_WAIT	5	/* seconds to wait before re-enabling DDP */
#define DDP_LOW_SCORE	1
#define DDP_HIGH_SCORE	3
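/*
 * Illustrative sketch (not part of this header's API): the per-connection DDP
 * state in struct toepcb (db[], ddp_cached_pagesets, ddp_aiojobq, the ddp_*
 * counters, and ddp_flags) is assumed to be protected by ddp_lock, so a
 * consumer would bracket accesses roughly like:
 *
 *	DDP_LOCK(toep);
 *	if (toep->ddp_flags & DDP_ON) {
 *		// examine toep->db[toep->ddp_active_id], counters, etc.
 *	}
 *	DDP_UNLOCK(toep);
 *
 * and use DDP_ASSERT_LOCKED(toep) in helpers that expect the lock held.
 */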

/*
 * Compressed state for embryonic connections for a listener.  Barely fits in
 * 64B, try not to grow it further.
 */
struct synq_entry {
        TAILQ_ENTRY(synq_entry) link;	/* listen_ctx's synq link */
        int flags;			/* same as toepcb's tp_flags */
        int tid;
        struct listen_ctx *lctx;	/* backpointer to listen ctx */
        struct mbuf *syn;
        uint32_t iss;
        uint32_t ts;
        volatile uintptr_t wr;
        volatile u_int refcnt;
        uint16_t l2e_idx;
        uint16_t rcv_bufsize;
};

/* listen_ctx flags */
#define LCTX_RPL_PENDING	1	/* waiting for a CPL_PASS_OPEN_RPL */

struct listen_ctx {
        LIST_ENTRY(listen_ctx) link;	/* listen hash linkage */
        volatile int refcount;
        int stid;
        struct stid_region stid_region;
        int flags;
        struct inpcb *inp;		/* listening socket's inp */
        struct vnet *vnet;
        struct sge_wrq *ctrlq;
        struct sge_ofld_rxq *ofld_rxq;
        struct clip_entry *ce;
        TAILQ_HEAD(, synq_entry) synq;
};

struct clip_entry {
        TAILQ_ENTRY(clip_entry) link;
        struct in6_addr lip;	/* local IPv6 address */
        u_int refcount;
};

TAILQ_HEAD(clip_head, clip_entry);
struct tom_data {
        struct toedev tod;

        /* toepcb's associated with this TOE device */
        struct mtx toep_list_lock;
        TAILQ_HEAD(, toepcb) toep_list;

        struct mtx lctx_hash_lock;
        LIST_HEAD(, listen_ctx) *listen_hash;
        u_long listen_mask;
        int lctx_count;		/* # of lctx in the hash table */

        struct ppod_region pr;

        struct mtx clip_table_lock;
        struct clip_head clip_table;
        int clip_gen;

        /* WRs that will not be sent to the chip because L2 resolution failed */
        struct mtx unsent_wr_lock;
        STAILQ_HEAD(, wrqe) unsent_wr_list;
        struct task reclaim_wr_resources;
};

static inline struct tom_data *
tod_td(struct toedev *tod)
{

        return (__containerof(tod, struct tom_data, tod));
}

static inline struct adapter *
td_adapter(struct tom_data *td)
{

        return (td->tod.tod_softc);
}

static inline void
set_mbuf_ulp_submode(struct mbuf *m, uint8_t ulp_submode)
{

        M_ASSERTPKTHDR(m);
        m->m_pkthdr.PH_per.eight[0] = ulp_submode;
}

static inline uint8_t
mbuf_ulp_submode(struct mbuf *m)
{

        M_ASSERTPKTHDR(m);
        return (m->m_pkthdr.PH_per.eight[0]);
}

/* t4_tom.c */
struct toepcb *alloc_toepcb(struct vi_info *, int, int, int);
struct toepcb *hold_toepcb(struct toepcb *);
void free_toepcb(struct toepcb *);
void offload_socket(struct socket *, struct toepcb *);
void undo_offload_socket(struct socket *);
void final_cpl_received(struct toepcb *);
void insert_tid(struct adapter *, int, void *, int);
void *lookup_tid(struct adapter *, int);
void update_tid(struct adapter *, int, void *);
void remove_tid(struct adapter *, int, int);
void release_tid(struct adapter *, int, struct sge_wrq *);
int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
u_long select_rcv_wnd(struct socket *);
int select_rcv_wscale(void);
uint64_t calc_opt0(struct socket *, struct vi_info *, struct l2t_entry *,
    int, int, int, int);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
void set_tcpddp_ulp_mode(struct toepcb *);
int negative_advice(int);
struct clip_entry *hold_lip(struct tom_data *, struct in6_addr *,
    struct clip_entry *);
void release_lip(struct tom_data *, struct clip_entry *);

/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct rtentry *,
    struct sockaddr *);
void act_open_failure_cleanup(struct adapter *, u_int, u_int);

/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
void t4_uninit_listen_cpl_handlers(void);
int t4_listen_start(struct toedev *, struct tcpcb *);
int t4_listen_stop(struct toedev *, struct tcpcb *);
void t4_syncache_added(struct toedev *, void *);
void t4_syncache_removed(struct toedev *, void *);
int t4_syncache_respond(struct toedev *, void *, struct mbuf *);
int do_abort_req_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
void t4_offload_socket(struct toedev *, void *, struct socket *);

/* t4_cpl_io.c */
void aiotx_init_toep(struct toepcb *);
int t4_aio_queue_aiotx(struct socket *, struct kaiocb *);
void t4_init_cpl_io_handlers(void);
void t4_uninit_cpl_io_handlers(void);
void send_abort_rpl(struct adapter *, struct sge_wrq *, int, int);
void send_flowc_wr(struct toepcb *, struct flowc_tx_params *);
void send_reset(struct adapter *, struct toepcb *, uint32_t);
void make_established(struct toepcb *, uint32_t, uint32_t, uint16_t);
void t4_rcvd(struct toedev *, struct tcpcb *);
void t4_rcvd_locked(struct toedev *, struct tcpcb *);
int t4_tod_output(struct toedev *, struct tcpcb *);
int t4_send_fin(struct toedev *, struct tcpcb *);
int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, int, uint16_t,
    uint64_t, uint64_t, int, int, int);
void t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop);
void t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop);
int do_set_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);

/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
    const char *);
void t4_free_ppod_region(struct ppod_region *);
int t4_alloc_page_pods_for_ps(struct ppod_region *, struct pageset *);
int t4_alloc_page_pods_for_buf(struct ppod_region *, vm_offset_t, int,
    struct ppod_reservation *);
int t4_write_page_pods_for_ps(struct adapter *, struct sge_wrq *, int,
    struct pageset *);
int t4_write_page_pods_for_buf(struct adapter *, struct sge_wrq *, int tid,
    struct ppod_reservation *, vm_offset_t, int);
void t4_free_page_pods(struct ppod_reservation *);
int t4_soreceive_ddp(struct socket *, struct sockaddr **, struct uio *,
    struct mbuf **, struct mbuf **, int *);
int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
int t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
void ddp_assert_empty(struct toepcb *);
void ddp_init_toep(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
void release_ddp_resources(struct toepcb *toep);
void handle_ddp_close(struct toepcb *, struct tcpcb *, uint32_t);
void handle_ddp_indicate(struct toepcb *);
void handle_ddp_tcb_rpl(struct toepcb *, const struct cpl_set_tcb_rpl *);
void insert_ddp_data(struct toepcb *, uint32_t);

#endif