/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef __T4_OFFLOAD_H__
#define __T4_OFFLOAD_H__
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/condvar.h>

#define INIT_ULPTX_WRH(w, wrlen, atomic, tid) do { \
	(w)->wr_hi = htonl(V_FW_WR_OP(FW_ULPTX_WR) | V_FW_WR_ATOMIC(atomic)); \
	(w)->wr_mid = htonl(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
	    V_FW_WR_FLOWID(tid)); \
	(w)->wr_lo = cpu_to_be64(0); \
} while (0)

#define INIT_ULPTX_WR(w, wrlen, atomic, tid) \
    INIT_ULPTX_WRH(&((w)->wr), wrlen, atomic, tid)

#define INIT_TP_WR(w, tid) do { \
	(w)->wr.wr_hi = htonl(V_FW_WR_OP(FW_TP_WR) | \
	    V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
	(w)->wr.wr_mid = htonl(V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
	    V_FW_WR_FLOWID(tid)); \
	(w)->wr.wr_lo = cpu_to_be64(0); \
} while (0)

#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
	INIT_TP_WR(w, tid); \
	OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
} while (0)

TAILQ_HEAD(stid_head, stid_region);
struct listen_ctx;

struct stid_region {
	TAILQ_ENTRY(stid_region) link;
	u_int used;	/* # of stids used by this region */
	u_int free;	/* # of contiguous stids free right after this region */
};

/*
 * Max # of ATIDs.  The absolute HW max is larger than this but we reserve a few
 * of the upper bits for use as a cookie to demux the reply.
 */
#define MAX_ATIDS (M_TID_TID + 1)
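
/*
 * Usage sketch (illustration only, not part of the driver API): a caller that
 * builds an immediate-data TP work request typically lays a CPL structure over
 * the WR buffer, initializes the header with INIT_TP_WR_MIT_CPL(), and then
 * fills in the CPL-specific fields.  "struct cpl_foo", CPL_FOO, and "dst"
 * below are hypothetical placeholders; the real CPL definitions live in
 * t4_msg.h.
 *
 *	struct cpl_foo *req = dst;		// dst points at the WR buffer
 *
 *	INIT_TP_WR_MIT_CPL(req, CPL_FOO, tid);	// wr_hi/wr_mid/wr_lo + opcode|tid
 *	req->field = ...;			// CPL-specific payload
 */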

union aopen_entry {
	void *data;
	union aopen_entry *next;
};

/* cxgbe_rate_tag flags */
enum {
	EO_FLOWC_PENDING	= (1 << 0),	/* flowc needs to be sent */
	EO_FLOWC_RPL_PENDING	= (1 << 1),	/* flowc credits due back */
	EO_SND_TAG_REF		= (1 << 2),	/* kernel has a ref on us */
	EO_FLUSH_RPL_PENDING	= (1 << 3),	/* credit flush rpl due back */
};

struct cxgbe_rate_tag {
	struct m_snd_tag com;
	struct adapter *adapter;
	u_int flags;
	struct mtx lock;
	int port_id;
	int etid;
	struct mbufq pending_tx, pending_fwack;
	int plen;
	struct sge_ofld_txq *eo_txq;
	uint32_t ctrl0;
	uint16_t iqid;
	int8_t schedcl;
	uint64_t max_rate;	/* in bytes/s */
	uint8_t tx_total;	/* total tx WR credits (in 16B units) */
	uint8_t tx_credits;	/* tx WR credits (in 16B units) available */
	uint8_t tx_nocompl;	/* tx WR credits since last compl request */
	uint8_t ncompl;		/* # of completions outstanding. */
};

static inline struct cxgbe_rate_tag *
mst_to_crt(struct m_snd_tag *t)
{
	return (__containerof(t, struct cxgbe_rate_tag, com));
}
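
/*
 * Sketch (illustration only): the generic m_snd_tag is embedded in the rate
 * tag as "com", so a tag handed back by the stack (for example the snd_tag
 * attached to an outgoing mbuf's packet header) can be mapped back to the
 * driver's state with mst_to_crt().
 *
 *	struct m_snd_tag *mst = m->m_pkthdr.snd_tag;
 *	struct cxgbe_rate_tag *cst = mst_to_crt(mst);
 *
 *	mtx_lock(&cst->lock);
 *	// inspect cst->tx_credits, cst->max_rate, etc.
 *	mtx_unlock(&cst->lock);
 */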

union etid_entry {
	struct cxgbe_rate_tag *cst;
	union etid_entry *next;
};

/*
 * Holds the size, base address, start, end, etc. of various types of TIDs.
 * The tables themselves are allocated dynamically.
 */
struct tid_info {
	u_int nstids;
	u_int stid_base;

	u_int natids;

	u_int nftids;
	u_int ftid_base;
	u_int ftid_end;

	u_int nhpftids;
	u_int hpftid_base;
	u_int hpftid_end;

	u_int ntids;
	u_int tid_base;

	u_int netids;
	u_int etid_base;
	u_int etid_end;

	struct mtx stid_lock __aligned(CACHE_LINE_SIZE);
	struct listen_ctx **stid_tab;
	u_int stids_in_use;
	u_int nstids_free_head;	/* # of available stids at the beginning */
	struct stid_head stids;
	bool stid_tab_stopped;

	struct mtx atid_lock __aligned(CACHE_LINE_SIZE);
	union aopen_entry *atid_tab;
	union aopen_entry *afree;
	u_int atids_in_use;
	bool atid_alloc_stopped;

	/* High priority filters and normal filters share the lock and cv. */
	struct mtx ftid_lock __aligned(CACHE_LINE_SIZE);
	struct cv ftid_cv;
	struct filter_entry *ftid_tab;
	struct filter_entry *hpftid_tab;
	u_int ftids_in_use;
	u_int hpftids_in_use;

	/*
	 * hashfilter and TOE are mutually exclusive and both use ntids and
	 * tids_in_use.  The lock and cv are used only by hashfilter.
	 */
	struct mtx hftid_lock __aligned(CACHE_LINE_SIZE);
	struct cv hftid_cv;
	void **tid_tab;
	u_int tids_in_use;

	void *hftid_hash_4t;	/* LIST_HEAD(, filter_entry) *hftid_hash_4t; */
	u_long hftid_4t_mask;
	void *hftid_hash_tid;	/* LIST_HEAD(, filter_entry) *hftid_hash_tid; */
	u_long hftid_tid_mask;

	struct mtx etid_lock __aligned(CACHE_LINE_SIZE);
	union etid_entry *etid_tab;
	union etid_entry *efree;
	u_int etids_in_use;
};

struct t4_range {
	u_int start;
	u_int size;
};

struct t4_virt_res {				/* virtualized HW resources */
	struct t4_range ddp;
	struct t4_range iscsi;
	struct t4_range stag;
	struct t4_range rq;
	struct t4_range pbl;
	struct t4_range qp;
	struct t4_range cq;
	struct t4_range srq;
	struct t4_range ocq;
	struct t4_range l2t;
	struct t4_range key;
};

enum {
	ULD_TOM = 0,
	ULD_IWARP,
	ULD_ISCSI,
	ULD_MAX = ULD_ISCSI
};

struct adapter;
struct port_info;
struct uld_info {
	int (*uld_activate)(struct adapter *);
	int (*uld_deactivate)(struct adapter *);
	int (*uld_stop)(struct adapter *);
	int (*uld_restart)(struct adapter *);
};

struct tom_tunables {
	int cong_algorithm;
	int sndbuf;
	int ddp;
	int rx_coalesce;
	int tls;
	int tx_align;
	int tx_zcopy;
	int cop_managed_offloading;
	int autorcvbuf_inc;
	int update_hc_on_pmtu_change;
	int iso;
};

/* iWARP driver tunables */
struct iw_tunables {
	int wc_en;
};

struct tls_tunables {
	int inline_keys;
	int combo_wrs;
};

#ifdef TCP_OFFLOAD
int t4_register_uld(struct uld_info *, int);
int t4_unregister_uld(struct uld_info *, int);
int t4_activate_uld(struct adapter *, int);
int t4_deactivate_uld(struct adapter *, int);
int uld_active(struct adapter *, int);
#endif
#endif
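
/*
 * Registration sketch (illustration only): an upper-layer driver would
 * typically fill in a uld_info with its callbacks and register it under one
 * of the ULD_* identifiers declared above.  The handler and variable names
 * here are hypothetical, not the actual TOE module's symbols.
 *
 *	static struct uld_info example_uld_info = {
 *		.uld_activate = example_activate,
 *		.uld_deactivate = example_deactivate,
 *		.uld_stop = example_stop,
 *		.uld_restart = example_restart,
 *	};
 *
 *	error = t4_register_uld(&example_uld_info, ULD_TOM);
 */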