/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef DEV_NETMAP
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <machine/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"

extern int fl_pad;	/* XXXNM */

SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");

/*
 * 0 = normal netmap rx
 * 1 = black hole
 * 2 = supermassive black hole (buffer packing enabled)
 */
int black_hole = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
    "Sink incoming packets.");

int rx_ndesc = 256;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");

int rx_nframes = 64;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN,
    &rx_nframes, 0, "max # of frames received before waking up netmap rx.");

int holdoff_tmr_idx = 2;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int nm_cong_drop = 1;
TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);

int starve_fl = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN,
    &starve_fl, 0, "Don't ring fl db for netmap rx queues.");
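
/*
 * All of the knobs above are loader tunables (set from /boot/loader.conf);
 * the CTLFLAG_RWTUN ones can also be changed at runtime with sysctl(8).
 * Illustrative values only:
 *
 *	hw.cxgbe.nm_holdoff_tmr_idx="2"		(loader.conf)
 *	# sysctl hw.cxgbe.nm_rx_nframes=64	(runtime)
 */

/*
 * Create the hardware ingress queue and freelist that back a netmap rx ring.
 * A non-negative 'cong' enables congestion feedback for the queue.
 */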
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
{
	int rc, cntxt_id, i;
	__be32 v;
	struct adapter *sc = vi->pi->adapter;
	struct sge_params *sp = &sc->params.sge;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_iq_cmd c;

	MPASS(na != NULL);
	MPASS(nm_rxq->iq_desc != NULL);
	MPASS(nm_rxq->fl_desc != NULL);

	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
	    V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	    FW_LEN16(c));
	MPASS(!forwarding_intr_to_fwq(sc));
	KASSERT(nm_rxq->intr_idx < sc->intr_count,
	    ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx));
	v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
	c.type_to_iqandstindex = htobe32(v |
	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
	    V_FW_IQ_CMD_VIID(vi->viid) |
	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
	    F_FW_IQ_CMD_IQGTSMODE |
	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
	c.iqsize = htobe16(vi->qsize_rxq);
	c.iqaddr = htobe64(nm_rxq->iq_ba);
	if (cong >= 0) {
		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
		    F_FW_IQ_CMD_FL0CONGEN);
	}
	c.iqns_to_fl0congen |=
	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
	    (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
	    (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
	c.fl0dcaen_to_fl0cidxfthresh =
	    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
	    X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
	    V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
	    X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
	c.fl0addr = htobe64(nm_rxq->fl_ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create netmap ingress queue: %d\n", rc);
		return (rc);
	}

	nm_rxq->iq_cidx = 0;
	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
	nm_rxq->iq_gen = F_RSPD_GEN;
	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
	nm_rxq->iq_abs_id = be16toh(c.physiqid);
	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
	if (cntxt_id >= sc->sge.niq) {
		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.niq - 1);
	}
	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq) {
		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
		    __func__, cntxt_id, sc->sge.neq - 1);
	}
	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;

	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
	    sc->chip_params->sge_fl_db;

	if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
		uint32_t param, val;

		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
		if (cong == 0)
			val = 1 << 19;
		else {
			val = 2 << 19;
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= 1 << (i << 2);
			}
		}

		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* report error but carry on */
			device_printf(sc->dev,
			    "failed to set congestion manager context for "
			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
		}
	}

	t4_write_reg(sc, sc->sge_gts_reg,
	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));

	return (rc);
}
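
/*
 * Free the hardware ingress queue and freelist created by alloc_nm_rxq_hwq.
 */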
static int
free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
	nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID;
	return (rc);
}
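
/*
 * Create the hardware Ethernet egress queue that backs a netmap tx ring.
 */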
static int
alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	int rc, cntxt_id;
	size_t len;
	struct adapter *sc = vi->pi->adapter;
	struct netmap_adapter *na = NA(vi->ifp);
	struct fw_eq_eth_cmd c;

	MPASS(na != NULL);
	MPASS(nm_txq->desc != NULL);

	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
	bzero(nm_txq->desc, len);

	bzero(&c, sizeof(c));
	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
	    V_FW_EQ_ETH_CMD_VFN(0));
	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
	c.fetchszm_to_iqid =
	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
	    V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
	    V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
	    V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
	c.eqaddr = htobe64(nm_txq->ba);

	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
	if (rc != 0) {
		device_printf(vi->dev,
		    "failed to create netmap egress queue: %d\n", rc);
		return (rc);
	}

	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
	if (cntxt_id >= sc->sge.neq)
		panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
		    cntxt_id, sc->sge.neq - 1);
	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;

	nm_txq->pidx = nm_txq->cidx = 0;
	MPASS(nm_txq->sidx == na->num_tx_desc);
	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;

	nm_txq->doorbells = sc->doorbells;
	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
		uint32_t mask = (1 << s_qpp) - 1;
		volatile uint8_t *udb;

		udb = sc->udbs_base + UDBS_DB_OFFSET;
		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
		else {
			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
			nm_txq->udb_qid = 0;
		}
		nm_txq->udb = (volatile void *)udb;
	}

	return (rc);
}

static int
free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
{
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
	if (rc != 0)
		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
		    nm_txq->cntxt_id, rc);
	nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID;
	return (rc);
}

static int
cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_slot *slot;
	struct netmap_kring *kring;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
	int rc, i, j, hwidx;
	struct hw_buf_info *hwb;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (EAGAIN);

	hwb = &sc->sge.hw_buf_info[0];
	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
		if (hwb->size == NETMAP_BUF_SIZE(na))
			break;
	}
	if (i >= SGE_FLBUF_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}
	hwidx = i;

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		struct irq *irq = &sc->irq[vi->first_intr + i];

		kring = na->rx_rings[nm_rxq->nid];
		if (!nm_kring_pending_on(kring) ||
		    nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID)
			continue;

		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(j));

		atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
	}

	for_each_nm_txq(vi, i, nm_txq) {
		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_on(kring) ||
		    nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID)
			continue;

		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	if (vi->nm_rss == NULL) {
		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}
	for (i = 0; i < vi->rss_size;) {
		for_each_nm_rxq(vi, j, nm_rxq) {
			vi->nm_rss[i++] = nm_rxq->iq_abs_id;
			if (i == vi->rss_size)
				break;
		}
	}
	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->nm_rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "netmap rss_config failed: %d\n", rc);

	return (rc);
}
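
/*
 * Leave netmap mode: restore the interface's RSS table, wait for the hardware
 * to drain each netmap tx queue, and then free the hardware queues.
 */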
static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
    struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	int rc, i;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
	    vi->rss, vi->rss_size);
	if (rc != 0)
		if_printf(ifp, "failed to restore RSS config: %d\n", rc);
	nm_clear_native_flags(na);

	for_each_nm_txq(vi, i, nm_txq) {
		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];

		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_off(kring) ||
		    nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
			continue;

		/* Wait for hw pidx to catch up ... */
		while (be16toh(nm_txq->pidx) != spg->pidx)
			pause("nmpidx", 1);

		/* ... and then for the cidx. */
		while (spg->pidx != spg->cidx)
			pause("nmcidx", 1);

		free_nm_txq_hwq(vi, nm_txq);
	}
	for_each_nm_rxq(vi, i, nm_rxq) {
		struct irq *irq = &sc->irq[vi->first_intr + i];

		kring = na->rx_rings[nm_rxq->nid];
		if (!nm_kring_pending_off(kring) ||
		    nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID)
			continue;

		while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF))
			pause("nmst", 1);

		free_nm_rxq_hwq(vi, nm_rxq);
	}

	return (rc);
}

static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}
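
/*
 * Type 1 FW_ETH_TX_PKTS_WR packing, for reference: the WR header occupies one
 * 16B unit and every frame adds a cpl_tx_pkt_core plus a single-entry
 * ulptx_sgl (two more 16B units), so a WR carrying n frames needs 2n + 1 16B
 * units, i.e. (n + 2) / 2 hardware descriptors (a 64-byte descriptor holds
 * four such units); conversely n descriptors carry at most 2n - 1 frames.
 * That is what the three helpers below compute.
 */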
/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/* Space (in descriptors) needed for a type1 WR that carries n packets */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/* Space (in 16B units) needed for a type1 WR that carries n packets */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}

#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0).  Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}
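
/*
 * When this is zero the final work request of every txsync call requests an
 * immediate egress update (EQUIQ/EQUEQ) instead of relying on the periodic
 * thresholds in cxgbe_nm_tx below.
 */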
int lazy_tx_credit_flush = 1;

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way.  No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			/*
			 * netmap(4) says "netmap does not use features such as
			 * checksum offloading, TCP segmentation offloading,
			 * encryption, VLAN encapsulation/decapsulation, etc."
			 *
			 * So the ncxl interfaces have tx hardware checksumming
			 * disabled by default.  But you can override netmap by
			 * enabling IFCAP_TXCSUM on the interface manually.
			 */
			cpl->ctrl1 = txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba);

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around.  Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt == 0 && npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}

		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
			    F_FW_WR_EQUIQ);
			nm_txq->equeqidx = nm_txq->pidx;
			nm_txq->equiqidx = nm_txq->pidx;
		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
			nm_txq->equeqidx = nm_txq->pidx;
		}
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
			ring_nm_txq_db(sc, nm_txq);
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}
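
/*
 * Note that the tx descriptor ring is never filled completely: at least one
 * descriptor is left unused so that pidx == cidx unambiguously means an empty
 * ring.
 */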
/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}
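
/*
 * netmap txsync: convert the slots between nr_hwcur and rhead into type 1 tx
 * work requests, ring the doorbell, and reclaim completed descriptors to
 * advance nr_hwtail.
 */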
static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining, txcsum;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead.  Note that the driver's pidx moves independent of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}
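
/*
 * netmap rxsync: publish the freelist cidx (advanced by the interrupt handler)
 * as nr_hwtail, and recycle the buffers that userspace has released (nr_hwcur
 * up to rhead) back to the freelist, 8 at a time.
 */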
static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	struct ifnet *ifp = na->ifp;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever. */

	if (netmap_no_pendintr || force_update) {
		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) {
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved));
		nm_rxq->fl_db_saved = 0;
	}

	/* Userspace done with buffers from kring->nr_hwcur to head */
	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	n &= ~7U;
	if (n > 0) {
		u_int fl_pidx = nm_rxq->fl_pidx;
		struct netmap_slot *slot = &ring->slot[fl_pidx];
		uint64_t ba;
		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;

		/*
		 * We always deal with 8 buffers at a time.  We must have
		 * stopped at an 8B boundary (fl_pidx) last time around and we
		 * must have a multiple of 8B buffers to give to the freelist.
		 */
		MPASS((fl_pidx & 7) == 0);
		MPASS((n & 7) == 0);

		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);

		while (n > 0) {
			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
				PNMB(na, slot, &ba);
				MPASS(ba != 0);
				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
				slot->flags &= ~NS_BUF_CHANGED;
				MPASS(fl_pidx <= nm_rxq->fl_sidx);
			}
			n -= 8;
			if (fl_pidx == nm_rxq->fl_sidx) {
				fl_pidx = 0;
				slot = &ring->slot[0];
			}
			if (++dbinc == 8 && n >= 32) {
				wmb();
				if (starve_fl)
					nm_rxq->fl_db_saved += dbinc;
				else {
					t4_write_reg(sc, sc->sge_kdoorbell_reg,
					    nm_rxq->fl_db_val | V_PIDX(dbinc));
				}
				dbinc = 0;
			}
		}
		MPASS(nm_rxq->fl_pidx == fl_pidx);

		if (dbinc > 0) {
			wmb();
			if (starve_fl)
				nm_rxq->fl_db_saved += dbinc;
			else {
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(dbinc));
			}
		}
	}

	return (0);
}

void
cxgbe_nm_attach(struct vi_info *vi)
{
	struct port_info *pi;
	struct adapter *sc;
	struct netmap_adapter na;

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	pi = vi->pi;
	sc = pi->adapter;

	bzero(&na, sizeof(na));

	na.ifp = vi->ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;

	/* Netmap doesn't know about the space reserved for the status page. */
	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;

	/*
	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
	 * num_rx_desc is based on the number of buffers that can be held in the
	 * freelist, and not the number of entries in the iq.  (These two are
	 * not exactly the same due to the space taken up by the status page).
	 */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->nnmtxq;
	na.num_rx_rings = vi->nnmrxq;
	netmap_attach(&na);
}

void
cxgbe_nm_detach(struct vi_info *vi)
{

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	netmap_detach(vi->ifp);
}

static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

	/* data[0] is RSS header */
	return (&cpl->data[1]);
}

static void
handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
    const struct cpl_sge_egr_update *egr)
{
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	netmap_tx_irq(ifp, nm_txq->nid);
}
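
/*
 * Interrupt handler for a netmap rx queue.  Walks the ingress queue, stores
 * the length of each received frame directly in the netmap ring, wakes up
 * netmap every rx_nframes frames, and updates the hardware cidx every
 * rx_ndesc entries.
 */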
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	const void *cpl;
	uint32_t lq;
	u_int work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;
	u_int ndesc = 0;	/* desc processed since last cidx update */
	u_int nframes = 0;	/* frames processed since last netmap wakeup */

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;
		cpl = &d->cpl[0];

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				cpl = unwrap_nm_fw6_msg(cpl);
				/* fall through */
			case CPL_SGE_EGR_UPDATE:
				handle_nm_sge_egr_update(sc, ifp, cpl);
				break;
			case CPL_RX_PKT:
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = 0;
				nframes++;
				if (!(lq & F_RSPD_NEWBUF)) {
					MPASS(black_hole == 2);
					break;
				}
				fl_credits++;
				if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		if (__predict_false(++nframes == rx_nframes) && !black_hole) {
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			netmap_rx_irq(ifp, nm_rxq->nid, &work);
			nframes = 0;
		}

		if (__predict_false(++ndesc == rx_ndesc)) {
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			}
			t4_write_reg(sc, sc->sge_gts_reg,
			    V_CIDXINC(ndesc) |
			    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc = 0;
		}
	}

	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else if (nframes > 0)
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif