
/*
 * ------------------------------------
 *
 * The ena driver provides support for the AWS ENA device, also
 * referred to as their "enhanced networking". This device is present
 * on "Nitro"-based instances. It presents itself with the following
 * PCI Vendor/Device IDs
 *
 * o 1d0f:0ec2 -- ENA PF
 * o 1d0f:1ec2 -- ENA PF (Reserved)
 * o 1d0f:ec20 -- ENA VF
 * o 1d0f:ec21 -- ENA VF (Reserved)
 *
 * The driver implements only the essentials needed
 * to drive traffic on an ENA device. Support for the following is
 * not yet implemented:
 *
 * o Tx checksum offloads
 * o Tx DMA bind (borrow buffers)
 * o Support for different Tx completion policies
 * o More controlled Tx recycling and Rx refill
 *
 * Driver vs. Hardware Types
 * -------------------------
 *
 * To properly communicate with the ENA device the driver must
 * speak its language. These
 * types are defined by the device and are found under the "common"
 * directory of their Linux driver [1]. We have
 * simplified this a bit by defining all device-specific types in the
 * ena_hw.h file. Furthermore, all device-specific types are given an
 * enahw_ prefix, making it clear when we are dealing with a
 * device type and when we are dealing with a driver type.
 *
 * [1]: https://github.com/amzn/amzn-drivers
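 *
 * For example (both names appear below): the Tx completion
 * descriptor defined by the device is enahw_tx_cdesc_t, while the
 * driver's own per-queue state is ena_txq_t.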
 *
 * Groups, Rings (Queues), and Interrupts
 * --------------------------------------
 *
 * The ENA device presents one mac group. This single mac group
 * represents the single unicast address that this device represents
 * in your AWS instance. The ENA device presents no option for
 * configuring additional MAC addresses, multicast, or promisc mode --
 * there is only the one fixed unicast address.
 *
 * The number of rings (queues) the driver exposes is based on what is
 * presented by the device. However, we don't just go with whatever
 * number of queues the device reports, but rather we limit the queues
 * to the number of I/O interrupts we can acquire, servicing one
 * Tx and one Rx on each I/O interrupt.
 *
 * The ENA device presents MSI-X interrupts only. During attach the
 * driver allocates one vector for admin/asynchronous events plus one
 * vector per queue pair, with the I/O vectors starting at index 1.
 * This means that a Tx/Rx queue at index 0 will map to vector 1, and
 * so on.
 *
 * Tx Queue Workings
 * -----------------
 *
 * A single Tx queue (ena_txq_t) is made up of one submission queue
 * (SQ) and its paired completion queue (CQ). The SQ is shared with the
 * device -- where each SQ entry describes the packet to be sent
 * -- and the CQ is where the device reports the completion of
 * a packet (enahw_tx_cdesc_t). For this to work the host and device
 * must agree on which descriptors are owned by the host
 * (free for sending) and which are owned by the device (pending
 * device completion). This state is tracked on the host side via head
 * and tail indexes: together they form an implicit
 * queue of pending packets -- the next packet to be sent by the
 * device is head, and all descriptors up to tail are ready for
 * the device to consume.
 *
 * The CQ's descriptors represent completed events when using per-SQ
 * completion queues.
 *
 * Each descriptor carries a phase bit, used to detect
 * wrap-around. The device initializes the phase to zero, and the host
 * initializes its phase to one. A descriptor is free for host reuse
 * once its packet has been
 * completed by the device (and these completions have been processed
 * by the host).
 *
 * (Diagrams of the ring at various head/tail/phase states elided.)
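 *
 * As a rough sketch -- with illustrative names rather than the
 * driver's exact fields -- the host-side CQ poll implied by the phase
 * rules above looks something like this:
 *
 *    enahw_tx_cdesc_t *cdesc = &cq_descs[cq_head];
 *
 *    while (get_phase(cdesc) == cq_phase) {
 *            process_tx_completion(cdesc);
 *            if (++cq_head == cq_num_descs) {
 *                    cq_head = 0;
 *                    cq_phase ^= 1;
 *            }
 *            cdesc = &cq_descs[cq_head];
 *    }
 *
 * (The expected phase flips on each wrap-around.)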
 *
 * Currently, all packets are copied for Tx. At ring start we allocate
 * a Tx Control Buffer (TCB) for each queue descriptor. Each TCB has
 * a DMA buffer associated with it, large enough to
 * hold the MTU. Therefore, Tx descriptors and TCBs currently have a
 * one-to-one mapping.
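 *
 * A minimal sketch of that copy path (the TCB field names are
 * illustrative, not the driver's exact ones):
 *
 *    ena_tcb_t *tcb = &txq->et_tcbs[tail_idx];
 *    size_t off = 0;
 *
 *    for (mblk_t *mp = pkt; mp != NULL; mp = mp->b_cont) {
 *            bcopy(mp->b_rptr, tcb->tcb_va + off, MBLKL(mp));
 *            off += MBLKL(mp);
 *    }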
 *
 * Rx Queue Workings
 * -----------------
 *
 * The Rx queues (ena_rxq_t)
 * are very much like the Tx queues. There is a paired SQ and CQ for
 * each queue: the SQ is for
 * handing buffers to the device to fill, and the CQ is for describing
 * the buffers the device has filled. Each buffer is tracked by an Rx
 * Control Buffer (RCB); on completion the data is copied into a new
 * mblk, immediately returning the RCB for reuse. As with Tx, this
 * copying trades some performance for implementation simplicity.
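 *
 * A simplified sketch of that Rx completion handling (names
 * illustrative):
 *
 *    mblk_t *mp = allocb(frame_len, 0);
 *
 *    bcopy(rcb_va, mp->b_wptr, frame_len);
 *    mp->b_wptr += frame_len;
 *
 * after which the RCB's buffer is posted straight back to the Rx SQ.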
 *
 * Asynchronous Event Notification Queue (AENQ)
 * --------------------------------------------
 *
 * Each ENA device comes with a mechanism for sending out-of-band
 * notifications to the driver, e.g., a link state change. A given
 * device may not support all the different event types.
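 *
 * A sketch of how such an event might be dispatched (the type,
 * accessor, and handler names here are hypothetical):
 *
 *    enahw_aenq_desc_t *desc = next_aenq_entry(ena);
 *
 *    switch (aenq_group(desc)) {
 *    case AENQ_LINK_CHANGE:
 *            ena_aenq_link_change(ena, desc);
 *            break;
 *    default:
 *            ena_dbg(ena, "unhandled AENQ group");
 *            break;
 *    }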
 *
 * Watchdog and Device Reset
 * -------------------------
 *
 * While the device is running, the driver periodically invokes a
 * watchdog function to check that the device is healthy, resetting the
 * device if not. The device will be reset if any of the following is
 * true:
 *
 * o The device's status register fatal error bit is set. A device
 *   in this state cannot be trusted to make progress;
 * o No keep-alive event has been received from the device for some
 *   time -- see ENA_DEVICE_KEEPALIVE_TIMEOUT_NS;
 * o A Tx queue has remained blocked for some time -- see
 *   the Tx stall timeout;
 * o The device has requested, via an asynchronous event, that we
 *   reset it.
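 *
 * In sketch form (the predicate helpers here are illustrative, not
 * the driver's actual functions), each watchdog pass amounts to:
 *
 *    if (fatal_error_set(ena) || keepalive_expired(ena) ||
 *        txq_stalled_too_long(ena) || reset_requested(ena)) {
 *            ena_trigger_reset(ena, reason);
 *    }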
 *
 * Attach Sequencing
 * -----------------
 *
 * Attach is broken into a sequence of discrete steps, setting up
 * resources in an order determined by the device's programming manual.
 * Among other benefits, it gives a better idea of what state the
 * device is in during debugging.
 *
 * Amazon does not specify the endianness of the ENA device. We assume
 * it is little-endian.
 */

#ifdef _BIG_ENDIAN
#error "ENA driver is little-endian only"
#endif
        /* ena_err() */
        if (ena != NULL && ena->ena_dip != NULL) {
                vdev_err(ena->ena_dip, CE_WARN, fmt, ap);

        /* ena_panic() */
        if (ena != NULL && ena->ena_dip != NULL) {
                vdev_err(ena->ena_dip, CE_PANIC, fmt, ap);

        /* ena_dbg() */
        if (ena != NULL && ena->ena_dip != NULL) {
                dev_err(ena->ena_dip, CE_NOTE, "!%s", msg);

        /* ena_trigger_reset() */
        mutex_enter(&ena->ena_lock);
        ena->ena_reset_reason = reason;
        mutex_exit(&ena->ena_lock);
        atomic_or_32(&ena->ena_state, ENA_STATE_ERROR);
/*
 * Determine if a given feature is available on this device.
 */
        /* ena_is_feat_avail() */
        /*
         * The device attributes feature is always supported, as
         * the driver could not query the device without it.
         */
        return ((ena->ena_supported_features & mask) != 0);

/*
 * Determine if a given capability is available on this device.
 */
        /* ena_is_cap_avail() */
        return ((ena->ena_capabilities & mask) != 0);
        /* ena_device_reset() */
        ena_err(ena, "reset: device is not ready");

        /*
         * The device stores the reset timeout at 100ms resolution; we
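         * scale that up to nanoseconds before waiting on it; e.g., a
         * raw register value of 5 means 5 x 100ms = 500ms.
         */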
        ena_err(ena, "device gave invalid (0) reset timeout");

        ena_err(ena, "device reset start timed out");

        /*
         * Reset the timeout counter for the next device request.
         */

        /*
         * Wait for the device reset to finish.
         */

        ena_err(ena, "device reset timed out");

        ena_dbg(ena, "device reset succeeded");
        /* ena_attach_pci() */
        if (pci_config_setup(ena->ena_dip, &hdl) != 0) {

        ena->ena_pci_hdl = hdl;
        ena->ena_pci_vid = pci_config_get16(hdl, PCI_CONF_VENID);
        ena->ena_pci_did = pci_config_get16(hdl, PCI_CONF_DEVID);
        ena->ena_pci_rev = pci_config_get8(hdl, PCI_CONF_REVID);
        ena->ena_pci_svid = pci_config_get16(hdl, PCI_CONF_SUBVENID);
        ena->ena_pci_sdid = pci_config_get16(hdl, PCI_CONF_SUBSYSID);
            ena->ena_pci_vid, ena->ena_pci_did, ena->ena_pci_rev,
            ena->ena_pci_svid, ena->ena_pci_sdid);
        /* ena_cleanup_pci() */
        pci_config_teardown(&ena->ena_pci_hdl);

        /* ena_cleanup_regs_map() */
        ddi_regs_map_free(&ena->ena_reg_hdl);

        /* ena_attach_regs_map() */
        if (ddi_dev_regsize(ena->ena_dip, ENA_REG_NUMBER, &ena->ena_reg_size) !=
            DDI_SUCCESS) {

        ena_dbg(ena, "register size: %ld", ena->ena_reg_size);
        bzero(&ena->ena_reg_attr, sizeof (ena->ena_reg_attr));
        ena->ena_reg_attr.devacc_attr_version = DDI_DEVICE_ATTR_V1;
        ena->ena_reg_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
        ena->ena_reg_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

        ret = ddi_regs_map_setup(ena->ena_dip, ENA_REG_NUMBER,
            &ena->ena_reg_base, 0, ena->ena_reg_size, &ena->ena_reg_attr,
            &ena->ena_reg_hdl);

            (void *)ena->ena_reg_base);
        /* ena_admin_sq_free() */
        ena_dma_free(&ena->ena_aq.ea_sq.eas_dma);

        /* ena_admin_sq_init() */
        ena_adminq_t *aq = &ena->ena_aq;
        ena_dma_buf_t *dma = &aq->ea_sq.eas_dma;
        size_t size = aq->ea_qlen * sizeof (*aq->ea_sq.eas_entries);

        if (aq->ea_sq.eas_entries == NULL) {

        ENA_DMA_VERIFY_ADDR(ena, dma->edb_cookie->dmac_laddress);
        aq->ea_sq.eas_entries = (void *)dma->edb_va;

        aq->ea_sq.eas_tail = 0;
        aq->ea_sq.eas_phase = 1;
        aq->ea_sq.eas_dbaddr =
            (uint32_t *)(ena->ena_reg_base + ENAHW_REG_ASQ_DB);
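
        /*
         * The queue base is a 64-bit DMA address, handed to the
         * device as two 32-bit halves.
         */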
        addr_low = (uint32_t)(dma->edb_cookie->dmac_laddress);
        addr_high = (uint32_t)(dma->edb_cookie->dmac_laddress >> 32);

        wval = ENAHW_ASQ_CAPS_DEPTH(aq->ea_qlen) |
            ENAHW_ASQ_CAPS_ENTRY_SIZE(sizeof (*aq->ea_sq.eas_entries));
        /* ena_admin_cq_free() */
        ena_dma_free(&ena->ena_aq.ea_cq.eac_dma);

        /* ena_admin_cq_init() */
        ena_adminq_t *aq = &ena->ena_aq;
        ena_dma_buf_t *dma = &aq->ea_cq.eac_dma;

        if (aq->ea_cq.eac_entries == NULL) {
                size_t size = aq->ea_qlen * sizeof (*aq->ea_cq.eac_entries);

        ENA_DMA_VERIFY_ADDR(ena, dma->edb_cookie->dmac_laddress);
        aq->ea_cq.eac_entries = (void *)dma->edb_va;

        aq->ea_cq.eac_head = 0;
        aq->ea_cq.eac_phase = 1;
        addr_low = (uint32_t)(dma->edb_cookie->dmac_laddress);
        addr_high = (uint32_t)(dma->edb_cookie->dmac_laddress >> 32);

        wval = ENAHW_ACQ_CAPS_DEPTH(aq->ea_qlen) |
            ENAHW_ACQ_CAPS_ENTRY_SIZE(sizeof (*aq->ea_cq.eac_entries));
        /* ena_update_hints() */
        ena->ena_device_hints.eh_mmio_read_timeout =
            hints->edh_mmio_read_timeout;
        ena->ena_device_hints.eh_keep_alive_timeout =
            hints->edh_keep_alive_timeout;
        ena->ena_device_hints.eh_tx_comp_timeout = hints->edh_tx_comp_timeout;
        ena->ena_device_hints.eh_missed_tx_reset_threshold =
            hints->edh_missed_tx_reset_threshold;
        ena->ena_device_hints.eh_admin_comp_timeout =
            hints->edh_admin_comp_timeout;
        ena->ena_device_hints.eh_max_tx_sgl = hints->edh_max_tx_sgl;
        ena->ena_device_hints.eh_max_rx_sgl = hints->edh_max_rx_sgl;
/*
 * The maximum number of I/O queues is capped by a value
 * which comes from the common code and presumably is based on device
 * limitations, and further capped by the minimum of the device's
 * max Tx queues and max Rx queues. We could probably loosen this
 * and allow different numbers of
 * Tx and Rx. This is what Linux does, and seems like a fine place
 * to start.
 */
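
/*
 * For example, a device reporting 32 Tx SQs/CQs but only 8 Rx SQs/CQs
 * ends up capped at 8 I/O queues by the MIN() chain below.
 */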
        /* ena_set_max_io_queues() */
        /*
         * Supposedly a device could present a different number of SQs
         * and CQs, so take the minimum of all four.
         */
        max = MIN(ena->ena_tx_max_sq_num, max);
        max = MIN(ena->ena_tx_max_cq_num, max);
        max = MIN(ena->ena_rx_max_sq_num, max);
        max = MIN(ena->ena_rx_max_cq_num, max);

        ena->ena_max_io_queues = max;
/*
 * We require that an Rx or Tx buffer be able to hold the maximum MTU
 * frame along with its headers.
 */
        /* ena_update_buf_sizes() */
        ena->ena_max_frame_hdr = sizeof (struct ether_vlan_header);
        ena->ena_max_frame_total = ena->ena_max_frame_hdr + ena->ena_mtu;
        ena->ena_tx_buf_sz = P2ROUNDUP_TYPED(ena->ena_max_frame_total,
            ena->ena_page_sz, uint32_t);
        ena->ena_rx_buf_sz = P2ROUNDUP_TYPED(ena->ena_max_frame_total +
            ENA_RX_BUF_IPHDR_ALIGNMENT, ena->ena_page_sz, uint32_t);
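
        /*
         * For example, with the 1500 byte fallback MTU and 4 KiB
         * pages: 18 bytes of ether_vlan_header + 1500 = 1518, which
         * rounds up to a single 4 KiB buffer.
         */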
        /* ena_get_hints() */
        /* In this case the device does not support querying hints */
        /* ena_get_offloads() */
        ena->ena_tx_l3_ipv4_csum = false;

        ena->ena_tx_l4_ipv4_part_csum = false;
        ena->ena_tx_l4_ipv4_full_csum = false;
        ena->ena_tx_l4_ipv4_lso = false;

        ena->ena_tx_l4_ipv6_part_csum = false;
        ena->ena_tx_l4_ipv6_full_csum = false;
        ena->ena_tx_l4_ipv6_lso = false;

        ena->ena_rx_l3_ipv4_csum = false;
        ena->ena_rx_l4_ipv4_csum = false;
        ena->ena_rx_l4_ipv6_csum = false;
        ena->ena_rx_hash = false;

        /*
         * In this case the device does not support querying
         * its offload capabilities, so we act as though
         * the device provides no offloads.
         */

        ena->ena_tx_l3_ipv4_csum = ENAHW_FEAT_OFFLOAD_TX_L3_IPV4_CSUM(feat);

        ena->ena_tx_l4_ipv4_part_csum =
        ena->ena_tx_l4_ipv4_full_csum =
        ena->ena_tx_l4_ipv4_lso = ENAHW_FEAT_OFFLOAD_TSO_IPV4(feat);

        ena->ena_tx_l4_ipv6_part_csum =
        ena->ena_tx_l4_ipv6_full_csum =
        ena->ena_tx_l4_ipv6_lso = ENAHW_FEAT_OFFLOAD_TSO_IPV6(feat);

        ena->ena_rx_l3_ipv4_csum = ENAHW_FEAT_OFFLOAD_RX_L3_IPV4_CSUM(feat);
        ena->ena_rx_l4_ipv4_csum = ENAHW_FEAT_OFFLOAD_RX_L4_IPV4_CSUM(feat);
        ena->ena_rx_l4_ipv6_csum = ENAHW_FEAT_OFFLOAD_RX_L4_IPV6_CSUM(feat);
        /* ena_get_prop() */
        int value = ddi_prop_get_int(DDI_DEV_T_ANY, ena->ena_dip,

        /* ena_set_mtu() */
        feat->efm_mtu = ena->ena_mtu;

        ena_err(ena, "failed to set device MTU to %u: %d", ena->ena_mtu,

        /* ena_get_link_config() */
        ena->ena_link_speed_mbits = 1000;
        ena->ena_link_speeds = ENAHW_LINK_SPEED_1G;
        ena->ena_link_duplex = LINK_DUPLEX_FULL;
        ena->ena_link_autoneg = true;

        ena->ena_link_speed_mbits = feat->eflc_speed;
        ena->ena_link_speeds = feat->eflc_supported;
        ena->ena_link_duplex = full_duplex ? LINK_DUPLEX_FULL :
            LINK_DUPLEX_HALF;
        ena->ena_link_autoneg = ENAHW_FEAT_LINK_CONF_AUTONEG(feat);
/*
 * This function should be called after the device is initialized,
 * so that the device-reported limits are available.
 */
        /* ena_attach_read_conf() */
        /*
         * We expect the same depth for both the
         * CQ and SQ, but technically the device could return
         * different values.
         */
        gcv = min(ena->ena_rx_max_sq_num_descs, ena->ena_rx_max_cq_num_descs);

        ena->ena_rxq_num_descs = ena_get_prop(ena, ENA_PROP_RXQ_NUM_DESCS,

        ena->ena_rxq_intr_limit = ena_get_prop(ena, ENA_PROP_RXQ_INTR_LIMIT,

        gcv = min(ena->ena_tx_max_sq_num_descs, ena->ena_tx_max_cq_num_descs);

        ena->ena_txq_num_descs = ena_get_prop(ena, ENA_PROP_TXQ_NUM_DESCS,
/*
 * Perform any necessary device configuration after the driver.conf
 * has been read.
 */
        /* ena_attach_dev_cfg() */
        ASSERT3U(ena->ena_attach_seq, >=, ENA_ATTACH_READ_CONF);

        ena->ena_mtu = 1500;
        ena_err(ena, "trying fallback MTU: %u", ena->ena_mtu);
        /* ena_check_versions() */
        ena->ena_dev_major_vsn = ENAHW_DEV_MAJOR_VSN(dev_vsn);
        ena->ena_dev_minor_vsn = ENAHW_DEV_MINOR_VSN(dev_vsn);

        ena->ena_ctrl_major_vsn = ENAHW_CTRL_MAJOR_VSN(ctrl_vsn);
        ena->ena_ctrl_minor_vsn = ENAHW_CTRL_MINOR_VSN(ctrl_vsn);
        ena->ena_ctrl_subminor_vsn = ENAHW_CTRL_SUBMINOR_VSN(ctrl_vsn);
        ena->ena_ctrl_impl_id = ENAHW_CTRL_IMPL_ID(ctrl_vsn);

        ena_dbg(ena, "device version: %u.%u",
            ena->ena_dev_major_vsn, ena->ena_dev_minor_vsn);
            ena->ena_ctrl_major_vsn, ena->ena_ctrl_minor_vsn,
            ena->ena_ctrl_subminor_vsn, ena->ena_ctrl_impl_id);

        if (ena->ena_ctrl_subminor_vsn < ENA_CTRL_SUBMINOR_VSN_MIN) {
                    ena->ena_ctrl_major_vsn, ena->ena_ctrl_minor_vsn,
                    ena->ena_ctrl_subminor_vsn);
        /* ena_adminq_init() */
        ena_adminq_t *aq = &ena->ena_aq;

        mutex_init(&aq->ea_sq_lock, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&aq->ea_cq_lock, NULL, MUTEX_DRIVER, NULL);
        mutex_init(&aq->ea_stat_lock, NULL, MUTEX_DRIVER, NULL);
        aq->ea_qlen = ENA_ADMINQ_DEPTH;
        aq->ea_pending_cmds = 0;

        aq->ea_cmd_ctxs = kmem_zalloc(sizeof (ena_cmd_ctx_t) * aq->ea_qlen,
            KM_SLEEP);
        list_create(&aq->ea_cmd_ctxs_free, sizeof (ena_cmd_ctx_t),
        list_create(&aq->ea_cmd_ctxs_used, sizeof (ena_cmd_ctx_t),

        aq->ea_poll_mode = true;

        /* ena_cleanup_device_init() */
        ena_adminq_t *aq = &ena->ena_aq;

        mutex_destroy(&aq->ea_sq_lock);
        mutex_destroy(&aq->ea_cq_lock);
        mutex_destroy(&aq->ea_stat_lock);
        list_destroy(&aq->ea_cmd_ctxs_free);
        list_destroy(&aq->ea_cmd_ctxs_used);
        kmem_free(aq->ea_cmd_ctxs, sizeof (ena_cmd_ctx_t) * aq->ea_qlen);
        /* ena_attach_device_init() */
        ena_adminq_t *aq = &ena->ena_aq;

        ena->ena_reset_reason = ENAHW_RESET_NORMAL;
        if (!ena_device_reset(ena, ena->ena_reset_reason))

        ena->ena_dma_width = dma_width;

        /*
         * The value stored in the device register is in the
         * resolution of 100 milliseconds, so scale it up.
         */
        aq->ea_cmd_timeout_ns = max(cmd_timeout, ena_admin_cmd_timeout_ns);

        if (aq->ea_cmd_timeout_ns == 0)
                aq->ea_cmd_timeout_ns = ENA_ADMIN_CMD_DEF_TIMEOUT_NS;
        ena_err(ena, "failed to get device attributes: %d", ret);

        ena_dbg(ena, "impl ID: %u", feat->efda_impl_id);
        ena_dbg(ena, "device version: %u", feat->efda_device_version);
            feat->efda_supported_features);
        ena_dbg(ena, "device capabilities: 0x%x", feat->efda_capabilities);
        ena_dbg(ena, "phys addr width: %u", feat->efda_phys_addr_width);
        ena_dbg(ena, "virt addr width: %u", feat->efda_virt_addr_with);
        maddr = feat->efda_mac_addr;
        ena_dbg(ena, "max MTU: %u", feat->efda_max_mtu);

        bcopy(maddr, ena->ena_mac_addr, ETHERADDRL);
        ena->ena_max_mtu = feat->efda_max_mtu;
        ena->ena_capabilities = feat->efda_capabilities;
        supported_features = feat->efda_supported_features;
        ena->ena_supported_features = supported_features;

        ena->ena_tx_max_sq_num = feat_mqe->efmqe_max_tx_sq_num;
        ena->ena_tx_max_sq_num_descs = feat_mqe->efmqe_max_tx_sq_depth;
        ena->ena_tx_max_cq_num = feat_mqe->efmqe_max_tx_cq_num;
        ena->ena_tx_max_cq_num_descs = feat_mqe->efmqe_max_tx_cq_depth;
        ena->ena_tx_max_desc_per_pkt =
            feat_mqe->efmqe_max_per_packet_tx_descs;
        ena->ena_tx_max_hdr_len = feat_mqe->efmqe_max_tx_header_size;

        ena->ena_rx_max_sq_num = feat_mqe->efmqe_max_rx_sq_num;
        ena->ena_rx_max_sq_num_descs = feat_mqe->efmqe_max_rx_sq_depth;
        ena->ena_rx_max_cq_num = feat_mqe->efmqe_max_rx_cq_num;
        ena->ena_rx_max_cq_num_descs = feat_mqe->efmqe_max_rx_cq_depth;
        ena->ena_rx_max_desc_per_pkt =
            feat_mqe->efmqe_max_per_packet_rx_descs;

        ena->ena_tx_max_sq_num = feat_mq->efmq_max_sq_num;
        ena->ena_tx_max_sq_num_descs = feat_mq->efmq_max_sq_depth;
        ena->ena_tx_max_cq_num = feat_mq->efmq_max_cq_num;
        ena->ena_tx_max_cq_num_descs = feat_mq->efmq_max_cq_depth;
        ena->ena_tx_max_desc_per_pkt =
            feat_mq->efmq_max_per_packet_tx_descs;
        ena->ena_tx_max_hdr_len = feat_mq->efmq_max_header_size;

        ena->ena_rx_max_sq_num = feat_mq->efmq_max_sq_num;
        ena->ena_rx_max_sq_num_descs = feat_mq->efmq_max_sq_depth;
        ena->ena_rx_max_cq_num = feat_mq->efmq_max_cq_num;
        ena->ena_rx_max_cq_num_descs = feat_mq->efmq_max_cq_depth;
        ena->ena_rx_max_desc_per_pkt =
            feat_mq->efmq_max_per_packet_rx_descs;

        ena->ena_mtu = ena->ena_max_mtu;

        ena->ena_tx_sgl_max_sz = 1;
        ena->ena_rx_sgl_max_sz = 1;
        if (ena->ena_device_hints.eh_max_tx_sgl != 0)
                ena->ena_tx_sgl_max_sz = ena->ena_device_hints.eh_max_tx_sgl;
        if (ena->ena_device_hints.eh_max_rx_sgl != 0)
                ena->ena_rx_sgl_max_sz = ena->ena_device_hints.eh_max_rx_sgl;
        /* ena_cleanup_intr_alloc() */
        for (int i = 0; i < ena->ena_num_intrs; i++) {
                int ret = ddi_intr_free(ena->ena_intr_handles[i]);

        if (ena->ena_intr_handles != NULL) {
                kmem_free(ena->ena_intr_handles, ena->ena_intr_handles_sz);
                ena->ena_intr_handles = NULL;
                ena->ena_intr_handles_sz = 0;
/*
 * The Linux driver supports only MSI-X interrupts. We do the same,
 * with the assumption that it's the only type of interrupt the device
 * supports.
 */
        /* ena_attach_intr_alloc() */
        ret = ddi_intr_get_supported_types(ena->ena_dip, &types);

        ena_err(ena, "the ena driver only supports MSI-X interrupts");

        ideal = ena->ena_max_io_queues + 1;
        ret = ddi_intr_get_nintrs(ena->ena_dip, DDI_INTR_TYPE_MSIX, &avail);

        ena_err(ena, "failed to get number of MSI-X interrupts: %d",

        ena_err(ena, "number of MSI-X interrupts is %d, but the driver "

        ena_dbg(ena, "%d MSI-X interrupts available", avail);

        ret = ddi_intr_get_navail(ena->ena_dip, DDI_INTR_TYPE_MSIX, &avail);

        ena_err(ena, "number of available MSI-X interrupts is %d, "

        ena->ena_intr_handles_sz = req * sizeof (ddi_intr_handle_t);
        ena->ena_intr_handles = kmem_zalloc(ena->ena_intr_handles_sz, KM_SLEEP);

        ret = ddi_intr_alloc(ena->ena_dip, ena->ena_intr_handles,

        ena_err(ena, "failed to allocate %d MSI-X interrupts: %d",

        ena->ena_num_intrs = actual;

        ret = ddi_intr_get_cap(ena->ena_intr_handles[0], &ena->ena_intr_caps);

        ret = ddi_intr_get_pri(ena->ena_intr_handles[0], &ena->ena_intr_pri);

        ena_dbg(ena, "MSI-X interrupts allocated: %d, cap: 0x%x, pri: %u",
            actual, ena->ena_intr_caps, ena->ena_intr_pri);

        mutex_init(&ena->ena_lock, NULL, MUTEX_DRIVER,
            DDI_INTR_PRI(ena->ena_intr_pri));
        mutex_init(&ena->ena_watchdog_lock, NULL, MUTEX_DRIVER, NULL);
        /* ena_attach_alloc_rxqs() */
        if (ena->ena_rxqs == NULL) {

        VERIFY3U(ena->ena_attach_seq, >=, ENA_ATTACH_INTR_ALLOC);
        ena->ena_num_rxqs = ena->ena_num_intrs - 1;
        ASSERT3U(ena->ena_num_rxqs, >, 0);
        ena->ena_rxqs = kmem_zalloc(
            ena->ena_num_rxqs * sizeof (*ena->ena_rxqs), KM_SLEEP);

        for (uint_t i = 0; i < ena->ena_num_rxqs; i++) {
                ena_rxq_t *rxq = &ena->ena_rxqs[i];

                rxq->er_rxqs_idx = i;
                rxq->er_intr_vector = i + 1;
                rxq->er_mrh = NULL;

                mutex_init(&rxq->er_lock, NULL, MUTEX_DRIVER,
                    DDI_INTR_PRI(ena->ena_intr_pri));
                mutex_init(&rxq->er_stat_lock, NULL, MUTEX_DRIVER,
                    DDI_INTR_PRI(ena->ena_intr_pri));

                rxq->er_ena = ena;
                rxq->er_sq_num_descs = ena->ena_rxq_num_descs;
                rxq->er_cq_num_descs = ena->ena_rxq_num_descs;

        /* ena_cleanup_rxqs() */
        for (uint_t i = 0; i < ena->ena_num_rxqs; i++) {
                ena_rxq_t *rxq = &ena->ena_rxqs[i];

                mutex_destroy(&rxq->er_lock);
                mutex_destroy(&rxq->er_stat_lock);

        kmem_free(ena->ena_rxqs,
            ena->ena_num_rxqs * sizeof (*ena->ena_rxqs));
        ena->ena_rxqs = NULL;
/*
 * Allocate the parent Tx queue structures. More importantly, this is
 * where the final Tx queue count is fixed, based on the number of
 * interrupts acquired.
 */
        /* ena_attach_alloc_txqs() */
        if (ena->ena_txqs == NULL) {

        VERIFY3U(ena->ena_attach_seq, >=, ENA_ATTACH_INTR_ALLOC);
        ena->ena_num_txqs = ena->ena_num_intrs - 1;
        ASSERT3U(ena->ena_num_txqs, >, 0);
        ena->ena_txqs = kmem_zalloc(
            ena->ena_num_txqs * sizeof (*ena->ena_txqs), KM_SLEEP);

        for (uint_t i = 0; i < ena->ena_num_txqs; i++) {
                ena_txq_t *txq = &ena->ena_txqs[i];

                txq->et_txqs_idx = i;
                txq->et_intr_vector = i + 1;
                txq->et_mrh = NULL;

                mutex_init(&txq->et_lock, NULL, MUTEX_DRIVER,
                    DDI_INTR_PRI(ena->ena_intr_pri));
                mutex_init(&txq->et_stat_lock, NULL, MUTEX_DRIVER,
                    DDI_INTR_PRI(ena->ena_intr_pri));

                txq->et_ena = ena;
                txq->et_sq_num_descs = ena->ena_txq_num_descs;
                txq->et_cq_num_descs = ena->ena_txq_num_descs;

        /* ena_cleanup_txqs() */
        for (uint_t i = 0; i < ena->ena_num_txqs; i++) {
                ena_txq_t *txq = &ena->ena_txqs[i];

                mutex_destroy(&txq->et_lock);
                mutex_destroy(&txq->et_stat_lock);

        kmem_free(ena->ena_txqs,
            ena->ena_num_txqs * sizeof (*ena->ena_txqs));
        ena->ena_txqs = NULL;
/*
 * To reset the device we need to unwind some of the steps taken during attach
 * but, since the device could well be in a failed state, we cannot rely on
 * the device to cooperate.
 *
 * Experimentation has shown that the device hangs onto old async event queue
 * state across a reset.
 *
 * We clean up all of the Tx and Rx ring descriptors and the TCBs but leave the
 * rings themselves allocated. We hold the locks for the
 * Tx and Rx rings throughout, and unset the `ENA_STATE_STARTED` bit in the
 * driver state. The admin interrupt vector, which delivers
 * asynchronous events, is masked after the device is reset until we're ready
 * to handle them again.
 */
        /* ena_reset() */
        ena_txq_state_t tx_state[ena->ena_num_txqs];
        ena_rxq_state_t rx_state[ena->ena_num_rxqs];

        ena_err(ena, "resetting device with reason 0x%x [%s]",

        VERIFY0(ena->ena_state & ENA_STATE_RESETTING);
        atomic_or_32(&ena->ena_state, ENA_STATE_RESETTING);

        VERIFY(ena->ena_state & ENA_STATE_STARTED);
        atomic_and_32(&ena->ena_state, ~ENA_STATE_STARTED);

        mutex_enter(&ena->ena_lock);

        for (uint_t i = 0; i < ena->ena_num_txqs; i++) {
                ena_txq_t *txq = &ena->ena_txqs[i];

                mutex_enter(&txq->et_lock);
                tx_state[i] = txq->et_state;
                if (txq->et_state & ENA_TXQ_STATE_RUNNING)

        for (uint_t i = 0; i < ena->ena_num_rxqs; i++) {
                ena_rxq_t *rxq = &ena->ena_rxqs[i];

                mutex_enter(&rxq->er_lock);
                rx_state[i] = rxq->er_state;
                if (rxq->er_state & ENA_RXQ_STATE_RUNNING)

        ena_err(ena, "reset: failed to reset device");

        for (uint_t i = 0; i < ena->ena_num_rxqs; i++) {
                ena_rxq_t *rxq = &ena->ena_rxqs[i];

                mutex_exit(&rxq->er_lock);
                    rxq->er_m_gen_num);

        for (uint_t i = 0; i < ena->ena_num_txqs; i++) {
                ena_txq_t *txq = &ena->ena_txqs[i];

                mutex_exit(&txq->et_lock);
                    txq->et_m_gen_num);

        atomic_or_32(&ena->ena_state, ENA_STATE_STARTED);

        atomic_and_32(&ena->ena_state, ~ENA_STATE_RESETTING);
        mutex_exit(&ena->ena_lock);
        .ead_name = "device initialization",

        .ead_name = "device config",

        .ead_name = "Tx queues",
        /* ena_cleanup() */
        if (ena == NULL || ena->ena_attach_seq == 0) {

        VERIFY3U(ena->ena_attach_seq, <, ENA_ATTACH_NUM_ENTRIES);

        while (ena->ena_attach_seq > 0) {
                int idx = ena->ena_attach_seq - 1;

                    desc->ead_name, idx);

                if (desc->ead_cleanup_fn != NULL)
                        desc->ead_cleanup_fn(ena, false);
                ena->ena_attach_seq--;

        ASSERT3U(ena->ena_attach_seq, ==, 0);
        mutex_destroy(&ena->ena_lock);
        mutex_destroy(&ena->ena_watchdog_lock);
        /* ena_attach() */
        ena->ena_instance = ddi_get_instance(dip);
        ena->ena_dip = dip;
        ena->ena_page_sz = ddi_ptob(dip, 1);

        ena_dbg(ena, "running attach sequence: %s (%d)", desc->ead_name,

        if (!(success = desc->ead_attach_fn(ena))) {
                    desc->ead_name, i);

                if (ena->ena_attach_seq == ENA_ATTACH_MAC_REGISTER) {

        ena->ena_attach_seq--;

        if (desc->ead_cleanup_fn != NULL)
                desc->ead_cleanup_fn(ena, false);

            desc->ead_name, i);

        ena->ena_attach_seq = desc->ead_seq;

        atomic_or_32(&ena->ena_state, ENA_STATE_INITIALIZED);
        /* ena_detach() */
        /*
         * Detach calls
         * mac_unregister() explicitly -- if there are still
         * active clients, that call will fail and so will detach.
         */

        /* We can't detach if clients are actively using the device. */

        /*
         * Cleanup from here is done on a
         * best-effort basis.
         */
        ena->ena_attach_seq = ENA_ATTACH_RXQS_ALLOC;