/linux/drivers/net/ethernet/cavium/thunder/

thunder_bgx.c
     42  struct lmac { struct
     69  struct lmac lmac[MAX_LMAC_PER_BGX]; argument
     82  static int bgx_xaui_check_link(struct lmac *lmac);
    107  static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset) in bgx_reg_read() argument
    109  void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; in bgx_reg_read()
    114  static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val) in bgx_reg_write() argument
    116  void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; in bgx_reg_write()
    121  static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val) in bgx_reg_modify() argument
    123  void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; in bgx_reg_modify()
    128  static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero) in bgx_poll_reg() argument
    [all …]
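The accessors above encode the BGX register layout: each LMAC owns a 1 MB window inside the BGX BAR, selected by shifting the LMAC index into bits 20 and up of the CSR address. Below is a minimal sketch of that addressing, assuming a trimmed struct bgx with only the mapped base; the relaxed MMIO helpers and the OR semantics of the modify helper are assumptions, not a claim about the exact driver code.

#include <linux/io.h>
#include <linux/types.h>

/* Trimmed driver state: the real struct bgx carries much more. */
struct bgx {
	void __iomem *reg_base;			/* BAR mapping of this BGX block */
};

/* Every LMAC's CSRs sit at reg_base + (lmac << 20), i.e. a 1 MB stride. */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);		/* assumed MMIO read primitive */
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);		/* assumed MMIO write primitive */
}

/* Read-modify-write: OR the requested bits into the current value (assumed). */
static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	bgx_reg_write(bgx, lmac, offset, val | bgx_reg_read(bgx, lmac, offset));
}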

nic_main.c
     53  #define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) argument
    161  int bgx_idx, lmac; in nic_mbx_send_ready() local
    171  lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); in nic_mbx_send_ready()
    173  mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); in nic_mbx_send_ready()
    233  int bgx_idx, lmac; in nic_get_bgx_stats() local
    237  lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); in nic_get_bgx_stats()
    245  lmac, bgx->idx); in nic_get_bgx_stats()
    248  lmac, bgx->idx); in nic_get_bgx_stats()
    255  int bgx, lmac, lmac_cnt; in nic_update_hw_frs() local
    262  lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); in nic_update_hw_frs()
    [all …]
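nic_main.c keeps one byte per VF that records which BGX/LMAC pair serves it; NIC_SET_VF_LMAC_MAP at line 53 packs the BGX index into the upper nibble and the LMAC index into the lower one. A sketch of that encoding follows; the two GET macros are inferred from the SET macro's packing and are shown only for illustration.

/* Pack a (BGX, LMAC) pair into one byte: BGX in bits 7:4, LMAC in bits 3:0. */
#define NIC_SET_VF_LMAC_MAP(bgx, lmac)		((((bgx) & 0xF) << 4) | ((lmac) & 0xF))

/* Unpack helpers, inferred from the packing above (illustrative). */
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	(((map) >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	((map) & 0xF)

With that encoding, the lookups at lines 171, 237 and 262 reduce nic->vf_lmac_map[vf] back to an LMAC index before calling into the BGX layer, e.g. bgx_get_lmac_mac() at line 173.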

thunder_bgx.h
    237  u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
    238  u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
/linux/drivers/net/ethernet/marvell/octeontx2/af/

cgx.c
    120  void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) in cgx_write() argument
    122  writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + in cgx_write()
    126  u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) in cgx_read() argument
    128  return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + in cgx_read()
    132  struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) in lmac_pdata()
    243  struct lmac *lmac = lmac_pdata(lmac_id, cgxd); in cgx_get_link_info() local
    245  if (!lmac) in cgx_get_link_info()
    248  *linfo = lmac->link_info; in cgx_get_link_info()
    255  struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); in cgx_lmac_addr_set() local
    260  if (!lmac) in cgx_lmac_addr_set()
    [all …]
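The CGX accessors at lines 120 and 126 use the same per-LMAC windowing idea as thunder_bgx.c, but the stride is not hard-coded: it comes from cgx->mac_ops->lmac_offset, which lets the CGX and RPM blocks (rpm.c below simply forwards to these helpers) share one code path. The sketch below is assembled from the snippets; the trimmed struct cgx and struct mac_ops are stand-ins for the fuller definitions in lmac_common.h.

#include <linux/io.h>
#include <linux/types.h>

struct mac_ops {
	u8 lmac_offset;			/* log2 of the per-LMAC CSR stride */
};

struct cgx {
	void __iomem *reg_base;		/* mapped CGX/RPM CSR base */
	struct mac_ops *mac_ops;	/* CGX- or RPM-specific parameters */
};

/* LMAC 'lmac', register 'offset': base + (lmac << per-MAC stride) + offset */
void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + offset);
}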

lmac_common.h
     32  struct lmac { struct
    151  struct lmac *lmac_idmap[MAX_LMAC_COUNT];
    167  void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val);
    168  u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset);
    169  struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
    170  int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);

rvu_cgx.c
    128  int cgx, lmac, iter; in rvu_map_cgx_lmac_pf() local
    165  lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), in rvu_map_cgx_lmac_pf()
    167  rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); in rvu_map_cgx_lmac_pf()
    168  rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; in rvu_map_cgx_lmac_pf()
    171  rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); in rvu_map_cgx_lmac_pf()
    310  int cgx, lmac, err; in cgx_lmac_event_handler_init() local
    330  for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) { in cgx_lmac_event_handler_init()
    331  err = cgx_lmac_evh_register(&cb, cgxd, lmac); in cgx_lmac_event_handler_init()
    335  cgx, lmac); in cgx_lmac_event_handler_init()
    402  int cgx, lmac, err; in cgx_start_linkup() local
    [all …]

rpm.c
     96  static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val) in rpm_write() argument
     98  cgx_write(rpm, lmac, offset, val); in rpm_write()
    101  static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset) in rpm_read() argument
    103  return cgx_read(rpm, lmac, offset); in rpm_read()
    170  struct lmac *lmac; in rpm_lmac_enadis_rx_pause_fwding() local
    176  lmac = lmac_pdata(lmac_id, rpm); in rpm_lmac_enadis_rx_pause_fwding()
    177  if (!lmac) in rpm_lmac_enadis_rx_pause_fwding()
    181  if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) in rpm_lmac_enadis_rx_pause_fwding()
    575  struct lmac *lmac; in rpm_lmac_internal_loopback() local
    581  lmac = lmac_pdata(lmac_id, rpm); in rpm_lmac_internal_loopback()
    [all …]

mcs.c
    943  int lmac; in cn10kb_mcs_bbe_intr_handler() local
    957  for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) { in cn10kb_mcs_bbe_intr_handler()
    958  if (!(val & BIT_ULL(lmac))) in cn10kb_mcs_bbe_intr_handler()
    960  dev_warn(mcs->dev, "BEE:Policy or data overflow occurred on lmac:%d\n", lmac); in cn10kb_mcs_bbe_intr_handler()
    967  int lmac; in cn10kb_mcs_pab_intr_handler() local
    972  for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) { in cn10kb_mcs_pab_intr_handler()
    973  if (intr & BIT_ULL(lmac)) in cn10kb_mcs_pab_intr_handler()
    974  dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac); in cn10kb_mcs_pab_intr_handler()
   1397  int lmac; in mcs_set_lmac_channels() local
   1403  for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) { in mcs_set_lmac_channels()
    [all …]
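The two MCS interrupt handlers at lines 957 and 972 decode a summary register that carries one bit per LMAC. A hedged sketch of that pattern follows; the function name, the 'intr' argument and the trimmed structs are illustrative, and only the loop body mirrors the snippets.

#include <linux/bits.h>
#include <linux/device.h>

/* Trimmed view of the MCS state touched by the loop (illustrative). */
struct mcs_hw_info {
	int lmac_cnt;
};

struct mcs {
	struct device *dev;
	struct mcs_hw_info *hw;
};

/* Walk the per-LMAC bits of an interrupt summary word and log each set one. */
static void mcs_log_lmac_overflow(struct mcs *mcs, u64 intr)
{
	int lmac;

	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		if (!(intr & BIT_ULL(lmac)))
			continue;
		dev_warn(mcs->dev, "overflow occurred on lmac:%d\n", lmac);
	}
}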

mcs_rvu_if.c
    858  int cgx, lmac, port; in rvu_mcs_set_lmac_bmap() local
    862  lmac = port % rvu->hw->lmac_per_cgx; in rvu_mcs_set_lmac_bmap()
    863  if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac)) in rvu_mcs_set_lmac_bmap()
    873  int lmac, err = 0, mcs_id; in rvu_mcs_init() local
    894  for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) in rvu_mcs_init()
    895  mcs_set_lmac_mode(mcs, lmac, 0); in rvu_mcs_init()

rvu_cn10k.c
    523  int cgx, lmac; in rvu_rpm_set_channels() local
    526  for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) { in rvu_rpm_set_channels()
    527  __rvu_rpm_set_channels(cgx, lmac, base); in rvu_rpm_set_channels()

rvu.h
     65  struct dentry *lmac; member
   1027  u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);

rvu_nix.c
   4608  u8 cgx = 0, lmac = 0; in rvu_mbox_handler_nix_set_hw_frs() local
   4642  rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); in rvu_mbox_handler_nix_set_hw_frs()
   4643  link = (cgx * hw->lmac_per_cgx) + lmac; in rvu_mbox_handler_nix_set_hw_frs()
/linux/drivers/net/wireless/intel/iwlwifi/fw/

smem.c
     15  int i, lmac; in iwl_parse_shared_mem_22000() local
     34  for (lmac = 0; lmac < lmac_num; lmac++) { in iwl_parse_shared_mem_22000()
     36  &mem_cfg->lmac_smem[lmac]; in iwl_parse_shared_mem_22000()
     39  fwrt->smem_cfg.lmac[lmac].txfifo_size[i] = in iwl_parse_shared_mem_22000()
     41  fwrt->smem_cfg.lmac[lmac].rxfifo1_size = in iwl_parse_shared_mem_22000()
     56  fwrt->smem_cfg.lmac[0].txfifo_size[i] = in iwl_parse_shared_mem()
     59  fwrt->smem_cfg.lmac[0].rxfifo1_size = in iwl_parse_shared_mem()
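On the iwlwifi side, 'lmac' indexes per-LMAC FIFO geometry that the firmware reports in its shared-memory configuration; iwl_parse_shared_mem_22000() (line 15) copies it, converted from little-endian, into fwrt->smem_cfg.lmac[]. Below is a sketch of that copy step under simplified, hypothetical struct names; only the txfifo_size[]/rxfifo1_size fields and the le32_to_cpu() conversion come from the snippets.

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>	/* __le32, u32 */
#include <asm/byteorder.h>	/* le32_to_cpu() */

#define NUM_TXFIFOS	8	/* illustrative FIFO count */

/* Firmware-endian view of one LMAC's shared-memory config (simplified). */
struct lmac_fw_cfg {
	__le32 txfifo_size[NUM_TXFIFOS];
	__le32 rxfifo1_size;
};

/* Host-order copy, as kept per LMAC in fwrt->smem_cfg.lmac[] (simplified). */
struct lmac_host_cfg {
	u32 txfifo_size[NUM_TXFIFOS];
	u32 rxfifo1_size;
};

/* Convert one LMAC entry from firmware byte order to host byte order. */
static void copy_lmac_fifo_cfg(struct lmac_host_cfg *dst,
			       const struct lmac_fw_cfg *src)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(src->txfifo_size); i++)
		dst->txfifo_size[i] = le32_to_cpu(src->txfifo_size[i]);
	dst->rxfifo1_size = le32_to_cpu(src->rxfifo1_size);
}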

runtime.h
     35  } lmac[MAX_NUM_LMAC]; member
     87  int lmac; member

dbg.c
    186  cfg->lmac[0].rxfifo1_size, 0, 0); in iwl_fw_dump_rxf()
    194  cfg->lmac[1].rxfifo1_size, in iwl_fw_dump_rxf()
    221  cfg->lmac[0].txfifo_size[i], 0, i); in iwl_fw_dump_txf()
    233  cfg->lmac[1].txfifo_size[i], in iwl_fw_dump_txf()
    722  ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); in iwl_fw_rxf_len()
    746  ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], in iwl_fw_txf_len()
    937  u32 *txf_size = mem_cfg->lmac[i].txfifo_size; in iwl_fw_error_dump_file()
    940  dump_smem_cfg->lmac[i].txfifo_size[j] = in iwl_fw_error_dump_file()
    942  dump_smem_cfg->lmac[i].rxfifo1_size = in iwl_fw_error_dump_file()
    943  cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); in iwl_fw_error_dump_file()
    [all …]
/linux/net/llc/

llc_if.c
     83  int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac, u8 dsap) in llc_establish_connection() argument
     94  memcpy(laddr.mac, lmac, sizeof(laddr.mac)); in llc_establish_connection()
/linux/include/net/

llc_if.h
     65  int llc_establish_connection(struct sock *sk, const u8 *lmac, u8 *dmac,