1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright(c) 2015 - 2020 Intel Corporation.
32 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
78 #define SEC_SC_HALTED 0x4 /* per-context only */
79 #define SEC_SPC_FREEZE 0x8 /* per-HFI only */
87 * 0 - User Fecn Handling
88 * 1 - Vnic
89 * 2 - AIP
90 * 3 - Verbs
101 #define emulator_rev(dd) ((dd)->irev >> 8)
103 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
104 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
113 /* LRH.BTH: QW 0, OFFSET 48 - for match */
122 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
130 /* SC[n..0] QW 0, OFFSET 60 - for select */
156 /* L2_TYPE: QW 0, OFFSET 61 - for match */
164 /* L4_TYPE QW 1, OFFSET 0 - for match */
172 /* 16B VESWID - for select */
174 /* 16B ENTROPY - for select */
236 /* all CceStatus sub-block freeze bits */
241 /* all CceStatus sub-block TXE pause bits */
245 /* all CceStatus sub-block RXE pause bits */
337 /*41-63 reserved*/
454 /*30-31 reserved*/
467 /*36-63 reserved*/
514 /*04-63 reserved*/
546 /* 9-10 reserved */
708 /* 5-63 reserved*/
1070 * in the top-level CceIntStatus.
1080 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1081 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1082 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
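These counts use the usual inclusive-range idiom: a block covering interrupt sources [START, END] owns END + 1 - START of them. With illustrative values only, a range 8..15 gives 15 + 1 - 8 = 8 sources.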
1119 * SDMA error interrupt entry - refers to another register containing more
1150 /* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1284 * hfi1_addr_from_offset - return addr for readq/writeq
1295 if (offset >= dd->base2_start) in hfi1_addr_from_offset()
1296 return dd->kregbase2 + (offset - dd->base2_start); in hfi1_addr_from_offset()
1297 return dd->kregbase1 + offset; in hfi1_addr_from_offset()
1301 * read_csr - read CSR at the indicated offset
1310 if (dd->flags & HFI1_PRESENT) in read_csr()
1312 return -1; in read_csr()
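The elided body of the read path follows directly from the address helper above. A minimal sketch, assuming the standard readq() accessor and the HFI1_PRESENT guard shown:

u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;	/* reads back as all-ones, the usual "device absent" value */
}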
1316 * write_csr - write CSR at the indicated offset
1323 if (dd->flags & HFI1_PRESENT) { in write_csr()
1327 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start)) in write_csr()
1334 * get_csr_addr - return the iomem address for offset
1345 if (dd->flags & HFI1_PRESENT) in get_csr_addr()
1374 u64 csr = entry->csr; in dev_access_u32_csr()
1376 if (entry->flags & CNTR_SDMA) { in dev_access_u32_csr()
1392 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_err_cnt()
1393 return dd->per_sdma[idx].err_cnt; in access_sde_err_cnt()
1402 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_int_cnt()
1403 return dd->per_sdma[idx].sdma_int_cnt; in access_sde_int_cnt()
1412 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_idle_int_cnt()
1413 return dd->per_sdma[idx].idle_int_cnt; in access_sde_idle_int_cnt()
1423 if (dd->per_sdma && idx < dd->num_sdma) in access_sde_progress_int_cnt()
1424 return dd->per_sdma[idx].progress_int_cnt; in access_sde_progress_int_cnt()
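All four per-engine SDMA accessors above share one guarded-index shape. A sketch of the common pattern; the cntr_entry callback signature and the source of idx are assumptions here:

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	unsigned int idx = vl;	/* assumed: engine index is passed via vl */

	/* guard against use before per_sdma[] is allocated */
	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}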
1434 u64 csr = entry->csr; in dev_access_u64_csr()
1436 if (entry->flags & CNTR_VL) { in dev_access_u64_csr()
1453 u32 csr = entry->csr; in dc_access_lcb_cntr()
1464 if (!(dd->flags & HFI1_SHUTDOWN)) in dc_access_lcb_cntr()
1481 return read_write_csr(ppd->dd, entry->csr, mode, data); in port_access_u32_csr()
1489 u64 csr = entry->csr; in port_access_u64_csr()
1491 if (entry->flags & CNTR_VL) { in port_access_u64_csr()
1499 val = read_write_csr(ppd->dd, csr, mode, data); in port_access_u64_csr()
1531 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data); in access_sw_link_dn_cnt()
1541 return read_write_sw(ppd->dd, &ppd->link_up, mode, data); in access_sw_link_up_cnt()
1552 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data); in access_sw_unknown_frame_cnt()
1563 counter = &ppd->port_xmit_discards; in access_sw_xmit_discards()
1565 counter = &ppd->port_xmit_discards_vl[vl]; in access_sw_xmit_discards()
1569 return read_write_sw(ppd->dd, counter, mode, data); in access_sw_xmit_discards()
1581 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors, in access_xmit_constraint_errs()
1593 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors, in access_rcv_constraint_errs()
1617 ret = get_all_cpu_total(cntr) - *z_val; in read_write_cpu()
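read_write_cpu() is the standard per-CPU counter wrapper: a read reports the summed total minus a stored zero baseline, and writing 0 resets the counter by rebasing rather than touching any per-CPU state. A simplified sketch, using the CNTR_MODE_* constants the other accessors take:

static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr, int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)	/* per-device counter, no VL */
		return 0;
	if (mode == CNTR_MODE_R)
		ret = get_all_cpu_total(cntr) - *z_val;
	else if (mode == CNTR_MODE_W && data == 0)
		*z_val = get_all_cpu_total(cntr);	/* reset == rebase */
	return ret;
}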
1637 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl, in access_sw_cpu_intr()
1646 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl, in access_sw_cpu_rcv_limit()
1655 return dd->verbs_dev.n_piowait; in access_sw_pio_wait()
1663 return dd->verbs_dev.n_piodrain; in access_sw_pio_drain()
1671 return dd->ctx0_seq_drop; in access_sw_ctx0_seq_drop()
1679 return dd->verbs_dev.n_txwait; in access_sw_vtx_wait()
1687 return dd->verbs_dev.n_kmem_wait; in access_sw_kmem_wait()
1695 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl, in access_sw_send_schedule()
1706 return dd->misc_err_status_cnt[12]; in access_misc_pll_lock_fail_err_cnt()
1715 return dd->misc_err_status_cnt[11]; in access_misc_mbist_fail_err_cnt()
1724 return dd->misc_err_status_cnt[10]; in access_misc_invalid_eep_cmd_err_cnt()
1733 return dd->misc_err_status_cnt[9]; in access_misc_efuse_done_parity_err_cnt()
1742 return dd->misc_err_status_cnt[8]; in access_misc_efuse_write_err_cnt()
1751 return dd->misc_err_status_cnt[7]; in access_misc_efuse_read_bad_addr_err_cnt()
1760 return dd->misc_err_status_cnt[6]; in access_misc_efuse_csr_parity_err_cnt()
1769 return dd->misc_err_status_cnt[5]; in access_misc_fw_auth_failed_err_cnt()
1778 return dd->misc_err_status_cnt[4]; in access_misc_key_mismatch_err_cnt()
1787 return dd->misc_err_status_cnt[3]; in access_misc_sbus_write_failed_err_cnt()
1796 return dd->misc_err_status_cnt[2]; in access_misc_csr_write_bad_addr_err_cnt()
1805 return dd->misc_err_status_cnt[1]; in access_misc_csr_read_bad_addr_err_cnt()
1814 return dd->misc_err_status_cnt[0]; in access_misc_csr_parity_err_cnt()
1827 return dd->sw_cce_err_status_aggregate; in access_sw_cce_err_status_aggregated_cnt()
1840 return dd->cce_err_status_cnt[40]; in access_cce_msix_csr_parity_err_cnt()
1849 return dd->cce_err_status_cnt[39]; in access_cce_int_map_unc_err_cnt()
1858 return dd->cce_err_status_cnt[38]; in access_cce_int_map_cor_err_cnt()
1867 return dd->cce_err_status_cnt[37]; in access_cce_msix_table_unc_err_cnt()
1876 return dd->cce_err_status_cnt[36]; in access_cce_msix_table_cor_err_cnt()
1885 return dd->cce_err_status_cnt[35]; in access_cce_rxdma_conv_fifo_parity_err_cnt()
1894 return dd->cce_err_status_cnt[34]; in access_cce_rcpl_async_fifo_parity_err_cnt()
1903 return dd->cce_err_status_cnt[33]; in access_cce_seg_write_bad_addr_err_cnt()
1912 return dd->cce_err_status_cnt[32]; in access_cce_seg_read_bad_addr_err_cnt()
1920 return dd->cce_err_status_cnt[31]; in access_la_triggered_cnt()
1929 return dd->cce_err_status_cnt[30]; in access_cce_trgt_cpl_timeout_err_cnt()
1938 return dd->cce_err_status_cnt[29]; in access_pcic_receive_parity_err_cnt()
1947 return dd->cce_err_status_cnt[28]; in access_pcic_transmit_back_parity_err_cnt()
1956 return dd->cce_err_status_cnt[27]; in access_pcic_transmit_front_parity_err_cnt()
1965 return dd->cce_err_status_cnt[26]; in access_pcic_cpl_dat_q_unc_err_cnt()
1974 return dd->cce_err_status_cnt[25]; in access_pcic_cpl_hd_q_unc_err_cnt()
1983 return dd->cce_err_status_cnt[24]; in access_pcic_post_dat_q_unc_err_cnt()
1992 return dd->cce_err_status_cnt[23]; in access_pcic_post_hd_q_unc_err_cnt()
2001 return dd->cce_err_status_cnt[22]; in access_pcic_retry_sot_mem_unc_err_cnt()
2010 return dd->cce_err_status_cnt[21]; in access_pcic_retry_mem_unc_err()
2019 return dd->cce_err_status_cnt[20]; in access_pcic_n_post_dat_q_parity_err_cnt()
2028 return dd->cce_err_status_cnt[19]; in access_pcic_n_post_h_q_parity_err_cnt()
2037 return dd->cce_err_status_cnt[18]; in access_pcic_cpl_dat_q_cor_err_cnt()
2046 return dd->cce_err_status_cnt[17]; in access_pcic_cpl_hd_q_cor_err_cnt()
2055 return dd->cce_err_status_cnt[16]; in access_pcic_post_dat_q_cor_err_cnt()
2064 return dd->cce_err_status_cnt[15]; in access_pcic_post_hd_q_cor_err_cnt()
2073 return dd->cce_err_status_cnt[14]; in access_pcic_retry_sot_mem_cor_err_cnt()
2082 return dd->cce_err_status_cnt[13]; in access_pcic_retry_mem_cor_err_cnt()
2091 return dd->cce_err_status_cnt[12]; in access_cce_cli1_async_fifo_dbg_parity_err_cnt()
2100 return dd->cce_err_status_cnt[11]; in access_cce_cli1_async_fifo_rxdma_parity_err_cnt()
2109 return dd->cce_err_status_cnt[10]; in access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt()
2118 return dd->cce_err_status_cnt[9]; in access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt()
2127 return dd->cce_err_status_cnt[8]; in access_cce_cli2_async_fifo_parity_err_cnt()
2136 return dd->cce_err_status_cnt[7]; in access_cce_csr_cfg_bus_parity_err_cnt()
2145 return dd->cce_err_status_cnt[6]; in access_cce_cli0_async_fifo_parity_err_cnt()
2154 return dd->cce_err_status_cnt[5]; in access_cce_rspd_data_parity_err_cnt()
2163 return dd->cce_err_status_cnt[4]; in access_cce_trgt_access_err_cnt()
2172 return dd->cce_err_status_cnt[3]; in access_cce_trgt_async_fifo_parity_err_cnt()
2181 return dd->cce_err_status_cnt[2]; in access_cce_csr_write_bad_addr_err_cnt()
2190 return dd->cce_err_status_cnt[1]; in access_cce_csr_read_bad_addr_err_cnt()
2199 return dd->cce_err_status_cnt[0]; in access_ccs_csr_parity_err_cnt()
2212 return dd->rcv_err_status_cnt[63]; in access_rx_csr_parity_err_cnt()
2221 return dd->rcv_err_status_cnt[62]; in access_rx_csr_write_bad_addr_err_cnt()
2230 return dd->rcv_err_status_cnt[61]; in access_rx_csr_read_bad_addr_err_cnt()
2239 return dd->rcv_err_status_cnt[60]; in access_rx_dma_csr_unc_err_cnt()
2248 return dd->rcv_err_status_cnt[59]; in access_rx_dma_dq_fsm_encoding_err_cnt()
2257 return dd->rcv_err_status_cnt[58]; in access_rx_dma_eq_fsm_encoding_err_cnt()
2266 return dd->rcv_err_status_cnt[57]; in access_rx_dma_csr_parity_err_cnt()
2275 return dd->rcv_err_status_cnt[56]; in access_rx_rbuf_data_cor_err_cnt()
2284 return dd->rcv_err_status_cnt[55]; in access_rx_rbuf_data_unc_err_cnt()
2293 return dd->rcv_err_status_cnt[54]; in access_rx_dma_data_fifo_rd_cor_err_cnt()
2302 return dd->rcv_err_status_cnt[53]; in access_rx_dma_data_fifo_rd_unc_err_cnt()
2311 return dd->rcv_err_status_cnt[52]; in access_rx_dma_hdr_fifo_rd_cor_err_cnt()
2320 return dd->rcv_err_status_cnt[51]; in access_rx_dma_hdr_fifo_rd_unc_err_cnt()
2329 return dd->rcv_err_status_cnt[50]; in access_rx_rbuf_desc_part2_cor_err_cnt()
2338 return dd->rcv_err_status_cnt[49]; in access_rx_rbuf_desc_part2_unc_err_cnt()
2347 return dd->rcv_err_status_cnt[48]; in access_rx_rbuf_desc_part1_cor_err_cnt()
2356 return dd->rcv_err_status_cnt[47]; in access_rx_rbuf_desc_part1_unc_err_cnt()
2365 return dd->rcv_err_status_cnt[46]; in access_rx_hq_intr_fsm_err_cnt()
2374 return dd->rcv_err_status_cnt[45]; in access_rx_hq_intr_csr_parity_err_cnt()
2383 return dd->rcv_err_status_cnt[44]; in access_rx_lookup_csr_parity_err_cnt()
2392 return dd->rcv_err_status_cnt[43]; in access_rx_lookup_rcv_array_cor_err_cnt()
2401 return dd->rcv_err_status_cnt[42]; in access_rx_lookup_rcv_array_unc_err_cnt()
2410 return dd->rcv_err_status_cnt[41]; in access_rx_lookup_des_part2_parity_err_cnt()
2419 return dd->rcv_err_status_cnt[40]; in access_rx_lookup_des_part1_unc_cor_err_cnt()
2428 return dd->rcv_err_status_cnt[39]; in access_rx_lookup_des_part1_unc_err_cnt()
2437 return dd->rcv_err_status_cnt[38]; in access_rx_rbuf_next_free_buf_cor_err_cnt()
2446 return dd->rcv_err_status_cnt[37]; in access_rx_rbuf_next_free_buf_unc_err_cnt()
2455 return dd->rcv_err_status_cnt[36]; in access_rbuf_fl_init_wr_addr_parity_err_cnt()
2464 return dd->rcv_err_status_cnt[35]; in access_rx_rbuf_fl_initdone_parity_err_cnt()
2473 return dd->rcv_err_status_cnt[34]; in access_rx_rbuf_fl_write_addr_parity_err_cnt()
2482 return dd->rcv_err_status_cnt[33]; in access_rx_rbuf_fl_rd_addr_parity_err_cnt()
2491 return dd->rcv_err_status_cnt[32]; in access_rx_rbuf_empty_err_cnt()
2500 return dd->rcv_err_status_cnt[31]; in access_rx_rbuf_full_err_cnt()
2509 return dd->rcv_err_status_cnt[30]; in access_rbuf_bad_lookup_err_cnt()
2518 return dd->rcv_err_status_cnt[29]; in access_rbuf_ctx_id_parity_err_cnt()
2527 return dd->rcv_err_status_cnt[28]; in access_rbuf_csr_qeopdw_parity_err_cnt()
2536 return dd->rcv_err_status_cnt[27]; in access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt()
2545 return dd->rcv_err_status_cnt[26]; in access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt()
2554 return dd->rcv_err_status_cnt[25]; in access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt()
2563 return dd->rcv_err_status_cnt[24]; in access_rx_rbuf_csr_q_vld_bit_parity_err_cnt()
2572 return dd->rcv_err_status_cnt[23]; in access_rx_rbuf_csr_q_next_buf_parity_err_cnt()
2581 return dd->rcv_err_status_cnt[22]; in access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt()
2590 return dd->rcv_err_status_cnt[21]; in access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt()
2599 return dd->rcv_err_status_cnt[20]; in access_rx_rbuf_block_list_read_cor_err_cnt()
2608 return dd->rcv_err_status_cnt[19]; in access_rx_rbuf_block_list_read_unc_err_cnt()
2617 return dd->rcv_err_status_cnt[18]; in access_rx_rbuf_lookup_des_cor_err_cnt()
2626 return dd->rcv_err_status_cnt[17]; in access_rx_rbuf_lookup_des_unc_err_cnt()
2635 return dd->rcv_err_status_cnt[16]; in access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt()
2644 return dd->rcv_err_status_cnt[15]; in access_rx_rbuf_lookup_des_reg_unc_err_cnt()
2653 return dd->rcv_err_status_cnt[14]; in access_rx_rbuf_free_list_cor_err_cnt()
2662 return dd->rcv_err_status_cnt[13]; in access_rx_rbuf_free_list_unc_err_cnt()
2671 return dd->rcv_err_status_cnt[12]; in access_rx_rcv_fsm_encoding_err_cnt()
2680 return dd->rcv_err_status_cnt[11]; in access_rx_dma_flag_cor_err_cnt()
2689 return dd->rcv_err_status_cnt[10]; in access_rx_dma_flag_unc_err_cnt()
2698 return dd->rcv_err_status_cnt[9]; in access_rx_dc_sop_eop_parity_err_cnt()
2707 return dd->rcv_err_status_cnt[8]; in access_rx_rcv_csr_parity_err_cnt()
2716 return dd->rcv_err_status_cnt[7]; in access_rx_rcv_qp_map_table_cor_err_cnt()
2725 return dd->rcv_err_status_cnt[6]; in access_rx_rcv_qp_map_table_unc_err_cnt()
2734 return dd->rcv_err_status_cnt[5]; in access_rx_rcv_data_cor_err_cnt()
2743 return dd->rcv_err_status_cnt[4]; in access_rx_rcv_data_unc_err_cnt()
2752 return dd->rcv_err_status_cnt[3]; in access_rx_rcv_hdr_cor_err_cnt()
2761 return dd->rcv_err_status_cnt[2]; in access_rx_rcv_hdr_unc_err_cnt()
2770 return dd->rcv_err_status_cnt[1]; in access_rx_dc_intf_parity_err_cnt()
2779 return dd->rcv_err_status_cnt[0]; in access_rx_dma_csr_cor_err_cnt()
2792 return dd->send_pio_err_status_cnt[35]; in access_pio_pec_sop_head_parity_err_cnt()
2801 return dd->send_pio_err_status_cnt[34]; in access_pio_pcc_sop_head_parity_err_cnt()
2810 return dd->send_pio_err_status_cnt[33]; in access_pio_last_returned_cnt_parity_err_cnt()
2819 return dd->send_pio_err_status_cnt[32]; in access_pio_current_free_cnt_parity_err_cnt()
2828 return dd->send_pio_err_status_cnt[31]; in access_pio_reserved_31_err_cnt()
2837 return dd->send_pio_err_status_cnt[30]; in access_pio_reserved_30_err_cnt()
2846 return dd->send_pio_err_status_cnt[29]; in access_pio_ppmc_sop_len_err_cnt()
2855 return dd->send_pio_err_status_cnt[28]; in access_pio_ppmc_bqc_mem_parity_err_cnt()
2864 return dd->send_pio_err_status_cnt[27]; in access_pio_vl_fifo_parity_err_cnt()
2873 return dd->send_pio_err_status_cnt[26]; in access_pio_vlf_sop_parity_err_cnt()
2882 return dd->send_pio_err_status_cnt[25]; in access_pio_vlf_v1_len_parity_err_cnt()
2891 return dd->send_pio_err_status_cnt[24]; in access_pio_block_qw_count_parity_err_cnt()
2900 return dd->send_pio_err_status_cnt[23]; in access_pio_write_qw_valid_parity_err_cnt()
2909 return dd->send_pio_err_status_cnt[22]; in access_pio_state_machine_err_cnt()
2918 return dd->send_pio_err_status_cnt[21]; in access_pio_write_data_parity_err_cnt()
2927 return dd->send_pio_err_status_cnt[20]; in access_pio_host_addr_mem_cor_err_cnt()
2936 return dd->send_pio_err_status_cnt[19]; in access_pio_host_addr_mem_unc_err_cnt()
2945 return dd->send_pio_err_status_cnt[18]; in access_pio_pkt_evict_sm_or_arb_sm_err_cnt()
2954 return dd->send_pio_err_status_cnt[17]; in access_pio_init_sm_in_err_cnt()
2963 return dd->send_pio_err_status_cnt[16]; in access_pio_ppmc_pbl_fifo_err_cnt()
2972 return dd->send_pio_err_status_cnt[15]; in access_pio_credit_ret_fifo_parity_err_cnt()
2981 return dd->send_pio_err_status_cnt[14]; in access_pio_v1_len_mem_bank1_cor_err_cnt()
2990 return dd->send_pio_err_status_cnt[13]; in access_pio_v1_len_mem_bank0_cor_err_cnt()
2999 return dd->send_pio_err_status_cnt[12]; in access_pio_v1_len_mem_bank1_unc_err_cnt()
3008 return dd->send_pio_err_status_cnt[11]; in access_pio_v1_len_mem_bank0_unc_err_cnt()
3017 return dd->send_pio_err_status_cnt[10]; in access_pio_sm_pkt_reset_parity_err_cnt()
3026 return dd->send_pio_err_status_cnt[9]; in access_pio_pkt_evict_fifo_parity_err_cnt()
3035 return dd->send_pio_err_status_cnt[8]; in access_pio_sbrdctrl_crrel_fifo_parity_err_cnt()
3044 return dd->send_pio_err_status_cnt[7]; in access_pio_sbrdctl_crrel_parity_err_cnt()
3053 return dd->send_pio_err_status_cnt[6]; in access_pio_pec_fifo_parity_err_cnt()
3062 return dd->send_pio_err_status_cnt[5]; in access_pio_pcc_fifo_parity_err_cnt()
3071 return dd->send_pio_err_status_cnt[4]; in access_pio_sb_mem_fifo1_err_cnt()
3080 return dd->send_pio_err_status_cnt[3]; in access_pio_sb_mem_fifo0_err_cnt()
3089 return dd->send_pio_err_status_cnt[2]; in access_pio_csr_parity_err_cnt()
3098 return dd->send_pio_err_status_cnt[1]; in access_pio_write_addr_parity_err_cnt()
3107 return dd->send_pio_err_status_cnt[0]; in access_pio_write_bad_ctxt_err_cnt()
3120 return dd->send_dma_err_status_cnt[3]; in access_sdma_pcie_req_tracking_cor_err_cnt()
3129 return dd->send_dma_err_status_cnt[2]; in access_sdma_pcie_req_tracking_unc_err_cnt()
3138 return dd->send_dma_err_status_cnt[1]; in access_sdma_csr_parity_err_cnt()
3147 return dd->send_dma_err_status_cnt[0]; in access_sdma_rpy_tag_err_cnt()
3160 return dd->send_egress_err_status_cnt[63]; in access_tx_read_pio_memory_csr_unc_err_cnt()
3169 return dd->send_egress_err_status_cnt[62]; in access_tx_read_sdma_memory_csr_err_cnt()
3178 return dd->send_egress_err_status_cnt[61]; in access_tx_egress_fifo_cor_err_cnt()
3187 return dd->send_egress_err_status_cnt[60]; in access_tx_read_pio_memory_cor_err_cnt()
3196 return dd->send_egress_err_status_cnt[59]; in access_tx_read_sdma_memory_cor_err_cnt()
3205 return dd->send_egress_err_status_cnt[58]; in access_tx_sb_hdr_cor_err_cnt()
3214 return dd->send_egress_err_status_cnt[57]; in access_tx_credit_overrun_err_cnt()
3223 return dd->send_egress_err_status_cnt[56]; in access_tx_launch_fifo8_cor_err_cnt()
3232 return dd->send_egress_err_status_cnt[55]; in access_tx_launch_fifo7_cor_err_cnt()
3241 return dd->send_egress_err_status_cnt[54]; in access_tx_launch_fifo6_cor_err_cnt()
3250 return dd->send_egress_err_status_cnt[53]; in access_tx_launch_fifo5_cor_err_cnt()
3259 return dd->send_egress_err_status_cnt[52]; in access_tx_launch_fifo4_cor_err_cnt()
3268 return dd->send_egress_err_status_cnt[51]; in access_tx_launch_fifo3_cor_err_cnt()
3277 return dd->send_egress_err_status_cnt[50]; in access_tx_launch_fifo2_cor_err_cnt()
3286 return dd->send_egress_err_status_cnt[49]; in access_tx_launch_fifo1_cor_err_cnt()
3295 return dd->send_egress_err_status_cnt[48]; in access_tx_launch_fifo0_cor_err_cnt()
3304 return dd->send_egress_err_status_cnt[47]; in access_tx_credit_return_vl_err_cnt()
3313 return dd->send_egress_err_status_cnt[46]; in access_tx_hcrc_insertion_err_cnt()
3322 return dd->send_egress_err_status_cnt[45]; in access_tx_egress_fifo_unc_err_cnt()
3331 return dd->send_egress_err_status_cnt[44]; in access_tx_read_pio_memory_unc_err_cnt()
3340 return dd->send_egress_err_status_cnt[43]; in access_tx_read_sdma_memory_unc_err_cnt()
3349 return dd->send_egress_err_status_cnt[42]; in access_tx_sb_hdr_unc_err_cnt()
3358 return dd->send_egress_err_status_cnt[41]; in access_tx_credit_return_partiy_err_cnt()
3367 return dd->send_egress_err_status_cnt[40]; in access_tx_launch_fifo8_unc_or_parity_err_cnt()
3376 return dd->send_egress_err_status_cnt[39]; in access_tx_launch_fifo7_unc_or_parity_err_cnt()
3385 return dd->send_egress_err_status_cnt[38]; in access_tx_launch_fifo6_unc_or_parity_err_cnt()
3394 return dd->send_egress_err_status_cnt[37]; in access_tx_launch_fifo5_unc_or_parity_err_cnt()
3403 return dd->send_egress_err_status_cnt[36]; in access_tx_launch_fifo4_unc_or_parity_err_cnt()
3412 return dd->send_egress_err_status_cnt[35]; in access_tx_launch_fifo3_unc_or_parity_err_cnt()
3421 return dd->send_egress_err_status_cnt[34]; in access_tx_launch_fifo2_unc_or_parity_err_cnt()
3430 return dd->send_egress_err_status_cnt[33]; in access_tx_launch_fifo1_unc_or_parity_err_cnt()
3439 return dd->send_egress_err_status_cnt[32]; in access_tx_launch_fifo0_unc_or_parity_err_cnt()
3448 return dd->send_egress_err_status_cnt[31]; in access_tx_sdma15_disallowed_packet_err_cnt()
3457 return dd->send_egress_err_status_cnt[30]; in access_tx_sdma14_disallowed_packet_err_cnt()
3466 return dd->send_egress_err_status_cnt[29]; in access_tx_sdma13_disallowed_packet_err_cnt()
3475 return dd->send_egress_err_status_cnt[28]; in access_tx_sdma12_disallowed_packet_err_cnt()
3484 return dd->send_egress_err_status_cnt[27]; in access_tx_sdma11_disallowed_packet_err_cnt()
3493 return dd->send_egress_err_status_cnt[26]; in access_tx_sdma10_disallowed_packet_err_cnt()
3502 return dd->send_egress_err_status_cnt[25]; in access_tx_sdma9_disallowed_packet_err_cnt()
3511 return dd->send_egress_err_status_cnt[24]; in access_tx_sdma8_disallowed_packet_err_cnt()
3520 return dd->send_egress_err_status_cnt[23]; in access_tx_sdma7_disallowed_packet_err_cnt()
3529 return dd->send_egress_err_status_cnt[22]; in access_tx_sdma6_disallowed_packet_err_cnt()
3538 return dd->send_egress_err_status_cnt[21]; in access_tx_sdma5_disallowed_packet_err_cnt()
3547 return dd->send_egress_err_status_cnt[20]; in access_tx_sdma4_disallowed_packet_err_cnt()
3556 return dd->send_egress_err_status_cnt[19]; in access_tx_sdma3_disallowed_packet_err_cnt()
3565 return dd->send_egress_err_status_cnt[18]; in access_tx_sdma2_disallowed_packet_err_cnt()
3574 return dd->send_egress_err_status_cnt[17]; in access_tx_sdma1_disallowed_packet_err_cnt()
3583 return dd->send_egress_err_status_cnt[16]; in access_tx_sdma0_disallowed_packet_err_cnt()
3592 return dd->send_egress_err_status_cnt[15]; in access_tx_config_parity_err_cnt()
3601 return dd->send_egress_err_status_cnt[14]; in access_tx_sbrd_ctl_csr_parity_err_cnt()
3610 return dd->send_egress_err_status_cnt[13]; in access_tx_launch_csr_parity_err_cnt()
3619 return dd->send_egress_err_status_cnt[12]; in access_tx_illegal_vl_err_cnt()
3628 return dd->send_egress_err_status_cnt[11]; in access_tx_sbrd_ctl_state_machine_parity_err_cnt()
3637 return dd->send_egress_err_status_cnt[10]; in access_egress_reserved_10_err_cnt()
3646 return dd->send_egress_err_status_cnt[9]; in access_egress_reserved_9_err_cnt()
3655 return dd->send_egress_err_status_cnt[8]; in access_tx_sdma_launch_intf_parity_err_cnt()
3664 return dd->send_egress_err_status_cnt[7]; in access_tx_pio_launch_intf_parity_err_cnt()
3673 return dd->send_egress_err_status_cnt[6]; in access_egress_reserved_6_err_cnt()
3682 return dd->send_egress_err_status_cnt[5]; in access_tx_incorrect_link_state_err_cnt()
3691 return dd->send_egress_err_status_cnt[4]; in access_tx_linkdown_err_cnt()
3700 return dd->send_egress_err_status_cnt[3]; in access_tx_egress_fifi_underrun_or_parity_err_cnt()
3709 return dd->send_egress_err_status_cnt[2]; in access_egress_reserved_2_err_cnt()
3718 return dd->send_egress_err_status_cnt[1]; in access_tx_pkt_integrity_mem_unc_err_cnt()
3727 return dd->send_egress_err_status_cnt[0]; in access_tx_pkt_integrity_mem_cor_err_cnt()
3740 return dd->send_err_status_cnt[2]; in access_send_csr_write_bad_addr_err_cnt()
3749 return dd->send_err_status_cnt[1]; in access_send_csr_read_bad_addr_err_cnt()
3758 return dd->send_err_status_cnt[0]; in access_send_csr_parity_cnt()
3771 return dd->sw_ctxt_err_status_cnt[4]; in access_pio_write_out_of_bounds_err_cnt()
3780 return dd->sw_ctxt_err_status_cnt[3]; in access_pio_write_overflow_err_cnt()
3789 return dd->sw_ctxt_err_status_cnt[2]; in access_pio_write_crosses_boundary_err_cnt()
3798 return dd->sw_ctxt_err_status_cnt[1]; in access_pio_disallowed_packet_err_cnt()
3807 return dd->sw_ctxt_err_status_cnt[0]; in access_pio_inconsistent_sop_err_cnt()
3820 return dd->sw_send_dma_eng_err_status_cnt[23]; in access_sdma_header_request_fifo_cor_err_cnt()
3829 return dd->sw_send_dma_eng_err_status_cnt[22]; in access_sdma_header_storage_cor_err_cnt()
3838 return dd->sw_send_dma_eng_err_status_cnt[21]; in access_sdma_packet_tracking_cor_err_cnt()
3847 return dd->sw_send_dma_eng_err_status_cnt[20]; in access_sdma_assembly_cor_err_cnt()
3856 return dd->sw_send_dma_eng_err_status_cnt[19]; in access_sdma_desc_table_cor_err_cnt()
3865 return dd->sw_send_dma_eng_err_status_cnt[18]; in access_sdma_header_request_fifo_unc_err_cnt()
3874 return dd->sw_send_dma_eng_err_status_cnt[17]; in access_sdma_header_storage_unc_err_cnt()
3883 return dd->sw_send_dma_eng_err_status_cnt[16]; in access_sdma_packet_tracking_unc_err_cnt()
3892 return dd->sw_send_dma_eng_err_status_cnt[15]; in access_sdma_assembly_unc_err_cnt()
3901 return dd->sw_send_dma_eng_err_status_cnt[14]; in access_sdma_desc_table_unc_err_cnt()
3910 return dd->sw_send_dma_eng_err_status_cnt[13]; in access_sdma_timeout_err_cnt()
3919 return dd->sw_send_dma_eng_err_status_cnt[12]; in access_sdma_header_length_err_cnt()
3928 return dd->sw_send_dma_eng_err_status_cnt[11]; in access_sdma_header_address_err_cnt()
3937 return dd->sw_send_dma_eng_err_status_cnt[10]; in access_sdma_header_select_err_cnt()
3946 return dd->sw_send_dma_eng_err_status_cnt[9]; in access_sdma_reserved_9_err_cnt()
3955 return dd->sw_send_dma_eng_err_status_cnt[8]; in access_sdma_packet_desc_overflow_err_cnt()
3964 return dd->sw_send_dma_eng_err_status_cnt[7]; in access_sdma_length_mismatch_err_cnt()
3972 return dd->sw_send_dma_eng_err_status_cnt[6]; in access_sdma_halt_err_cnt()
3981 return dd->sw_send_dma_eng_err_status_cnt[5]; in access_sdma_mem_read_err_cnt()
3990 return dd->sw_send_dma_eng_err_status_cnt[4]; in access_sdma_first_desc_err_cnt()
3999 return dd->sw_send_dma_eng_err_status_cnt[3]; in access_sdma_tail_out_of_bounds_err_cnt()
4008 return dd->sw_send_dma_eng_err_status_cnt[2]; in access_sdma_too_long_err_cnt()
4017 return dd->sw_send_dma_eng_err_status_cnt[1]; in access_sdma_gen_mismatch_err_cnt()
4026 return dd->sw_send_dma_eng_err_status_cnt[0]; in access_sdma_wrong_dw_err_cnt()
4036 u64 csr = entry->csr; in access_dc_rcv_err_cnt()
4040 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ? in access_dc_rcv_err_cnt()
4041 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors; in access_dc_rcv_err_cnt()
4043 dd->sw_rcv_bypass_packet_errors = 0; in access_dc_rcv_err_cnt()
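The conditional above is a saturating add arranged so the u64 can never overflow: compare against CNTR_MAX minus the addend first, then add. Written out:

	if (val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors)
		val = CNTR_MAX;				/* clamp */
	else
		val += dd->sw_rcv_bypass_packet_errors;
	dd->sw_rcv_bypass_packet_errors = 0;		/* folded in, clear side count */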
4056 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4057 ppd->ibport_data.rvp.cntr, vl, \
4074 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
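The ##cntr token pasting lets a single macro stamp out one accessor per port counter, pairing each live rvp field with its z_/n_ reset baseline. As a hypothetical expansion, an invocation like def_access_ibp_counter(rc_acks) would end in:

	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_rc_acks,
			     mode, data);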
5207 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_ax()
5216 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT in is_bx()
5225 u32 is = IS_RCVURGENT_START + rcd->ctxt; in is_urg_masked()
5228 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64))); in is_urg_masked()
5252 len--; in append_str()
5262 len--; in append_str()
5290 len--; /* leave room for a nul */ in flag_string()
5310 --p; in flag_string()
5314 /* add final nul - space already allocated above */ in flag_string()
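flag_string() is the generic bit decoder behind the error handlers below: reserve one byte for the nul up front, append a name per set bit via append_str(), then back-fill the terminator. A simplified sketch (it omits the truncation handling shown above), assuming flag_table entries with a .flag mask and .str name:

static char *flag_string(char *buf, int buf_len,
			 const struct flag_table *table,
			 int table_size, u64 flags)
{
	char *p = buf;
	int len = buf_len;
	int i;

	len--;				/* leave room for a nul */
	for (i = 0; i < table_size; i++)
		if (flags & table[i].flag)
			append_str(buf, &p, &len, table[i].str);
	*p = 0;				/* add final nul */
	return buf;
}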
5529 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) { in handle_cce_err()
5532 start_freeze_handling(dd->pport, FREEZE_SELF); in handle_cce_err()
5537 incr_cntr64(&dd->cce_err_status_cnt[i]); in handle_cce_err()
5539 incr_cntr64(&dd->sw_cce_err_status_aggregate); in handle_cce_err()
5552 struct hfi1_pportdata *ppd = dd->pport; in update_rcverr_timer()
5555 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt && in update_rcverr_timer()
5556 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) { in update_rcverr_timer()
5561 queue_work(ppd->link_wq, &ppd->link_bounce_work); in update_rcverr_timer()
5563 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt; in update_rcverr_timer()
5565 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in update_rcverr_timer()
5570 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0); in init_rcverr()
5572 dd->rcv_ovfl_cnt = 0; in init_rcverr()
5573 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME); in init_rcverr()
5578 if (dd->rcverr_timer.function) in free_rcverr()
5579 del_timer_sync(&dd->rcverr_timer); in free_rcverr()
5600 start_freeze_handling(dd->pport, flags); in handle_rxe_err()
5605 incr_cntr64(&dd->rcv_err_status_cnt[i]); in handle_rxe_err()
5618 incr_cntr64(&dd->misc_err_status_cnt[i]); in handle_misc_err()
5631 start_freeze_handling(dd->pport, 0); in handle_pio_err()
5635 incr_cntr64(&dd->send_pio_err_status_cnt[i]); in handle_pio_err()
5648 start_freeze_handling(dd->pport, 0); in handle_sdma_err()
5652 incr_cntr64(&dd->send_dma_err_status_cnt[i]); in handle_sdma_err()
5658 incr_cntr64(&ppd->port_xmit_discards); in __count_port_discards()
5663 __count_port_discards(dd->pport); in count_port_inactive()
5678 struct hfi1_pportdata *ppd = dd->pport; in handle_send_egress_err_info()
5720 incr_cntr64(&ppd->port_xmit_discards_vl[vl]); in handle_send_egress_err_info()
5722 incr_cntr64(&ppd->port_xmit_discards_vl in handle_send_egress_err_info()
5755 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET); in disallowed_pkt_engine()
5759 * Translate an SDMA engine to a VL. Return -1 if the translation cannot in engine_to_vl()
5769 return -1; in engine_to_vl()
5772 m = rcu_dereference(dd->sdma_map); in engine_to_vl()
5773 vl = m->engine_to_vl[engine]; in engine_to_vl()
5780 * Translate the send context (software index) into a VL. Return -1 if the in sc_to_vl()
5789 sci = &dd->send_contexts[sw_index]; in sc_to_vl()
5792 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15)) in sc_to_vl()
5793 return -1; in sc_to_vl()
5795 sc = sci->sc; in sc_to_vl()
5797 return -1; in sc_to_vl()
5798 if (dd->vld[15].sc == sc) in sc_to_vl()
5801 if (dd->vld[i].sc == sc) in sc_to_vl()
5804 return -1; in sc_to_vl()
5814 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5817 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) in handle_egress_err()
5818 start_freeze_handling(dd->pport, 0); in handle_egress_err()
5822 /* fls64() returns a 1-based offset, we want it zero based */ in handle_egress_err()
5823 int shift = posn - 1; in handle_egress_err()
5846 incr_cntr64(&dd->send_egress_err_status_cnt[i]); in handle_egress_err()
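The surrounding loop is the usual highest-bit-first peel: fls64() numbers bits from 1 (fls64(0x8) == 4), so the shift count is posn - 1, and each pass clears the bit it just dispatched. Schematically:

	while (reg) {
		int posn = fls64(reg);	/* 1-based position of top set bit */
		int shift = posn - 1;	/* zero-based shift count */
		u64 mask = 1ULL << shift;

		/* ... dispatch this error bit ... */
		reg &= ~mask;
	}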
5860 incr_cntr64(&dd->send_err_status_cnt[i]); in handle_txe_err()
5872 * through here to have a central location to correctly handle single-
5873 * or multi-shot errors.
5875 * For non per-context registers, call this routine with a context value
5876 * of 0 so the per-context offset is zero.
5891 reg = read_kctxt_csr(dd, context, eri->status); in interrupt_clear_down()
5894 write_kctxt_csr(dd, context, eri->clear, reg); in interrupt_clear_down()
5895 if (likely(eri->handler)) in interrupt_clear_down()
5896 eri->handler(dd, context, reg); in interrupt_clear_down()
5901 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n", in interrupt_clear_down()
5902 eri->desc, reg); in interrupt_clear_down()
5904 * Read-modify-write so any other masked bits in interrupt_clear_down()
5907 mask = read_kctxt_csr(dd, context, eri->mask); in interrupt_clear_down()
5909 write_kctxt_csr(dd, context, eri->mask, mask); in interrupt_clear_down()
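Between that read and write sits the modify step, clearing enables only for the bits that repeated so anything masked elsewhere stays masked:

	mask = read_kctxt_csr(dd, context, eri->mask);
	mask &= ~reg;		/* drop only the repeat offenders */
	write_kctxt_csr(dd, context, eri->mask, mask);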
5922 if (eri->handler) { in is_misc_err_int()
5925 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n", in is_misc_err_int()
5941 * clear-down mechanism cannot be used because we cannot clear the
5942 * error bits until several other long-running items are done first.
5957 sw_index = dd->hw_to_sw[hw_context]; in is_sendctxt_err_int()
5958 if (sw_index >= dd->num_send_contexts) { in is_sendctxt_err_int()
5964 sci = &dd->send_contexts[sw_index]; in is_sendctxt_err_int()
5965 spin_lock_irqsave(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
5966 sc = sci->sc; in is_sendctxt_err_int()
5970 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
5990 if (sc->type != SC_USER) in is_sendctxt_err_int()
5991 queue_work(dd->pport->hfi1_wq, &sc->halt_work); in is_sendctxt_err_int()
5992 spin_unlock_irqrestore(&dd->sc_lock, irq_flags); in is_sendctxt_err_int()
6001 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]); in is_sendctxt_err_int()
6011 sde = &dd->per_sdma[source]; in handle_sdma_eng_err()
6013 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in handle_sdma_eng_err()
6015 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n", in handle_sdma_eng_err()
6016 sde->this_idx, source, (unsigned long long)status); in handle_sdma_eng_err()
6018 sde->err_cnt++; in handle_sdma_eng_err()
6028 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]); in handle_sdma_eng_err()
6038 struct sdma_engine *sde = &dd->per_sdma[source]; in is_sdma_eng_err_int()
6040 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in is_sdma_eng_err_int()
6042 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx, in is_sdma_eng_err_int()
6063 else if (eri->handler) in is_various_int()
6074 struct hfi1_pportdata *ppd = dd->pport; in handle_qsfp_int()
6083 ppd->driver_link_ready = 0; in handle_qsfp_int()
6089 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6094 ppd->qsfp_info.cache_valid = 0; in handle_qsfp_int()
6095 ppd->qsfp_info.reset_needed = 0; in handle_qsfp_int()
6096 ppd->qsfp_info.limiting_active = 0; in handle_qsfp_int()
6097 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in handle_qsfp_int()
6099 /* Invert the ModPresent pin now to detect plug-in */ in handle_qsfp_int()
6100 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6103 if ((ppd->offline_disabled_reason > in handle_qsfp_int()
6106 (ppd->offline_disabled_reason == in handle_qsfp_int()
6108 ppd->offline_disabled_reason = in handle_qsfp_int()
6112 if (ppd->host_link_state == HLS_DN_POLL) { in handle_qsfp_int()
6119 queue_work(ppd->link_wq, &ppd->link_down_work); in handle_qsfp_int()
6125 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6126 ppd->qsfp_info.cache_valid = 0; in handle_qsfp_int()
6127 ppd->qsfp_info.cache_refresh_required = 1; in handle_qsfp_int()
6128 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in handle_qsfp_int()
6136 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT : in handle_qsfp_int()
6139 ppd->offline_disabled_reason = in handle_qsfp_int()
6147 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6148 ppd->qsfp_info.check_interrupt_flags = 1; in handle_qsfp_int()
6149 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags); in handle_qsfp_int()
6154 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work); in handle_qsfp_int()
6164 if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) { in request_host_lcb_access()
6168 return ret == HCMD_SUCCESS ? 0 : -EBUSY; in request_host_lcb_access()
6182 return ret == HCMD_SUCCESS ? 0 : -EBUSY; in request_8051_lcb_access()
6186 * Set the LCB selector - allow host access. The DCC selector always
6197 * Clear the LCB selector - allow 8051 access. The DCC selector always
6213 * -EBUSY if the 8051 has control and cannot be disturbed
6214 * -errno if unable to acquire access from the 8051
6218 struct hfi1_pportdata *ppd = dd->pport; in acquire_lcb_access()
6228 mutex_lock(&ppd->hls_lock); in acquire_lcb_access()
6230 while (!mutex_trylock(&ppd->hls_lock)) in acquire_lcb_access()
6235 if (ppd->host_link_state & HLS_DOWN) { in acquire_lcb_access()
6237 __func__, link_state_name(ppd->host_link_state)); in acquire_lcb_access()
6238 ret = -EBUSY; in acquire_lcb_access()
6242 if (dd->lcb_access_count == 0) { in acquire_lcb_access()
6245 if (!(dd->flags & HFI1_SHUTDOWN)) in acquire_lcb_access()
6253 dd->lcb_access_count++; in acquire_lcb_access()
6255 mutex_unlock(&ppd->hls_lock); in acquire_lcb_access()
6265 * -errno if unable to release access to the 8051
6277 mutex_lock(&dd->pport->hls_lock); in release_lcb_access()
6279 while (!mutex_trylock(&dd->pport->hls_lock)) in release_lcb_access()
6283 if (dd->lcb_access_count == 0) { in release_lcb_access()
6289 if (dd->lcb_access_count == 1) { in release_lcb_access()
6301 dd->lcb_access_count--; in release_lcb_access()
6303 mutex_unlock(&dd->pport->hls_lock); in release_lcb_access()
6312 * leaving access to the 8051. Assign access now - this constrains the call
6313 * to this routine to be after all LCB set-up is done. In particular, after
6314 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6318 dd->lcb_access_count = 0; in init_lcb_access()
6338 struct hfi1_devdata *dd = ppd->dd; in handle_8051_request()
6446 dd->vl15buf_cached = 0; in reset_link_credits()
6469 ppd->sm_trap_qp = 0x0; in set_linkup_defaults()
6470 ppd->sa_qp = 0x1; in set_linkup_defaults()
6486 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN); in lcb_shutdown()
6494 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in lcb_shutdown()
6505 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6510 lockdep_assert_held(&dd->dc8051_lock); in _dc_shutdown()
6512 if (dd->dc_shutdown) in _dc_shutdown()
6515 dd->dc_shutdown = 1; in _dc_shutdown()
6528 mutex_lock(&dd->dc8051_lock); in dc_shutdown()
6530 mutex_unlock(&dd->dc8051_lock); in dc_shutdown()
6536 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6541 lockdep_assert_held(&dd->dc8051_lock); in _dc_start()
6543 if (!dd->dc_shutdown) in _dc_start()
6556 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en); in _dc_start()
6557 dd->dc_shutdown = 0; in _dc_start()
6562 mutex_lock(&dd->dc8051_lock); in dc_start()
6564 mutex_unlock(&dd->dc8051_lock); in dc_start()
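The underscore-prefixed variants require dd->dc8051_lock (enforced via lockdep_assert_held()); the public entry points are thin mutex brackets around them, matching the fragments above:

static void dc_shutdown(struct hfi1_devdata *dd)
{
	mutex_lock(&dd->dc8051_lock);
	_dc_shutdown(dd);	/* caller-holds-lock variant */
	mutex_unlock(&dd->dc8051_lock);
}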
6575 if (dd->icode != ICODE_FPGA_EMULATION) in adjust_lcb_for_fpga_serdes()
6655 * This is a work-queue function outside of the interrupt.
6661 struct hfi1_devdata *dd = ppd->dd; in handle_sma_message()
6666 * msg is bytes 1-4 of the 40-bit idle message - the command code in handle_sma_message()
6679 * See OPAv1 table 9-14 - HFI and External Switch Ports Key in handle_sma_message()
6684 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED)) in handle_sma_message()
6685 ppd->neighbor_normal = 1; in handle_sma_message()
6689 * See OPAv1 table 9-14 - HFI and External Switch Ports Key in handle_sma_message()
6694 if (ppd->host_link_state == HLS_UP_ARMED && in handle_sma_message()
6695 ppd->is_active_optimize_enabled) { in handle_sma_message()
6696 ppd->neighbor_normal = 1; in handle_sma_message()
6718 spin_lock_irqsave(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6723 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags); in adjust_rcvctrl()
6741 struct hfi1_devdata *dd = ppd->dd; in start_freeze_handling()
6750 dd->flags |= HFI1_FROZEN; in start_freeze_handling()
6757 /* do halt pre-handling on all enabled send contexts */ in start_freeze_handling()
6758 for (i = 0; i < dd->num_send_contexts; i++) { in start_freeze_handling()
6759 sc = dd->send_contexts[i].sc; in start_freeze_handling()
6760 if (sc && (sc->flags & SCF_ENABLED)) in start_freeze_handling()
6772 /* queue non-interrupt handler */ in start_freeze_handling()
6773 queue_work(ppd->hfi1_wq, &ppd->freeze_work); in start_freeze_handling()
6777 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6824 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_freeze()
6832 * Unfreeze handling for the RXE block - kernel contexts only.
6834 * handling on a per-context basis as they call into the driver.
6844 for (i = 0; i < dd->num_rcv_contexts; i++) { in rxe_kernel_unfreeze()
6847 /* Ensure all non-user contexts (including vnic) are enabled */ in rxe_kernel_unfreeze()
6849 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) { in rxe_kernel_unfreeze()
6866 * Non-interrupt SPC freeze handling.
6868 * This is a work-queue function outside of the triggering interrupt.
6874 struct hfi1_devdata *dd = ppd->dd; in handle_freeze()
6887 /* do send egress freeze steps - nothing to do */ in handle_freeze()
6893 * Unfreeze the hardware - clear the freeze, wait for each in handle_freeze()
6912 /* do send egress unfreeze steps - nothing to do */ in handle_freeze()
6919 * it disables and re-enables RXE. Mark the device unfrozen in handle_freeze()
6930 dd->flags &= ~HFI1_FROZEN; in handle_freeze()
6931 wake_up(&dd->event_queue); in handle_freeze()
6937 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6952 link_speed = get_link_speed(ppd->link_speed_active); in update_xmit_counters()
6965 * This is a work-queue function outside of the interrupt.
6971 struct hfi1_devdata *dd = ppd->dd; in handle_link_up()
6993 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) in handle_link_up()
6994 set_up_vl15(dd, dd->vl15buf_cached); in handle_link_up()
6997 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { in handle_link_up()
6998 /* oops - current speed is not enabled, bounce */ in handle_link_up()
7001 ppd->link_speed_active, ppd->link_speed_enabled); in handle_link_up()
7015 ppd->neighbor_guid = 0; in reset_neighbor_info()
7016 ppd->neighbor_port_number = 0; in reset_neighbor_info()
7017 ppd->neighbor_type = 0; in reset_neighbor_info()
7018 ppd->neighbor_fm_security = 0; in reset_neighbor_info()
7093 * This is a work-queue function outside of the interrupt.
7104 if ((ppd->host_link_state & in handle_link_down()
7106 ppd->port_type == PORT_TYPE_FIXED) in handle_link_down()
7107 ppd->offline_disabled_reason = in handle_link_down()
7111 was_up = !!(ppd->host_link_state & HLS_UP); in handle_link_down()
7113 xchg(&ppd->is_link_down_queued, 0); in handle_link_down()
7118 read_link_down_reason(ppd->dd, &link_down_reason); in handle_link_down()
7122 dd_dev_info(ppd->dd, "%sUnexpected link down\n", in handle_link_down()
7130 read_planned_down_reason_code(ppd->dd, &neigh_reason); in handle_link_down()
7131 dd_dev_info(ppd->dd, in handle_link_down()
7137 dd_dev_info(ppd->dd, in handle_link_down()
7142 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n", in handle_link_down()
7148 * If no reason, assume peer-initiated but missed in handle_link_down()
7161 if (was_up && ppd->local_link_down_reason.sma == 0 && in handle_link_down()
7162 ppd->neigh_link_down_reason.sma == 0) { in handle_link_down()
7163 ppd->local_link_down_reason.sma = in handle_link_down()
7164 ppd->local_link_down_reason.latest; in handle_link_down()
7165 ppd->neigh_link_down_reason.sma = in handle_link_down()
7166 ppd->neigh_link_down_reason.latest; in handle_link_down()
7172 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); in handle_link_down()
7178 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) in handle_link_down()
7179 dc_shutdown(ppd->dd); in handle_link_down()
7192 if (ppd->host_link_state & HLS_UP) { in handle_link_bounce()
7196 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n", in handle_link_bounce()
7197 __func__, link_state_name(ppd->host_link_state)); in handle_link_bounce()
7257 if (ppd->pkeys[2] != 0) { in clear_full_mgmt_pkey()
7258 ppd->pkeys[2] = 0; in clear_full_mgmt_pkey()
7260 hfi1_event_pkey_change(ppd->dd, ppd->port); in clear_full_mgmt_pkey()
7275 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup) in link_width_to_bits()
7333 if ((dd->icode == ICODE_RTL_SILICON) && in get_link_widths()
7334 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) { in get_link_widths()
7338 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G; in get_link_widths()
7341 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7347 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G; in get_link_widths()
7391 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7402 /* get end-of-LNI link widths */ in get_linkup_link_widths()
7403 get_linkup_widths(ppd->dd, &tx_width, &rx_width); in get_linkup_link_widths()
7406 ppd->link_width_active = tx_width; in get_linkup_link_widths()
7408 ppd->link_width_downgrade_tx_active = ppd->link_width_active; in get_linkup_link_widths()
7409 ppd->link_width_downgrade_rx_active = ppd->link_width_active; in get_linkup_link_widths()
7411 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported; in get_linkup_link_widths()
7413 ppd->current_egress_rate = active_egress_rate(ppd); in get_linkup_link_widths()
7419 * This is a work-queue function outside of the interrupt.
7425 struct hfi1_devdata *dd = ppd->dd; in handle_verify_cap()
7472 * about the peer Z value - our sent vAU is 3 (hardwired) and is not in handle_verify_cap()
7481 * credits value and wait for link-up interrupt to set it. in handle_verify_cap()
7484 dd->vl15buf_cached = vl15buf; in handle_verify_cap()
7487 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; in handle_verify_cap()
7513 ppd->link_speed_active = 0; /* invalid value */ in handle_verify_cap()
7514 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in handle_verify_cap()
7518 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; in handle_verify_cap()
7521 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7526 u8 rate = remote_tx_rate & ppd->local_tx_rate; in handle_verify_cap()
7529 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7531 ppd->link_speed_active = OPA_LINK_SPEED_12_5G; in handle_verify_cap()
7533 if (ppd->link_speed_active == 0) { in handle_verify_cap()
7536 ppd->link_speed_active = OPA_LINK_SPEED_25G; in handle_verify_cap()
7546 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; in handle_verify_cap()
7548 ppd->port_ltp_crc_mode |= in handle_verify_cap()
7549 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4; in handle_verify_cap()
7551 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val); in handle_verify_cap()
7573 /* pull LCB fifos out of reset - all fifo clocks must be stable */ in handle_verify_cap()
7585 * apply_link_downgrade_policy - Apply the link width downgrade enabled
7611 mutex_lock(&ppd->hls_lock); in apply_link_downgrade_policy()
7613 if (ppd->host_link_state & HLS_DOWN) { in apply_link_downgrade_policy()
7615 if (ppd->host_link_state & HLS_GOING_UP) { in apply_link_downgrade_policy()
7617 mutex_unlock(&ppd->hls_lock); in apply_link_downgrade_policy()
7621 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7628 lwde = ppd->link_width_downgrade_enabled; in apply_link_downgrade_policy()
7631 get_link_widths(ppd->dd, &tx, &rx); in apply_link_downgrade_policy()
7632 ppd->link_width_downgrade_tx_active = tx; in apply_link_downgrade_policy()
7633 ppd->link_width_downgrade_rx_active = rx; in apply_link_downgrade_policy()
7636 if (ppd->link_width_downgrade_tx_active == 0 || in apply_link_downgrade_policy()
7637 ppd->link_width_downgrade_rx_active == 0) { in apply_link_downgrade_policy()
7639 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n"); in apply_link_downgrade_policy()
7645 if ((ppd->link_width_active != in apply_link_downgrade_policy()
7646 ppd->link_width_downgrade_tx_active) || in apply_link_downgrade_policy()
7647 (ppd->link_width_active != in apply_link_downgrade_policy()
7648 ppd->link_width_downgrade_rx_active)) { in apply_link_downgrade_policy()
7649 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7651 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7653 ppd->link_width_active, in apply_link_downgrade_policy()
7654 ppd->link_width_downgrade_tx_active, in apply_link_downgrade_policy()
7655 ppd->link_width_downgrade_rx_active); in apply_link_downgrade_policy()
7659 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 || in apply_link_downgrade_policy()
7660 (lwde & ppd->link_width_downgrade_rx_active) == 0) { in apply_link_downgrade_policy()
7662 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7664 dd_dev_err(ppd->dd, in apply_link_downgrade_policy()
7666 lwde, ppd->link_width_downgrade_tx_active, in apply_link_downgrade_policy()
7667 ppd->link_width_downgrade_rx_active); in apply_link_downgrade_policy()
7673 mutex_unlock(&ppd->hls_lock); in apply_link_downgrade_policy()
7688 * This is a work-queue function outside of the interrupt.
7695 dd_dev_info(ppd->dd, "8051: Link width downgrade\n"); in handle_link_downgrade()
7697 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active); in handle_link_downgrade()
7732 struct hfi1_pportdata *ppd = dd->pport; in handle_8051_interrupt()
7758 if (ppd->host_link_state in handle_8051_interrupt()
7771 ppd->unknown_frame_count++; in handle_8051_interrupt()
7797 queue_work(ppd->link_wq, &ppd->sma_message_work); in handle_8051_interrupt()
7802 queue_work(ppd->link_wq, &ppd->link_up_work); in handle_8051_interrupt()
7810 queue_work(ppd->link_wq, &ppd->link_vc_work); in handle_8051_interrupt()
7825 queue_work(ppd->link_wq, &ppd->link_downgrade_work); in handle_8051_interrupt()
7863 if ((ppd->host_link_state & in handle_8051_interrupt()
7865 ppd->link_enabled == 0) { in handle_8051_interrupt()
7867 __func__, ppd->host_link_state, in handle_8051_interrupt()
7868 ppd->link_enabled); in handle_8051_interrupt()
7870 if (xchg(&ppd->is_link_down_queued, 1) == 1) in handle_8051_interrupt()
7875 queue_work(ppd->link_wq, &ppd->link_down_work); in handle_8051_interrupt()
7930 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7935 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7937 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7939 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7945 struct hfi1_pportdata *ppd = dd->pport; in handle_dcc_err()
7946 /* this counter saturates at (2^32) - 1 */ in handle_dcc_err()
7947 if (ppd->link_downed < (u32)UINT_MAX) in handle_dcc_err()
7948 ppd->link_downed++; in handle_dcc_err()
7956 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) { in handle_dcc_err()
7957 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK; in handle_dcc_err()
7959 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK; in handle_dcc_err()
7973 if (ppd->port_error_action & in handle_dcc_err()
7992 do_bounce = ppd->port_error_action & in handle_dcc_err()
8009 if (!(dd->err_info_rcvport.status_and_code & in handle_dcc_err()
8011 dd->err_info_rcvport.status_and_code = in handle_dcc_err()
8014 dd->err_info_rcvport.status_and_code |= in handle_dcc_err()
8020 dd->err_info_rcvport.packet_flit1 = hdr0; in handle_dcc_err()
8021 dd->err_info_rcvport.packet_flit2 = hdr1; in handle_dcc_err()
8044 do_bounce = ppd->port_error_action & in handle_dcc_err()
8068 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev))) in handle_dcc_err()
8083 queue_work(ppd->link_wq, &ppd->link_bounce_work); in handle_dcc_err()
8102 if (eri->handler) { in is_dc_int()
8109 * and it is non-maskable. This is because if a parity in is_dc_int()
8133 * 0 - N-1 = SDma
8134 * N - 2N-1 = SDmaProgress
8135 * 2N - 3N-1 = SDmaIdle
8147 sdma_dumpstate(&dd->per_sdma[which]); in is_sdma_eng_int()
8150 if (likely(what < 3 && which < dd->num_sdma)) { in is_sdma_eng_int()
8151 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source); in is_sdma_eng_int()
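With N engines, the three bands decode with one divide/modulo pair before the what < 3 && which < dd->num_sdma guard rejects anything outside the table. Presumably, with the engine-count constant name assumed:

	unsigned int what  = source / TXE_NUM_SDMA_ENGINES;	/* 0=SDma, 1=Progress, 2=Idle */
	unsigned int which = source % TXE_NUM_SDMA_ENGINES;	/* engine number */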
8159 * is_rcv_avail_int() - User receive context available IRQ handler
8166 * and can only be used for non-threaded IRQs.
8173 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_avail_int()
8191 * is_rcv_urgent_int() - User receive context urgent IRQ handler
8204 if (likely(source < dd->num_rcv_contexts)) { in is_rcv_urgent_int()
8260 * Interrupt source interrupt - called when the given source has an interrupt.
8261 * Source is a bit index into an array of 64-bit integers.
8267 /* avoids a double compare by walking the table in-order */ in is_interrupt()
8268 for (entry = &is_table[0]; entry->is_name; entry++) { in is_interrupt()
8269 if (source <= entry->end) { in is_interrupt()
8271 entry->is_int(dd, source - entry->start); in is_interrupt()
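A single compare per entry suffices because is_table[] is sorted by ascending source range; the first entry whose end covers the source owns it. The entries are presumably along the lines of:

struct is_table {
	int start;	/* first interrupt source in the range (inclusive) */
	int end;	/* last interrupt source (inclusive) */
	/* name formatter; a NULL is_name terminates the table walk */
	char *(*is_name)(char *name, size_t size, unsigned int source);
	void (*is_int)(struct hfi1_devdata *dd, unsigned int source);
};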
8280 * general_interrupt - General interrupt handler
8284 * This is able to correctly handle all non-threaded interrupts. Receive
8296 this_cpu_inc(*dd->int_counter); in general_interrupt()
8300 if (dd->gi_mask[i] == 0) { in general_interrupt()
8305 dd->gi_mask[i]; in general_interrupt()
8324 struct hfi1_devdata *dd = sde->dd; in sdma_interrupt()
8328 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, in sdma_interrupt()
8333 this_cpu_inc(*dd->int_counter); in sdma_interrupt()
8338 & sde->imask; in sdma_interrupt()
8349 sde->this_idx); in sdma_interrupt()
8361 struct hfi1_devdata *dd = rcd->dd; in clear_recv_intr()
8362 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg); in clear_recv_intr()
8364 write_csr(dd, addr, rcd->imask); in clear_recv_intr()
8372 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask); in force_recv_intr()
8376 * Return non-zero if a packet is present.
8393 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in check_packet_present()
8404 struct hfi1_devdata *dd = rcd->dd; in receive_interrupt_common()
8407 this_cpu_inc(*dd->int_counter); in receive_interrupt_common()
8412 * __hfi1_rcd_eoi_intr() - Make HW issue receive interrupt
8420 if (!rcd->rcvhdrq) in __hfi1_rcd_eoi_intr()
8428 * hfi1_rcd_eoi_intr() - End of Interrupt processing action
8449 * hfi1_netdev_rx_napi - napi poll function to move eoi inline
8457 struct hfi1_ctxtdata *rcd = rxq->rcd; in hfi1_netdev_rx_napi()
8460 work_done = rcd->do_interrupt(rcd, budget); in hfi1_netdev_rx_napi()
8477 if (likely(rcd->napi)) { in receive_context_interrupt_napi()
8478 if (likely(napi_schedule_prep(rcd->napi))) in receive_context_interrupt_napi()
8479 __napi_schedule_irqoff(rcd->napi); in receive_context_interrupt_napi()
8484 rcd->ctxt); in receive_context_interrupt_napi()
8507 disposition = rcd->do_interrupt(rcd, 0); in receive_context_interrupt()
8530 (void)rcd->do_interrupt(rcd, 1); in receive_context_thread()
8576 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in read_lcb_via_8051()
8582 return -EBUSY; in read_lcb_via_8051()
8585 /* register is an index of LCB registers: (offset - base) / 8 */ in read_lcb_via_8051()
8586 regno = (addr - DC_LCB_CFG_RUN) >> 3; in read_lcb_via_8051()
8589 return -EBUSY; in read_lcb_via_8051()
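The 8051 addresses LCB registers by index rather than byte offset, hence (addr - DC_LCB_CFG_RUN) >> 3 for 8-byte CSRs: an LCB register 0x18 bytes past DC_LCB_CFG_RUN, for example, is index 3 from the 8051's point of view.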
8619 if (likely(ret != -EBUSY)) in update_lcb_cache()
8636 return -1; in read_lcb_cache()
8641 * Return 0 on success, -EBUSY on failure.
8645 struct hfi1_pportdata *ppd = dd->pport; in read_lcb_csr()
8648 if (ppd->host_link_state & HLS_UP) in read_lcb_csr()
8651 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) { in read_lcb_csr()
8653 return -EBUSY; in read_lcb_csr()
8670 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || in write_lcb_via_8051()
8671 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) { in write_lcb_via_8051()
8677 return -EBUSY; in write_lcb_via_8051()
8680 /* register is an index of LCB registers: (offset - base) / 8 */ in write_lcb_via_8051()
8681 regno = (addr - DC_LCB_CFG_RUN) >> 3; in write_lcb_via_8051()
8684 return -EBUSY; in write_lcb_via_8051()
8690 * Return 0 on success, -EBUSY on failure.
8694 struct hfi1_pportdata *ppd = dd->pport; in write_lcb_csr()
8697 if (ppd->host_link_state & HLS_UP) in write_lcb_csr()
8700 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) in write_lcb_csr()
8701 return -EBUSY; in write_lcb_csr()
8721 mutex_lock(&dd->dc8051_lock); in do_8051_command()
8724 if (dd->dc_shutdown) { in do_8051_command()
8725 return_code = -ENODEV; in do_8051_command()
8739 if (dd->dc8051_timed_out) { in do_8051_command()
8740 if (dd->dc8051_timed_out > 1) { in do_8051_command()
8744 return_code = -ENXIO; in do_8051_command()
8763 * 39:00 -> in_data[47:8] in do_8051_command()
8764 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE in do_8051_command()
8765 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA in do_8051_command()
8769 /* must preserve COMPLETED - it is tied to hardware */ in do_8051_command()
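The packing follows the layout in the comment: the low 40 payload bits ride in in_data[47:8], the upper bits land in the two EXT_DEV_0 fields, and COMPLETED survives via read-modify-write. A sketch of the write-LCB path, with the _SHIFT/_SMASK macro names assumed from the usual CSR naming convention:

	in_data |= (*out_data & 0xffffffffffull) << 8;	/* bits 39:00 */
	reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
	reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;	/* keep COMPLETED */
	reg |= ((*out_data >> 40) & 0xff) <<
			DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT;	/* 47:40 */
	reg |= ((*out_data >> 48) & 0xffff) <<
			DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT;		/* 63:48 */
	write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);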
8799 dd->dc8051_timed_out++; in do_8051_command()
8803 return_code = -ETIMEDOUT; in do_8051_command()
8817 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT); in do_8051_command()
8822 dd->dc8051_timed_out = 0; in do_8051_command()
8829 mutex_unlock(&dd->dc8051_lock); in do_8051_command()
8859 * Return 0 on success, -errno on failure
8876 /* read is in 8-byte chunks, hardware will truncate the address down */ in read_8051_config()
9058 if (dd->pport->host_link_state & HLS_UP) { in hfi1_read_link_quality()
9122 * Returns 0 on success, -EINVAL on error
9132 return -EINVAL; in read_idle_message()
9144 * Returns 0 on success, -EINVAL on error
9155 * Returns 0 on success, -EINVAL on error
9166 return -EINVAL; in send_idle_message()
9174 * Returns 0 on success, -EINVAL on error
9189 * return 0 on success, -errno on error
9210 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { in do_quick_linkup()
9226 * sides must be done with LCB set-up before either in do_quick_linkup()
9255 ret = -EINVAL; in do_quick_linkup()
9274 * The simulator has only one loopback option - LCB. Switch in init_loopback()
9279 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) && in init_loopback()
9293 /* LCB loopback - handled at poll time */ in init_loopback()
9298 if (dd->icode == ICODE_FPGA_EMULATION) { in init_loopback()
9301 return -EINVAL; in init_loopback()
9311 return -EINVAL; in init_loopback()
9327 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) }, in opa_to_vc_link_widths()
9328 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) }, in opa_to_vc_link_widths()
9329 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) }, in opa_to_vc_link_widths()
9330 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) }, in opa_to_vc_link_widths()
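/*
 * The table above encodes each OPA width as the one-hot VC value
 * 1 << (lanes - 1).  A hedged equivalent, with a hypothetical helper name:
 */
static u16 lanes_to_vc_width(unsigned int lanes)	/* lanes in 1..4 */
{
	return 1 << (lanes - 1);	/* 1X->0x1, 2X->0x2, 3X->0x4, 4X->0x8 */
}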
9345 struct hfi1_devdata *dd = ppd->dd; in set_local_link_attributes()
9354 /* set the local tx rate - need to read-modify-write */ in set_local_link_attributes()
9356 &rx_polarity_inversion, &ppd->local_tx_rate); in set_local_link_attributes()
9360 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) { in set_local_link_attributes()
9362 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) in set_local_link_attributes()
9363 ppd->local_tx_rate = 1; in set_local_link_attributes()
9365 ppd->local_tx_rate = 0; in set_local_link_attributes()
9368 ppd->local_tx_rate = 0; in set_local_link_attributes()
9369 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G) in set_local_link_attributes()
9370 ppd->local_tx_rate |= 2; in set_local_link_attributes()
9371 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G) in set_local_link_attributes()
9372 ppd->local_tx_rate |= 1; in set_local_link_attributes()
9377 rx_polarity_inversion, ppd->local_tx_rate); in set_local_link_attributes()
9399 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init, in set_local_link_attributes()
9400 ppd->port_crc_mode_enabled); in set_local_link_attributes()
9416 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0)) in set_local_link_attributes()
9421 ppd->link_width_enabled)); in set_local_link_attributes()
9426 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev); in set_local_link_attributes()
9450 if (!ppd->driver_link_ready) { in start_link()
9451 dd_dev_info(ppd->dd, in start_link()
9469 struct hfi1_devdata *dd = ppd->dd; in wait_for_qsfp_init()
9475 * effect of power up on plug-in. We ignore this false positive in wait_for_qsfp_init()
9478 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the in wait_for_qsfp_init()
9484 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1) in wait_for_qsfp_init()
9488 mask = read_csr(dd, dd->hfi1_id ? in wait_for_qsfp_init()
9503 struct hfi1_devdata *dd = ppd->dd; in set_qsfp_int_n()
9506 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK); in set_qsfp_int_n()
9510 * when we re-enable the IntN pin in set_qsfp_int_n()
9512 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in set_qsfp_int_n()
9518 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); in set_qsfp_int_n()
9523 struct hfi1_devdata *dd = ppd->dd; in reset_qsfp()
9533 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT); in reset_qsfp()
9536 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9542 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask); in reset_qsfp()
9563 struct hfi1_devdata *dd = ppd->dd; in handle_qsfp_error_conditions()
9578 if (ppd->host_link_state & HLS_DOWN) in handle_qsfp_error_conditions()
9653 /* Bytes 9-10 and 11-12 are reserved */ in handle_qsfp_error_conditions()
9654 /* Bytes 13-15 are vendor specific */ in handle_qsfp_error_conditions()
9667 ppd = qd->ppd; in qsfp_event()
9668 dd = ppd->dd; in qsfp_event()
9674 if (ppd->host_link_state == HLS_DN_DISABLE) { in qsfp_event()
9675 dd_dev_info(ppd->dd, in qsfp_event()
9682 * Turn DC back on after cable has been re-inserted. Up until in qsfp_event()
9687 if (qd->cache_refresh_required) { in qsfp_event()
9701 if (qd->check_interrupt_flags) { in qsfp_event()
9704 if (one_qsfp_read(ppd, dd->hfi1_id, 6, in qsfp_event()
9714 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags); in qsfp_event()
9715 ppd->qsfp_info.check_interrupt_flags = 0; in qsfp_event()
9716 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, in qsfp_event()
9724 struct hfi1_pportdata *ppd = dd->pport; in init_qsfp_int()
9729 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR, in init_qsfp_int()
9731 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, in init_qsfp_int()
9740 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT, in init_qsfp_int()
9744 if (!dd->hfi1_id) in init_qsfp_int()
9751 * Do a one-time initialization of the LCB block.
9756 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in init_lcb()
9772 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9784 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd)) in test_qsfp_read()
9788 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1); in test_qsfp_read()
9792 return -EIO; in test_qsfp_read()
9814 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) { in try_start_link()
9815 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n"); in try_start_link()
9818 dd_dev_info(ppd->dd, in try_start_link()
9820 (int)ppd->qsfp_retry_count); in try_start_link()
9821 ppd->qsfp_retry_count++; in try_start_link()
9822 queue_delayed_work(ppd->link_wq, &ppd->start_link_work, in try_start_link()
9826 ppd->qsfp_retry_count = 0; in try_start_link()
9843 struct hfi1_devdata *dd = ppd->dd; in bringup_serdes()
9850 guid = ppd->guids[HFI1_PORT_GUID_INDEX]; in bringup_serdes()
9852 if (dd->base_guid) in bringup_serdes()
9853 guid = dd->base_guid + ppd->port - 1; in bringup_serdes()
9854 ppd->guids[HFI1_PORT_GUID_INDEX] = guid; in bringup_serdes()
9858 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP; in bringup_serdes()
9860 /* one-time init of the LCB */ in bringup_serdes()
9870 if (ppd->port_type == PORT_TYPE_QSFP) { in bringup_serdes()
9882 struct hfi1_devdata *dd = ppd->dd; in hfi1_quiet_serdes()
9891 ppd->driver_link_ready = 0; in hfi1_quiet_serdes()
9892 ppd->link_enabled = 0; in hfi1_quiet_serdes()
9894 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */ in hfi1_quiet_serdes()
9895 flush_delayed_work(&ppd->start_link_work); in hfi1_quiet_serdes()
9896 cancel_delayed_work_sync(&ppd->start_link_work); in hfi1_quiet_serdes()
9898 ppd->offline_disabled_reason = in hfi1_quiet_serdes()
9906 cancel_work_sync(&ppd->freeze_work); in hfi1_quiet_serdes()
9915 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cpu_counters()
9916 ppd->ibport_data.rvp.rc_acks = NULL; in init_cpu_counters()
9917 ppd->ibport_data.rvp.rc_qacks = NULL; in init_cpu_counters()
9918 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); in init_cpu_counters()
9919 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); in init_cpu_counters()
9920 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); in init_cpu_counters()
9921 if (!ppd->ibport_data.rvp.rc_acks || in init_cpu_counters()
9922 !ppd->ibport_data.rvp.rc_delayed_comp || in init_cpu_counters()
9923 !ppd->ibport_data.rvp.rc_qacks) in init_cpu_counters()
9924 return -ENOMEM; in init_cpu_counters()
9938 if (!(dd->flags & HFI1_PRESENT)) in hfi1_put_tid()
9957 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg); in hfi1_put_tid()
9958 writeq(reg, dd->rcvarray_wc + (index * 8)); in hfi1_put_tid()
9973 struct hfi1_devdata *dd = rcd->dd; in hfi1_clear_tids()
9977 for (i = rcd->eager_base; i < rcd->eager_base + in hfi1_clear_tids()
9978 rcd->egrbufs.alloced; i++) in hfi1_clear_tids()
9981 for (i = rcd->expected_base; in hfi1_clear_tids()
9982 i < rcd->expected_base + rcd->expected_count; i++) in hfi1_clear_tids()
10020 struct hfi1_devdata *dd = ppd->dd; in hfi1_get_ib_cfg()
10024 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */ in hfi1_get_ib_cfg()
10025 val = ppd->link_width_enabled; in hfi1_get_ib_cfg()
10027 case HFI1_IB_CFG_LWID: /* currently active Link-width */ in hfi1_get_ib_cfg()
10028 val = ppd->link_width_active; in hfi1_get_ib_cfg()
10031 val = ppd->link_speed_enabled; in hfi1_get_ib_cfg()
10034 val = ppd->link_speed_active; in hfi1_get_ib_cfg()
10037 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */ in hfi1_get_ib_cfg()
10038 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */ in hfi1_get_ib_cfg()
10043 val = ppd->actual_vls_operational; in hfi1_get_ib_cfg()
10052 val = ppd->overrun_threshold; in hfi1_get_ib_cfg()
10055 val = ppd->phy_error_threshold; in hfi1_get_ib_cfg()
10088 * HFI allows this to be set per-receive context, but the
10094 * The maximum non-payload (MTU) bytes in LRH.PktLen are in lrh_max_header_bytes()
10098 * dd->rcd[0].rcvhdrqentsize is in DW. in lrh_max_header_bytes()
10103 return (get_hdrqentsize(dd->rcd[0]) - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2; in lrh_max_header_bytes()
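/*
 * Worked example of the DW-to-byte conversion above (entry size is
 * hypothetical): with a 32-DW header queue entry, the largest LRH header
 * is (32 - 2 + 1) << 2 = 124 bytes -- drop the 2 DWs of PBC/RHF overhead,
 * add 1 DW back for the ICRC, then scale at 4 bytes per DW.
 */
static u32 hdrq_dw_to_lrh_bytes(u32 entry_dw)
{
	return (entry_dw - 2 /* PBC/RHF */ + 1 /* ICRC */) << 2;
}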
10119 struct hfi1_devdata *dd = ppd->dd; in set_send_length()
10121 u32 maxvlmtu = dd->vld[15].mtu; in set_send_length()
10122 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) in set_send_length()
10128 for (i = 0; i < ppd->vls_supported; i++) { in set_send_length()
10129 if (dd->vld[i].mtu > maxvlmtu) in set_send_length()
10130 maxvlmtu = dd->vld[i].mtu; in set_send_length()
10132 len1 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10136 len2 |= (((dd->vld[i].mtu + max_hb) >> 2) in set_send_length()
10144 for (i = 0; i < ppd->vls_supported; i++) { in set_send_length()
10145 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50), in set_send_length()
10146 sc_mtu_to_threshold(dd->vld[i].sc, in set_send_length()
10147 dd->vld[i].mtu, in set_send_length()
10148 get_hdrqentsize(dd->rcd[0]))); in set_send_length()
10154 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), in set_send_length()
10155 sc_mtu_to_threshold(dd->vld[15].sc, in set_send_length()
10156 dd->vld[15].mtu, in set_send_length()
10157 dd->rcd[0]->rcvhdrqentsize)); in set_send_length()
10158 sc_set_cr_threshold(dd->vld[15].sc, thres); in set_send_length()
10163 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG); in set_send_length()
10167 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1); in set_send_length()
10174 struct hfi1_devdata *dd = ppd->dd; in set_lidlmc()
10175 u32 mask = ~((1U << ppd->lmc) - 1); in set_lidlmc()
10176 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); in set_lidlmc()
10183 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid; in set_lidlmc()
10190 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1); in set_lidlmc()
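/*
 * Worked example of the LMC masking above (values hypothetical): with
 * ppd->lmc = 2, mask = ~((1U << 2) - 1) = 0xfffffffc, so the low two LID
 * bits act as path bits and a 4-wide block of LIDs matches the port's
 * base LID.  Multicast LIDs are programmed as 0 instead.
 */
static u32 lmc_mask(u8 lmc)
{
	return ~((1U << lmc) - 1);
}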
10284 struct hfi1_devdata *dd = ppd->dd; in decode_state_complete()
10292 * [ 0: 0] - success in decode_state_complete()
10293 * [ 3: 1] - state in decode_state_complete()
10294 * [ 7: 4] - next state timeout in decode_state_complete()
10295 * [15: 8] - reason code in decode_state_complete()
10296 * [31:16] - lanes in decode_state_complete()
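/*
 * Minimal decode sketch for the frame layout listed above; the struct
 * and helper names are illustrative, not the driver's.
 */
struct state_complete_frame {
	u8 success;	/* [ 0: 0] */
	u8 state;	/* [ 3: 1] */
	u8 nst;		/* [ 7: 4] next state timeout */
	u8 reason;	/* [15: 8] */
	u16 lanes;	/* [31:16] */
};

static void decode_frame(u32 frame, struct state_complete_frame *f)
{
	f->success = frame & 0x1;
	f->state = (frame >> 1) & 0x7;
	f->nst = (frame >> 4) & 0xf;
	f->reason = (frame >> 8) & 0xff;
	f->lanes = (frame >> 16) & 0xffff;
}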
10324 read_last_local_state(ppd->dd, &last_local_state); in check_lni_states()
10325 read_last_remote_state(ppd->dd, &last_remote_state); in check_lni_states()
10330 * training in progress. in check_lni_states()
10354 return -ETIMEDOUT; in wait_link_transfer_active()
10364 struct hfi1_devdata *dd = ppd->dd; in force_logical_link_state_down()
10393 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n"); in force_logical_link_state_down()
10398 * Expects ppd->hls_mutex to be held.
10406 struct hfi1_devdata *dd = ppd->dd; in goto_offline()
10413 previous_state = ppd->host_link_state; in goto_offline()
10414 ppd->host_link_state = HLS_GOING_OFFLINE; in goto_offline()
10423 return -EINVAL; in goto_offline()
10425 if (ppd->offline_disabled_reason == in goto_offline()
10427 ppd->offline_disabled_reason = in goto_offline()
10435 if (ppd->port_type == PORT_TYPE_QSFP && in goto_offline()
10436 ppd->qsfp_info.limiting_active && in goto_offline()
10462 * Now in charge of LCB - must be after the physical state is in goto_offline()
10473 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */ in goto_offline()
10490 ppd->host_link_state = HLS_DN_OFFLINE; in goto_offline()
10497 * - change our state in goto_offline()
10498 * - notify others if we were previously in a linkup state in goto_offline()
10500 ppd->host_link_state = HLS_DN_OFFLINE; in goto_offline()
10510 ppd->qsfp_info.reset_needed = 0; in goto_offline()
10514 ppd->link_width_active = 0; in goto_offline()
10515 ppd->link_width_downgrade_tx_active = 0; in goto_offline()
10516 ppd->link_width_downgrade_rx_active = 0; in goto_offline()
10517 ppd->current_egress_rate = 0; in goto_offline()
10548 switch (ppd->linkinit_reason) { in link_state_reason_name()
10567 * driver_pstate - convert the driver's notion of a port's state (an HLS_*) into a physical state.
10569 * Return -1 (converted to a u32) to indicate error.
10573 switch (ppd->host_link_state) { in driver_pstate()
10594 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_pstate()
10595 ppd->host_link_state); in driver_pstate()
10596 return -1; in driver_pstate()
10601 * driver_lstate - convert the driver's notion of a port's
10602 * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10607 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN)) in driver_lstate()
10610 switch (ppd->host_link_state & HLS_UP) { in driver_lstate()
10618 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n", in driver_lstate()
10619 ppd->host_link_state); in driver_lstate()
10620 return -1; in driver_lstate()
10627 if (ppd->local_link_down_reason.latest == 0 && in set_link_down_reason()
10628 ppd->neigh_link_down_reason.latest == 0) { in set_link_down_reason()
10629 ppd->local_link_down_reason.latest = lcl_reason; in set_link_down_reason()
10630 ppd->neigh_link_down_reason.latest = neigh_reason; in set_link_down_reason()
10631 ppd->remote_link_down_reason = rem_reason; in set_link_down_reason()
10636 * data_vls_operational() - Verify if data VL BCT credits and MTU
10640 * Return: true - OK, false - otherwise.
10647 if (!ppd->actual_vls_operational) in data_vls_operational()
10650 for (i = 0; i < ppd->vls_supported; i++) { in data_vls_operational()
10651 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i)); in data_vls_operational()
10652 if ((reg && !ppd->dd->vld[i].mtu) || in data_vls_operational()
10653 (!reg && ppd->dd->vld[i].mtu)) in data_vls_operational()
10666 * Returns 0 on success, -errno on failure.
10670 struct hfi1_devdata *dd = ppd->dd; in set_link_state()
10675 mutex_lock(&ppd->hls_lock); in set_link_state()
10681 /* interpret poll -> poll as a link bounce */ in set_link_state()
10682 poll_bounce = ppd->host_link_state == HLS_DN_POLL && in set_link_state()
10686 link_state_name(ppd->host_link_state), in set_link_state()
10697 ppd->is_sm_config_started = 0; in set_link_state()
10703 if (ppd->host_link_state == state && !poll_bounce) in set_link_state()
10708 if (ppd->host_link_state == HLS_DN_POLL && in set_link_state()
10709 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) { in set_link_state()
10718 } else if (ppd->host_link_state != HLS_GOING_UP) { in set_link_state()
10730 "%s: physical state did not change to LINK-UP\n", in set_link_state()
10744 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR) in set_link_state()
10745 ppd->linkinit_reason = in set_link_state()
10759 update_xmit_counters(ppd, ppd->link_width_active); in set_link_state()
10761 ppd->host_link_state = HLS_UP_INIT; in set_link_state()
10765 if (ppd->host_link_state != HLS_UP_INIT) in set_link_state()
10772 ret = -EINVAL; in set_link_state()
10784 ppd->host_link_state = HLS_UP_ARMED; in set_link_state()
10791 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) in set_link_state()
10792 ppd->neighbor_normal = 1; in set_link_state()
10795 if (ppd->host_link_state != HLS_UP_ARMED) in set_link_state()
10807 ppd->host_link_state = HLS_UP_ACTIVE; in set_link_state()
10811 event.device = &dd->verbs_dev.rdi.ibdev; in set_link_state()
10812 event.element.port_num = ppd->port; in set_link_state()
10817 if ((ppd->host_link_state == HLS_DN_DISABLE || in set_link_state()
10818 ppd->host_link_state == HLS_DN_OFFLINE) && in set_link_state()
10819 dd->dc_shutdown) in set_link_state()
10824 if (ppd->host_link_state != HLS_DN_OFFLINE) { in set_link_state()
10825 u8 tmp = ppd->link_enabled; in set_link_state()
10827 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10829 ppd->link_enabled = tmp; in set_link_state()
10832 ppd->remote_link_down_reason = 0; in set_link_state()
10834 if (ppd->driver_link_ready) in set_link_state()
10835 ppd->link_enabled = 1; in set_link_state()
10838 set_all_slowpath(ppd->dd); in set_link_state()
10843 ppd->port_error_action = 0; in set_link_state()
10857 ret = -EINVAL; in set_link_state()
10867 ppd->host_link_state = HLS_DN_POLL; in set_link_state()
10868 ppd->offline_disabled_reason = in set_link_state()
10881 ppd->link_enabled = 0; in set_link_state()
10886 if (ppd->host_link_state != HLS_DN_OFFLINE) { in set_link_state()
10887 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10890 ppd->remote_link_down_reason = 0; in set_link_state()
10893 if (!dd->dc_shutdown) { in set_link_state()
10899 ret = -EINVAL; in set_link_state()
10911 ppd->host_link_state = HLS_DN_DISABLE; in set_link_state()
10914 if (ppd->host_link_state == HLS_DN_DISABLE) in set_link_state()
10918 ret = goto_offline(ppd, ppd->remote_link_down_reason); in set_link_state()
10920 ppd->remote_link_down_reason = 0; in set_link_state()
10923 if (ppd->host_link_state != HLS_DN_POLL) in set_link_state()
10925 ppd->host_link_state = HLS_VERIFY_CAP; in set_link_state()
10929 if (ppd->host_link_state != HLS_VERIFY_CAP) in set_link_state()
10937 ret = -EINVAL; in set_link_state()
10940 ppd->host_link_state = HLS_GOING_UP; in set_link_state()
10948 ret = -EINVAL; in set_link_state()
10956 __func__, link_state_name(ppd->host_link_state), in set_link_state()
10958 ret = -EINVAL; in set_link_state()
10961 mutex_unlock(&ppd->hls_lock); in set_link_state()
10980 * The VL Arbitrator high limit is sent in units of 4k in hfi1_set_ib_cfg()
10986 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg); in hfi1_set_ib_cfg()
10991 ret = -EINVAL; in hfi1_set_ib_cfg()
10994 if (ppd->vls_operational != val) { in hfi1_set_ib_cfg()
10995 ppd->vls_operational = val; in hfi1_set_ib_cfg()
10996 if (!ppd->port) in hfi1_set_ib_cfg()
10997 ret = -EINVAL; in hfi1_set_ib_cfg()
11008 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */ in hfi1_set_ib_cfg()
11009 ppd->link_width_enabled = val & ppd->link_width_supported; in hfi1_set_ib_cfg()
11012 ppd->link_width_downgrade_enabled = in hfi1_set_ib_cfg()
11013 val & ppd->link_width_downgrade_supported; in hfi1_set_ib_cfg()
11016 ppd->link_speed_enabled = val & ppd->link_speed_supported; in hfi1_set_ib_cfg()
11023 ppd->overrun_threshold = val; in hfi1_set_ib_cfg()
11030 ppd->phy_error_threshold = val; in hfi1_set_ib_cfg()
11044 dd_dev_info(ppd->dd, in hfi1_set_ib_cfg()
11073 spin_lock_init(&ppd->vl_arb_cache[i].lock); in init_vl_arb_caches()
11087 spin_lock(&ppd->vl_arb_cache[idx].lock); in vl_arb_lock_cache()
11088 return &ppd->vl_arb_cache[idx]; in vl_arb_lock_cache()
11093 spin_unlock(&ppd->vl_arb_cache[idx].lock); in vl_arb_unlock_cache()
11099 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_get_cache()
11105 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_set_cache()
11111 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); in vl_arb_match_cache()
11119 struct hfi1_devdata *dd = ppd->dd; in set_vl_weights()
11124 mutex_lock(&ppd->hls_lock); in set_vl_weights()
11126 if (ppd->host_link_state & HLS_UP) in set_vl_weights()
11133 * Before adjusting VL arbitration weights, empty per-VL in set_vl_weights()
11143 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n", in set_vl_weights()
11153 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK) in set_vl_weights()
11155 | (((u64)vl->weight in set_vl_weights()
11166 mutex_unlock(&ppd->hls_lock); in set_vl_weights()
11179 vll->dedicated = cpu_to_be16( in read_one_cm_vl()
11182 vll->shared = cpu_to_be16( in read_one_cm_vl()
11199 /* OPA and HFI have a 1-1 mapping */ in get_buffer_control()
11201 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]); in get_buffer_control()
11203 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */ in get_buffer_control()
11204 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]); in get_buffer_control()
11207 bc->overall_shared_limit = cpu_to_be16( in get_buffer_control()
11222 /* each register contains 16 SC->VLnt mappings, 4 bits each */ in get_sc2vlnt()
11227 dp->vlnt[2 * i] = byte & 0xf; in get_sc2vlnt()
11228 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4; in get_sc2vlnt()
11235 dp->vlnt[16 + (2 * i)] = byte & 0xf; in get_sc2vlnt()
11236 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4; in get_sc2vlnt()
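/*
 * Sketch of the nibble unpacking above: each 64-bit CSR packs sixteen
 * 4-bit SC->VLnt entries, two per byte with the even entry in the low
 * nibble.  Hypothetical helper:
 */
static u8 sc2vlnt_entry(u64 reg, unsigned int idx)	/* idx in 0..15 */
{
	return (reg >> (idx * 4)) & 0xf;
}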
11247 vl->vl = 0xf; in get_vlarb_preempt()
11248 vl->weight = 0; in get_vlarb_preempt()
11256 0, dp->vlnt[0] & 0xf, in set_sc2vlnt()
11257 1, dp->vlnt[1] & 0xf, in set_sc2vlnt()
11258 2, dp->vlnt[2] & 0xf, in set_sc2vlnt()
11259 3, dp->vlnt[3] & 0xf, in set_sc2vlnt()
11260 4, dp->vlnt[4] & 0xf, in set_sc2vlnt()
11261 5, dp->vlnt[5] & 0xf, in set_sc2vlnt()
11262 6, dp->vlnt[6] & 0xf, in set_sc2vlnt()
11263 7, dp->vlnt[7] & 0xf, in set_sc2vlnt()
11264 8, dp->vlnt[8] & 0xf, in set_sc2vlnt()
11265 9, dp->vlnt[9] & 0xf, in set_sc2vlnt()
11266 10, dp->vlnt[10] & 0xf, in set_sc2vlnt()
11267 11, dp->vlnt[11] & 0xf, in set_sc2vlnt()
11268 12, dp->vlnt[12] & 0xf, in set_sc2vlnt()
11269 13, dp->vlnt[13] & 0xf, in set_sc2vlnt()
11270 14, dp->vlnt[14] & 0xf, in set_sc2vlnt()
11271 15, dp->vlnt[15] & 0xf)); in set_sc2vlnt()
11274 16, dp->vlnt[16] & 0xf, in set_sc2vlnt()
11275 17, dp->vlnt[17] & 0xf, in set_sc2vlnt()
11276 18, dp->vlnt[18] & 0xf, in set_sc2vlnt()
11277 19, dp->vlnt[19] & 0xf, in set_sc2vlnt()
11278 20, dp->vlnt[20] & 0xf, in set_sc2vlnt()
11279 21, dp->vlnt[21] & 0xf, in set_sc2vlnt()
11280 22, dp->vlnt[22] & 0xf, in set_sc2vlnt()
11281 23, dp->vlnt[23] & 0xf, in set_sc2vlnt()
11282 24, dp->vlnt[24] & 0xf, in set_sc2vlnt()
11283 25, dp->vlnt[25] & 0xf, in set_sc2vlnt()
11284 26, dp->vlnt[26] & 0xf, in set_sc2vlnt()
11285 27, dp->vlnt[27] & 0xf, in set_sc2vlnt()
11286 28, dp->vlnt[28] & 0xf, in set_sc2vlnt()
11287 29, dp->vlnt[29] & 0xf, in set_sc2vlnt()
11288 30, dp->vlnt[30] & 0xf, in set_sc2vlnt()
11289 31, dp->vlnt[31] & 0xf)); in set_sc2vlnt()
11322 /* set the given per-VL shared limit */
11339 /* set the given per-VL dedicated limit */
11356 /* spin until the given per-VL status mask bits clear */
11412 struct hfi1_devdata *dd = ppd->dd; in set_buffer_control()
11445 new_total += be16_to_cpu(new_bc->vl[i].dedicated); in set_buffer_control()
11449 be16_to_cpu(new_bc->vl[i].dedicated)); in set_buffer_control()
11451 be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11452 new_bc->vl[i].dedicated = 0; in set_buffer_control()
11453 new_bc->vl[i].shared = 0; in set_buffer_control()
11455 new_total += be16_to_cpu(new_bc->overall_shared_limit); in set_buffer_control()
11478 this_shared_changing = new_bc->vl[i].shared in set_buffer_control()
11482 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated || in set_buffer_control()
11488 if (be16_to_cpu(new_bc->vl[i].dedicated) < in set_buffer_control()
11503 if ((be16_to_cpu(new_bc->overall_shared_limit) < in set_buffer_control()
11531 be16_to_cpu(new_bc-> in set_buffer_control()
11534 new_bc->vl[i].dedicated; in set_buffer_control()
11545 if (be16_to_cpu(new_bc->vl[i].dedicated) > in set_buffer_control()
11548 be16_to_cpu(new_bc-> in set_buffer_control()
11558 if (be16_to_cpu(new_bc->vl[i].shared) > in set_buffer_control()
11560 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared)); in set_buffer_control()
11564 if (be16_to_cpu(new_bc->overall_shared_limit) > in set_buffer_control()
11567 be16_to_cpu(new_bc->overall_shared_limit)); in set_buffer_control()
11579 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 || in set_buffer_control()
11580 be16_to_cpu(new_bc->vl[i].shared) > 0) in set_buffer_control()
11582 ppd->actual_vls_operational = vl_count; in set_buffer_control()
11583 ret = sdma_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11584 ppd->actual_vls_operational : in set_buffer_control()
11585 ppd->vls_operational, in set_buffer_control()
11588 ret = pio_map_init(dd, ppd->port - 1, vl_count ? in set_buffer_control()
11589 ppd->actual_vls_operational : in set_buffer_control()
11590 ppd->vls_operational, NULL); in set_buffer_control()
11630 size = get_buffer_control(ppd->dd, t, NULL); in fm_get_table()
11633 size = get_sc2vlnt(ppd->dd, t); in fm_get_table()
11638 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t); in fm_get_table()
11648 return -EINVAL; in fm_get_table()
11688 set_sc2vlnt(ppd->dd, t); in fm_set_table()
11691 ret = -EINVAL; in fm_set_table()
11699 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11712 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11713 * Just re-enables all data VLs (the "fill" part happens
11714 * automatically - the name was chosen for symmetry with
11717 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11730 * drain_data_vls() - assumes that disable_data_vls() has been called,
11731 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11742 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11748 * // do things with per-VL resources
11764 * the cclock, a non-zero ns will always have a non-zero result.
11770 if (dd->icode == ICODE_FPGA_EMULATION) in ns_to_cclock()
11781 * the cclock, a non-zero cclocks will always have a non-zero result.
11787 if (dd->icode == ICODE_FPGA_EMULATION) in cclock_to_ns()
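/*
 * A hedged sketch of a round-up time conversion with the property the
 * comments above call out: given an exact period, any non-zero input
 * yields a non-zero result.  The period argument is a placeholder, not
 * the chip's actual cclock rate.
 */
static u32 ns_to_ticks(u32 ns, u32 period_ps)
{
	return (u32)div_u64((u64)ns * 1000 + period_ps - 1, period_ps);
}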
11804 struct hfi1_devdata *dd = rcd->dd; in adjust_rcv_timeout()
11805 u32 timeout = rcd->rcvavail_timeout; in adjust_rcv_timeout()
11829 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */ in adjust_rcv_timeout()
11831 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr); in adjust_rcv_timeout()
11834 rcd->rcvavail_timeout = timeout; in adjust_rcv_timeout()
11839 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT, in adjust_rcv_timeout()
11847 struct hfi1_devdata *dd = rcd->dd; in update_usrhead()
11849 u32 ctxt = rcd->ctxt; in update_usrhead()
11872 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) in hdrqempty()
11878 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL); in hdrqempty()
11897 * 0xB-0xF - reserved (Receive Array only)
11920 * encode_rcv_header_entry_size - return chip specific encoding for size
11940 * hfi1_validate_rcvhdrcnt - validate hdrcnt
11948 return -EINVAL; in hfi1_validate_rcvhdrcnt()
11955 return -EINVAL; in hfi1_validate_rcvhdrcnt()
11961 return -EINVAL; in hfi1_validate_rcvhdrcnt()
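/*
 * Hedged sketch of the validation pattern above: the receive header
 * queue count must fall within hardware bounds and be a multiple of the
 * hardware increment.  The bound parameters are placeholders for the
 * chip's actual constants.
 */
static int validate_hdrcnt(u32 cnt, u32 min_cnt, u32 max_cnt, u32 incr)
{
	if (cnt < min_cnt || cnt > max_cnt || cnt % incr)
		return -EINVAL;
	return 0;
}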
11968 * set_hdrq_regs - set header queue registers for context
11994 dd->rcvhdrtail_dummy_dma); in set_hdrq_regs()
12007 ctxt = rcd->ctxt; in hfi1_rcvctrl()
12017 rcd->rcvhdrq_dma); in hfi1_rcvctrl()
12020 rcd->rcvhdrqtailaddr_dma); in hfi1_rcvctrl()
12032 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd)); in hfi1_rcvctrl()
12035 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr; in hfi1_rcvctrl()
12042 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size) in hfi1_rcvctrl()
12046 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */ in hfi1_rcvctrl()
12054 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT) in hfi1_rcvctrl()
12057 (((rcd->eager_base >> RCV_SHIFT) in hfi1_rcvctrl()
12064 * rcd->expected_count is set to individual RcvArray entries, in hfi1_rcvctrl()
12065 * not pairs, and the CSR takes a pair-count in groups of in hfi1_rcvctrl()
12068 reg = (((rcd->expected_count >> RCV_SHIFT) in hfi1_rcvctrl()
12071 (((rcd->expected_base >> RCV_SHIFT) in hfi1_rcvctrl()
12085 if (dd->rcvhdrtail_dummy_dma) { in hfi1_rcvctrl()
12087 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12095 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12096 IS_RCVAVAIL_START + rcd->ctxt, true); in hfi1_rcvctrl()
12100 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt, in hfi1_rcvctrl()
12101 IS_RCVAVAIL_START + rcd->ctxt, false); in hfi1_rcvctrl()
12117 * In one-packet-per-eager mode, the size comes from in hfi1_rcvctrl()
12134 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12135 IS_RCVURGENT_START + rcd->ctxt, true); in hfi1_rcvctrl()
12137 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt, in hfi1_rcvctrl()
12138 IS_RCVURGENT_START + rcd->ctxt, false); in hfi1_rcvctrl()
12167 (u64)rcd->rcvavail_timeout << in hfi1_rcvctrl()
12182 dd->rcvhdrtail_dummy_dma); in hfi1_rcvctrl()
12191 ret = dd->cntrnameslen; in hfi1_read_cntrs()
12192 *namep = dd->cntrnames; in hfi1_read_cntrs()
12197 ret = (dd->ndevcntrs) * sizeof(u64); in hfi1_read_cntrs()
12200 *cntrp = dd->cntrs; in hfi1_read_cntrs()
12207 hfi1_cdbg(CNTR, "reading %s", entry->name); in hfi1_read_cntrs()
12208 if (entry->flags & CNTR_DISABLED) { in hfi1_read_cntrs()
12212 if (entry->flags & CNTR_VL) { in hfi1_read_cntrs()
12215 val = entry->rw_cntr(entry, in hfi1_read_cntrs()
12223 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12226 } else if (entry->flags & CNTR_SDMA) { in hfi1_read_cntrs()
12232 entry->rw_cntr(entry, dd, j, in hfi1_read_cntrs()
12237 dd->cntrs[entry->offset + j] = in hfi1_read_cntrs()
12241 val = entry->rw_cntr(entry, dd, in hfi1_read_cntrs()
12244 dd->cntrs[entry->offset] = val; in hfi1_read_cntrs()
12262 ret = ppd->dd->portcntrnameslen; in hfi1_read_portcntrs()
12263 *namep = ppd->dd->portcntrnames; in hfi1_read_portcntrs()
12268 ret = ppd->dd->nportcntrs * sizeof(u64); in hfi1_read_portcntrs()
12269 *cntrp = ppd->cntrs; in hfi1_read_portcntrs()
12273 hfi1_cdbg(CNTR, "reading %s", entry->name); in hfi1_read_portcntrs()
12274 if (entry->flags & CNTR_DISABLED) { in hfi1_read_portcntrs()
12280 if (entry->flags & CNTR_VL) { in hfi1_read_portcntrs()
12283 val = entry->rw_cntr(entry, ppd, j, in hfi1_read_portcntrs()
12290 ppd->cntrs[entry->offset + j] = val; in hfi1_read_portcntrs()
12293 val = entry->rw_cntr(entry, ppd, in hfi1_read_portcntrs()
12297 ppd->cntrs[entry->offset] = val; in hfi1_read_portcntrs()
12310 if (dd->synth_stats_timer.function) in free_cntrs()
12311 del_timer_sync(&dd->synth_stats_timer); in free_cntrs()
12312 cancel_work_sync(&dd->update_cntr_work); in free_cntrs()
12314 for (i = 0; i < dd->num_pports; i++, ppd++) { in free_cntrs()
12315 kfree(ppd->cntrs); in free_cntrs()
12316 kfree(ppd->scntrs); in free_cntrs()
12317 free_percpu(ppd->ibport_data.rvp.rc_acks); in free_cntrs()
12318 free_percpu(ppd->ibport_data.rvp.rc_qacks); in free_cntrs()
12319 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); in free_cntrs()
12320 ppd->cntrs = NULL; in free_cntrs()
12321 ppd->scntrs = NULL; in free_cntrs()
12322 ppd->ibport_data.rvp.rc_acks = NULL; in free_cntrs()
12323 ppd->ibport_data.rvp.rc_qacks = NULL; in free_cntrs()
12324 ppd->ibport_data.rvp.rc_delayed_comp = NULL; in free_cntrs()
12326 kfree(dd->portcntrnames); in free_cntrs()
12327 dd->portcntrnames = NULL; in free_cntrs()
12328 kfree(dd->cntrs); in free_cntrs()
12329 dd->cntrs = NULL; in free_cntrs()
12330 kfree(dd->scntrs); in free_cntrs()
12331 dd->scntrs = NULL; in free_cntrs()
12332 kfree(dd->cntrnames); in free_cntrs()
12333 dd->cntrnames = NULL; in free_cntrs()
12334 if (dd->update_cntr_wq) { in free_cntrs()
12335 destroy_workqueue(dd->update_cntr_wq); in free_cntrs()
12336 dd->update_cntr_wq = NULL; in free_cntrs()
12346 if (entry->flags & CNTR_DISABLED) { in read_dev_port_cntr()
12347 dd_dev_err(dd, "Counter %s not enabled", entry->name); in read_dev_port_cntr()
12351 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); in read_dev_port_cntr()
12353 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0); in read_dev_port_cntr()
12356 if (entry->flags & CNTR_SYNTH) { in read_dev_port_cntr()
12362 if (entry->flags & CNTR_32BIT) { in read_dev_port_cntr()
12397 if (entry->flags & CNTR_DISABLED) { in write_dev_port_cntr()
12398 dd_dev_err(dd, "Counter %s not enabled", entry->name); in write_dev_port_cntr()
12402 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval); in write_dev_port_cntr()
12404 if (entry->flags & CNTR_SYNTH) { in write_dev_port_cntr()
12406 if (entry->flags & CNTR_32BIT) { in write_dev_port_cntr()
12407 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, in write_dev_port_cntr()
12411 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, in write_dev_port_cntr()
12415 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data); in write_dev_port_cntr()
12431 sval = dd->scntrs + entry->offset; in read_dev_cntr()
12445 sval = dd->scntrs + entry->offset; in write_dev_cntr()
12459 sval = ppd->scntrs + entry->offset; in read_port_cntr()
12464 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in read_port_cntr()
12470 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl); in read_port_cntr()
12479 sval = ppd->scntrs + entry->offset; in write_port_cntr()
12484 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) && in write_port_cntr()
12490 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data); in write_port_cntr()
12512 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12515 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0); in do_update_synth_timer()
12520 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12522 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) { in do_update_synth_timer()
12529 dd->unit); in do_update_synth_timer()
12531 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx); in do_update_synth_timer()
12533 "[%d] total flits 0x%llx limit 0x%llx", dd->unit, in do_update_synth_timer()
12537 dd->unit); in do_update_synth_timer()
12543 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit); in do_update_synth_timer()
12546 if (entry->flags & CNTR_VL) { in do_update_synth_timer()
12554 for (i = 0; i < dd->num_pports; i++, ppd++) { in do_update_synth_timer()
12557 if (entry->flags & CNTR_VL) { in do_update_synth_timer()
12573 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12577 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, in do_update_synth_timer()
12581 dd->unit, dd->last_tx, dd->last_rx); in do_update_synth_timer()
12584 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); in do_update_synth_timer()
12592 queue_work(dd->update_cntr_wq, &dd->update_cntr_work); in update_synth_timer()
12593 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in update_synth_timer()
12609 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0); in init_cntrs()
12616 dd->ndevcntrs = 0; in init_cntrs()
12626 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12631 /* Add ",32" for 32-bit counters */ in init_cntrs()
12635 dd->ndevcntrs++; in init_cntrs()
12638 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12643 /* Add ",32" for 32-bit counters */ in init_cntrs()
12647 dd->ndevcntrs++; in init_cntrs()
12652 /* Add ",32" for 32-bit counters */ in init_cntrs()
12655 dev_cntrs[i].offset = dd->ndevcntrs; in init_cntrs()
12656 dd->ndevcntrs++; in init_cntrs()
12661 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64), in init_cntrs()
12663 if (!dd->cntrs) in init_cntrs()
12666 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12667 if (!dd->scntrs) in init_cntrs()
12671 dd->cntrnameslen = sz; in init_cntrs()
12672 dd->cntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12673 if (!dd->cntrnames) in init_cntrs()
12677 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) { in init_cntrs()
12734 rcv_ctxts = dd->num_rcv_contexts; in init_cntrs()
12742 dd->nportcntrs = 0; in init_cntrs()
12750 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12755 /* Add ",32" for 32-bit counters */ in init_cntrs()
12759 dd->nportcntrs++; in init_cntrs()
12764 /* Add ",32" for 32-bit counters */ in init_cntrs()
12767 port_cntrs[i].offset = dd->nportcntrs; in init_cntrs()
12768 dd->nportcntrs++; in init_cntrs()
12773 dd->portcntrnameslen = sz; in init_cntrs()
12774 dd->portcntrnames = kmalloc(sz, GFP_KERNEL); in init_cntrs()
12775 if (!dd->portcntrnames) in init_cntrs()
12779 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) { in init_cntrs()
12815 for (i = 0; i < dd->num_pports; i++, ppd++) { in init_cntrs()
12816 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12817 if (!ppd->cntrs) in init_cntrs()
12820 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL); in init_cntrs()
12821 if (!ppd->scntrs) in init_cntrs()
12829 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d", in init_cntrs()
12830 WQ_MEM_RECLAIM, dd->unit); in init_cntrs()
12831 if (!dd->update_cntr_wq) in init_cntrs()
12834 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer); in init_cntrs()
12836 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); in init_cntrs()
12840 return -ENOMEM; in init_cntrs()
12864 /* look at the HFI meta-states only */ in chip_to_opa_pstate()
12924 * update_statusp - Update userspace status flag
12938 * memory. Do it here to ensure a reliable state - this is in update_statusp()
12944 if (ppd->statusp) { in update_statusp()
12948 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF | in update_statusp()
12952 *ppd->statusp |= HFI1_STATUS_IB_CONF; in update_statusp()
12955 *ppd->statusp |= HFI1_STATUS_IB_READY; in update_statusp()
12959 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n", in update_statusp()
12964 * wait_logical_linkstate - wait for an IB link state change to occur
12971 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12981 new_state = chip_to_opa_lstate(ppd->dd, in wait_logical_linkstate()
12982 read_logical_state(ppd->dd)); in wait_logical_linkstate()
12986 dd_dev_err(ppd->dd, in wait_logical_linkstate()
12989 return -ETIMEDOUT; in wait_logical_linkstate()
12999 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state); in log_state_transition()
13001 dd_dev_info(ppd->dd, in log_state_transition()
13012 u32 read_state = read_physical_state(ppd->dd); in log_physical_state()
13017 dd_dev_err(ppd->dd, in log_physical_state()
13024 * wait_physical_linkstate - wait for a physical link state change to occur
13030 * Returns 0 if state reached, otherwise -ETIMEDOUT.
13040 read_state = read_physical_state(ppd->dd); in wait_physical_linkstate()
13044 dd_dev_err(ppd->dd, in wait_physical_linkstate()
13047 return -ETIMEDOUT; in wait_physical_linkstate()
13049 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_physical_linkstate()
13057 * wait_phys_link_offline_substates - wait for any offline substate
13063 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13073 read_state = read_physical_state(ppd->dd); in wait_phys_link_offline_substates()
13077 dd_dev_err(ppd->dd, in wait_phys_link_offline_substates()
13080 return -ETIMEDOUT; in wait_phys_link_offline_substates()
13082 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_phys_link_offline_substates()
13090 * wait_phys_link_out_of_offline - wait for any out of offline state
13096 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
13106 read_state = read_physical_state(ppd->dd); in wait_phys_link_out_of_offline()
13110 dd_dev_err(ppd->dd, in wait_phys_link_out_of_offline()
13113 return -ETIMEDOUT; in wait_phys_link_out_of_offline()
13115 usleep_range(1950, 2050); /* sleep 2ms-ish */ in wait_phys_link_out_of_offline()
13131 struct hfi1_devdata *dd = sc->dd; in hfi1_init_ctxt()
13133 u8 set = (sc->type == SC_USER ? in hfi1_init_ctxt()
13136 reg = read_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13142 write_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
13152 if (dd->icode != ICODE_RTL_SILICON) { in hfi1_tempsense_rd()
13156 return -EINVAL; in hfi1_tempsense_rd()
13159 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) & in hfi1_tempsense_rd()
13161 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) & in hfi1_tempsense_rd()
13163 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) & in hfi1_tempsense_rd()
13165 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) & in hfi1_tempsense_rd()
13167 /* triggers is a 3-bit value - 1 bit per trigger. */ in hfi1_tempsense_rd()
13168 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7); in hfi1_tempsense_rd()
13176 * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13190 spin_lock_irqsave(&dd->irq_src_lock, flags); in read_mod_write()
13197 spin_unlock_irqrestore(&dd->irq_src_lock, flags); in read_mod_write()
13201 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
13216 return -EINVAL; in set_intr_bits()
13219 return -ERANGE; in set_intr_bits()
13225 read_mod_write(dd, src - 1, bits, set); in set_intr_bits()
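/*
 * Illustration of the source-to-register math implied above: interrupt
 * sources are spread across 64-bit mask CSRs, so source n lands at bit
 * n % 64 of register n / 64.  Hypothetical helper, not the driver's:
 */
static void irq_src_to_csr(unsigned int src, unsigned int *csr_idx,
			   unsigned int *bit)
{
	*csr_idx = src / 64;
	*bit = src % 64;
}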
13263 * Remap the interrupt source from the general handler to the given MSI-X
13275 dd->gi_mask[m] &= ~((u64)1 << n); in remap_intr()
13281 /* direct the chip source to the given MSI-X interrupt */ in remap_intr()
13294 * engine. Per-engine interrupts are as follows: in remap_sdma_interrupts()
13306 * chip interrupts back to MSI-X 0.
13314 dd->gi_mask[i] = ~(u64)0; in reset_interrupts()
13316 /* all chip interrupts map to MSI-X 0 */ in reset_interrupts()
13322 * set_up_interrupts() - Initialize the IRQ resources and state
13336 /* reset general handler mask, chip MSI-X mappings */ in set_up_interrupts()
13339 /* ask for MSI-X interrupts */ in set_up_interrupts()
13354 * num_rcv_contexts - number of contexts being used
13355 * n_krcv_queues - number of kernel contexts
13356 * first_dyn_alloc_ctxt - first dynamically allocated context
13358 * freectxts - number of free user contexts
13359 * num_send_contexts - number of PIO send contexts being used
13360 * num_netdev_contexts - number of contexts reserved for netdev
13375 * - Context 0 - control context (VL15/multicast/error) in set_up_context_variables()
13376 * - Context 1 - first kernel context in set_up_context_variables()
13377 * - Context 2 - second kernel context in set_up_context_variables()
13391 * one send context is allocated for each VL{0-7} and VL15 in set_up_context_variables()
13393 if (num_kernel_contexts > (send_contexts - num_vls - 1)) { in set_up_context_variables()
13396 send_contexts - num_vls - 1, in set_up_context_variables()
13398 num_kernel_contexts = send_contexts - num_vls - 1; in set_up_context_variables()
13403 * - default to 1 user context per real (non-HT) CPU core if in set_up_context_variables()
13416 (u32)(rcv_contexts - num_kernel_contexts), in set_up_context_variables()
13419 n_usr_ctxts = rcv_contexts - num_kernel_contexts; in set_up_context_variables()
13423 hfi1_num_netdev_contexts(dd, rcv_contexts - in set_up_context_variables()
13429 * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts + in set_up_context_variables()
13439 rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL) in set_up_context_variables()
13440 + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1 in set_up_context_variables()
13446 int over = rmt_count - NUM_MAP_ENTRIES; in set_up_context_variables()
13450 return -EINVAL; in set_up_context_variables()
13453 n_usr_ctxts, n_usr_ctxts - over); in set_up_context_variables()
13454 n_usr_ctxts -= over; in set_up_context_variables()
13458 dd->num_rcv_contexts = in set_up_context_variables()
13460 dd->n_krcv_queues = num_kernel_contexts; in set_up_context_variables()
13461 dd->first_dyn_alloc_ctxt = num_kernel_contexts; in set_up_context_variables()
13462 dd->num_netdev_contexts = num_netdev_contexts; in set_up_context_variables()
13463 dd->num_user_contexts = n_usr_ctxts; in set_up_context_variables()
13464 dd->freectxts = n_usr_ctxts; in set_up_context_variables()
13468 (int)dd->num_rcv_contexts, in set_up_context_variables()
13469 (int)dd->n_krcv_queues, in set_up_context_variables()
13470 dd->num_netdev_contexts, in set_up_context_variables()
13471 dd->num_user_contexts); in set_up_context_variables()
13477 * consecutive entries by using write-combining of the entire in set_up_context_variables()
13484 dd->rcv_entries.group_size = RCV_INCREMENT; in set_up_context_variables()
13485 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size; in set_up_context_variables()
13486 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts; in set_up_context_variables()
13487 dd->rcv_entries.nctxt_extra = ngroups - in set_up_context_variables()
13488 (dd->num_rcv_contexts * dd->rcv_entries.ngroups); in set_up_context_variables()
13490 dd->rcv_entries.ngroups, in set_up_context_variables()
13491 dd->rcv_entries.nctxt_extra); in set_up_context_variables()
13492 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size > in set_up_context_variables()
13494 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) / in set_up_context_variables()
13495 dd->rcv_entries.group_size; in set_up_context_variables()
13498 dd->rcv_entries.ngroups); in set_up_context_variables()
13499 dd->rcv_entries.nctxt_extra = 0; in set_up_context_variables()
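/*
 * Sketch of the even split computed above: RcvArray groups are divided
 * evenly across receive contexts, and the remainder (nctxt_extra) is
 * later handed out one extra group per context.  Hypothetical helper:
 */
static void split_rcv_groups(u32 ngroups, u32 nctxts, u32 *per_ctxt,
			     u32 *extra)
{
	*per_ctxt = ngroups / nctxts;
	*extra = ngroups - nctxts * *per_ctxt;	/* i.e. ngroups % nctxts */
}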
13506 dd->num_send_contexts = ret; in set_up_context_variables()
13511 dd->num_send_contexts, in set_up_context_variables()
13512 dd->sc_sizes[SC_KERNEL].count, in set_up_context_variables()
13513 dd->sc_sizes[SC_ACK].count, in set_up_context_variables()
13514 dd->sc_sizes[SC_USER].count, in set_up_context_variables()
13515 dd->sc_sizes[SC_VL15].count); in set_up_context_variables()
13529 struct hfi1_devdata *dd = ppd->dd; in set_partition_keys()
13535 reg |= (ppd->pkeys[i] & in set_partition_keys()
13542 ((i - 3) * 2), reg); in set_partition_keys()
13555 * NOTE: All user context CSRs that are not mmapped write-only
13575 * to be read, so are not pre-initialized in write_uninitialized_csrs_and_memories()
13635 /* CCE_REVISION read-only */ in reset_cce_csrs()
13636 /* CCE_REVISION2 read-only */ in reset_cce_csrs()
13637 /* CCE_CTRL - bits clear automatically */ in reset_cce_csrs()
13638 /* CCE_STATUS read-only, use CceCtrl to clear */ in reset_cce_csrs()
13644 /* CCE_ERR_STATUS read-only */ in reset_cce_csrs()
13658 /* CCE_MSIX_PBA read-only */ in reset_cce_csrs()
13665 /* CCE_INT_STATUS read-only */ in reset_cce_csrs()
13669 /* CCE_INT_BLOCKED read-only */ in reset_cce_csrs()
13686 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can in reset_misc_csrs()
13687 * only be written in 128-byte chunks in reset_misc_csrs()
13693 /* MISC_STS_8051_DIGEST read-only */ in reset_misc_csrs()
13694 /* MISC_STS_SBM_DIGEST read-only */ in reset_misc_csrs()
13695 /* MISC_STS_PCIE_DIGEST read-only */ in reset_misc_csrs()
13696 /* MISC_STS_FAB_DIGEST read-only */ in reset_misc_csrs()
13697 /* MISC_ERR_STATUS read-only */ in reset_misc_csrs()
13713 /* SEND_CONTEXTS read-only */ in reset_txe_csrs()
13714 /* SEND_DMA_ENGINES read-only */ in reset_txe_csrs()
13715 /* SEND_PIO_MEM_SIZE read-only */ in reset_txe_csrs()
13716 /* SEND_DMA_MEM_SIZE read-only */ in reset_txe_csrs()
13719 /* SEND_PIO_ERR_STATUS read-only */ in reset_txe_csrs()
13723 /* SEND_DMA_ERR_STATUS read-only */ in reset_txe_csrs()
13727 /* SEND_EGRESS_ERR_STATUS read-only */ in reset_txe_csrs()
13739 /* SEND_ERR_STATUS read-only */ in reset_txe_csrs()
13742 /* SEND_ERR_FORCE read-only */ in reset_txe_csrs()
13755 /* SEND_CM_CREDIT_USED_STATUS read-only */ in reset_txe_csrs()
13764 /* SEND_CM_CREDIT_USED_VL read-only */ in reset_txe_csrs()
13765 /* SEND_CM_CREDIT_USED_VL15 read-only */ in reset_txe_csrs()
13766 /* SEND_EGRESS_CTXT_STATUS read-only */ in reset_txe_csrs()
13767 /* SEND_EGRESS_SEND_DMA_STATUS read-only */ in reset_txe_csrs()
13769 /* SEND_EGRESS_ERR_INFO read-only */ in reset_txe_csrs()
13770 /* SEND_EGRESS_ERR_SOURCE read-only */ in reset_txe_csrs()
13773 * TXE Per-Context CSRs in reset_txe_csrs()
13791 * TXE Per-SDMA CSRs in reset_txe_csrs()
13795 /* SEND_DMA_STATUS read-only */ in reset_txe_csrs()
13799 /* SEND_DMA_HEAD read-only */ in reset_txe_csrs()
13802 /* SEND_DMA_IDLE_CNT read-only */ in reset_txe_csrs()
13805 /* SEND_DMA_DESC_FETCHED_CNT read-only */ in reset_txe_csrs()
13806 /* SEND_DMA_ENG_ERR_STATUS read-only */ in reset_txe_csrs()
13840 * Give up after 1ms - maximum wait time. in init_rbufs()
13848 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n", in init_rbufs()
13852 udelay(2); /* do not busy-wait the CSR */ in init_rbufs()
13855 /* start the init - expect RcvCtrl to be 0 */ in init_rbufs()
13869 /* delay is required first time through - see above */ in init_rbufs()
13870 udelay(2); /* do not busy-wait the CSR */ in init_rbufs()
13875 /* give up after 100us - slowest possible at 33MHz is 73us */ in init_rbufs()
13895 /* RCV_STATUS read-only */ in reset_rxe_csrs()
13896 /* RCV_CONTEXTS read-only */ in reset_rxe_csrs()
13897 /* RCV_ARRAY_CNT read-only */ in reset_rxe_csrs()
13898 /* RCV_BUF_SIZE read-only */ in reset_rxe_csrs()
13903 /* this is a clear-down */ in reset_rxe_csrs()
13906 /* RCV_ERR_STATUS read-only */ in reset_rxe_csrs()
13924 * RXE Kernel and User Per-Context CSRs in reset_rxe_csrs()
13929 /* RCV_CTXT_STATUS read-only */ in reset_rxe_csrs()
13942 /* RCV_HDR_TAIL read-only */ in reset_rxe_csrs()
13944 /* RCV_EGR_INDEX_TAIL read-only */ in reset_rxe_csrs()
13946 /* RCV_EGR_OFFSET_TAIL read-only */ in reset_rxe_csrs()
13960 * SC 0-7 -> VL 0-7 (respectively)
13961 * SC 15 -> VL 15
13963 * all others -> VL 0
14009 *((u8 *)(dd->sc2vl) + i) = (u8)i; in init_sc2vl_tables()
14011 *((u8 *)(dd->sc2vl) + i) = 0; in init_sc2vl_tables()
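/*
 * A minimal sketch of the default SC-to-VL mapping described above:
 * SC 0-7 map 1:1 to VL 0-7, SC 15 maps to VL 15, and every other SC
 * falls back to VL 0.
 */
static u8 default_sc_to_vl(u8 sc)
{
	if (sc < 8)
		return sc;
	return sc == 15 ? 15 : 0;
}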
14017 * depend on the chip going through a power-on reset - a driver may be loaded
14020 * Do not write any CSR values to the chip in this routine - there may be
14072 pcie_flr(dd->pcidev); in init_chip()
14084 pcie_flr(dd->pcidev); in init_chip()
14126 dd->vau = CM_VAU; in init_early_variables()
14127 dd->link_credits = CM_GLOBAL_CREDITS; in init_early_variables()
14129 dd->link_credits--; in init_early_variables()
14130 dd->vcu = cu_to_vcu(hfi1_cu); in init_early_variables()
14131 /* enough room for 8 MAD packets plus header - 17K */ in init_early_variables()
14132 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau); in init_early_variables()
14133 if (dd->vl15_init > dd->link_credits) in init_early_variables()
14134 dd->vl15_init = dd->link_credits; in init_early_variables()
14139 for (i = 0; i < dd->num_pports; i++) { in init_early_variables()
14140 struct hfi1_pportdata *ppd = &dd->pport[i]; in init_early_variables()
14159 * hfi1_get_qp_map - get qp map
14172 * init_qpmap_table - init qp map
14244 memset(rmt->map, rxcontext, sizeof(rmt->map)); in alloc_rsm_map_table()
14245 rmt->used = 0; in alloc_rsm_map_table()
14263 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); in complete_rsm_map_table()
14283 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT | in add_rsm_rule()
14285 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT); in add_rsm_rule()
14287 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT | in add_rsm_rule()
14288 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT | in add_rsm_rule()
14289 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT | in add_rsm_rule()
14290 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT | in add_rsm_rule()
14291 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT | in add_rsm_rule()
14292 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT); in add_rsm_rule()
14294 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT | in add_rsm_rule()
14295 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT | in add_rsm_rule()
14296 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT | in add_rsm_rule()
14297 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT); in add_rsm_rule()
14355 * init_qos - init RX qos
14377 rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n); in init_qos()
14384 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES) in init_qos()
14396 idx = rmt->used + ((qpn << n) ^ i); in init_qos()
14400 reg = rmt->map[regidx]; in init_qos()
14404 rmt->map[regidx] = reg; in init_qos()
14411 rrd.offset = rmt->used; in init_qos()
14428 rmt->used += rmt_entries; in init_qos()
14431 dd->qos_shift = n + 1; in init_qos()
14434 dd->qos_shift = 1; in init_qos()
14435 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1); in init_qos()
14451 start = dd->first_dyn_alloc_ctxt; in init_fecn_handling()
14453 total_cnt = dd->num_rcv_contexts - start; in init_fecn_handling()
14456 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) { in init_fecn_handling()
14457 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n"); in init_fecn_handling()
14464 * in the range start...num_rcv_contexts-1 (inclusive). in init_fecn_handling()
14467 * the table - as long as the entries themselves do not wrap. in init_fecn_handling()
14471 offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start); in init_fecn_handling()
14473 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts; in init_fecn_handling()
14478 reg = rmt->map[regidx]; in init_fecn_handling()
14481 rmt->map[regidx] = reg; in init_fecn_handling()
14486 * o packet type 0 - expected in init_fecn_handling()
14490 * Use index 1 to extract the 8-bit receive context from DestQP in init_fecn_handling()
14509 rmt->used += total_cnt; in init_fecn_handling()
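/*
 * Hedged sketch of the wrap-safe offset above: the RSM adds an 8-bit
 * offset to the extracted context number modulo 256, so biasing by the
 * table size keeps the intermediate value non-negative while the
 * mod-256 arithmetic still lands on entry (used + ctxt - start).
 */
static u8 fecn_rmt_offset(unsigned int num_entries, unsigned int used,
			  unsigned int start)
{
	return (u8)(num_entries + used - start);
}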
14538 dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n", in hfi1_netdev_update_rmt()
14542 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */ in hfi1_netdev_update_rmt()
14549 reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8); in hfi1_netdev_update_rmt()
14554 dev_dbg(&(dd)->pcidev->dev, in hfi1_netdev_update_rmt()
14556 regoff - RCV_RSM_MAP_TABLE, reg); in hfi1_netdev_update_rmt()
14560 if (i < (NUM_NETDEV_MAP_ENTRIES - 1)) in hfi1_netdev_update_rmt()
14586 if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) { in hfi1_init_aip_rsm()
14642 if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1) in hfi1_deinit_aip_rsm()
14656 return -ENOMEM; in init_rxe()
14663 hfi1_netdev_set_free_rmt_idx(dd, rmt->used); in init_rxe()
14749 /* enable all per-context and per-SDMA engine errors */ in init_txe()
14756 assign_local_cm_au_table(dd, dd->vcu); in init_txe()
14760 * Don't set on Simulator - causes it to choke. in init_txe()
14762 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR) in init_txe()
14772 if (!rcd || !rcd->sc) in hfi1_set_ctxt_jkey()
14773 return -EINVAL; in hfi1_set_ctxt_jkey()
14775 hw_ctxt = rcd->sc->hw_context; in hfi1_set_ctxt_jkey()
14780 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY)) in hfi1_set_ctxt_jkey()
14784 * Enable send-side J_KEY integrity check, unless this is A0 h/w in hfi1_set_ctxt_jkey()
14796 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg); in hfi1_set_ctxt_jkey()
14806 if (!rcd || !rcd->sc) in hfi1_clear_ctxt_jkey()
14807 return -EINVAL; in hfi1_clear_ctxt_jkey()
14809 hw_ctxt = rcd->sc->hw_context; in hfi1_clear_ctxt_jkey()
14812 * Disable send-side J_KEY integrity check, unless this is A0 h/w. in hfi1_clear_ctxt_jkey()
14822 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0); in hfi1_clear_ctxt_jkey()
14833 if (!rcd || !rcd->sc) in hfi1_set_ctxt_pkey()
14834 return -EINVAL; in hfi1_set_ctxt_pkey()
14836 hw_ctxt = rcd->sc->hw_context; in hfi1_set_ctxt_pkey()
14853 if (!ctxt || !ctxt->sc) in hfi1_clear_ctxt_pkey()
14854 return -EINVAL; in hfi1_clear_ctxt_pkey()
14856 hw_ctxt = ctxt->sc->hw_context; in hfi1_clear_ctxt_pkey()
14878 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14892 /* pre-allocate the asic structure in case we are the first device */ in init_asic_data()
14893 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL); in init_asic_data()
14895 return -ENOMEM; in init_asic_data()
14901 dd->unit != peer->unit) in init_asic_data()
14907 dd->asic_data = peer->asic_data; in init_asic_data()
14910 dd->asic_data = asic_data; in init_asic_data()
14911 mutex_init(&dd->asic_data->asic_resource_mutex); in init_asic_data()
14913 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */ in init_asic_data()
14916 /* first one through - set up i2c devices */ in init_asic_data()
14918 ret = set_up_i2c(dd, dd->asic_data); in init_asic_data()
14924 * Set dd->boardname. Use a generic name if a name is not returned from
14927 * Return 0 on success, -ENOMEM if space could not be allocated.
14933 "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series"; in obtain_boardname()
14938 (void **)&dd->boardname); in obtain_boardname()
14942 dd->boardname = kstrdup(generic, GFP_KERNEL); in obtain_boardname()
14943 if (!dd->boardname) in obtain_boardname()
14944 return -ENOMEM; in obtain_boardname()
14955 * Return 0 on success, -EINVAL on failure.
14990 return -EINVAL; in check_int_registers()
14994 * hfi1_init_dd() - Initialize most of the dd structure.
14998 * chip-specific function pointers for later use.
15002 struct pci_dev *pdev = dd->pcidev; in hfi1_init_dd()
15012 struct pci_dev *parent = pdev->bus->self; in hfi1_init_dd()
15015 ppd = dd->pport; in hfi1_init_dd()
15016 for (i = 0; i < dd->num_pports; i++, ppd++) { in hfi1_init_dd()
15021 ppd->link_width_supported = in hfi1_init_dd()
15024 ppd->link_width_downgrade_supported = in hfi1_init_dd()
15025 ppd->link_width_supported; in hfi1_init_dd()
15027 ppd->link_width_enabled = OPA_LINK_WIDTH_4X; in hfi1_init_dd()
15028 ppd->link_width_downgrade_enabled = in hfi1_init_dd()
15029 ppd->link_width_downgrade_supported; in hfi1_init_dd()
15039 ppd->vls_supported = num_vls; in hfi1_init_dd()
15040 ppd->vls_operational = ppd->vls_supported; in hfi1_init_dd()
15043 dd->vld[vl].mtu = hfi1_max_mtu; in hfi1_init_dd()
15044 dd->vld[15].mtu = MAX_MAD_PACKET; in hfi1_init_dd()
15049 ppd->overrun_threshold = 0x4; in hfi1_init_dd()
15050 ppd->phy_error_threshold = 0xf; in hfi1_init_dd()
15051 ppd->port_crc_mode_enabled = link_crc_mask; in hfi1_init_dd()
15053 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8; in hfi1_init_dd()
15055 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4; in hfi1_init_dd()
15057 ppd->host_link_state = HLS_DN_OFFLINE; in hfi1_init_dd()
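The two shifted cap_to_port_ltp() values above pack the port's LTP CRC modes into one field: supported modes in bits 11:8, enabled modes in bits 7:4, leaving bits 3:0 for the active mode once the link negotiates one. A sketch of that packing; cap_to_port_ltp() is stubbed as an identity mapping purely for illustration:

#include <stdint.h>

uint16_t cap_to_port_ltp(uint16_t cap) { return cap; /* identity stub */ }

/* supported in bits 11:8, enabled in bits 7:4, active in bits 3:0 */
uint16_t pack_ltp_crc_mode(uint16_t supported, uint16_t enabled,
			   uint16_t active)
{
	return (cap_to_port_ltp(supported) << 8) |
	       (cap_to_port_ltp(enabled) << 4) |
	       (cap_to_port_ltp(active) & 0xF);
}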
15075 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT) in hfi1_init_dd()
15077 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT) in hfi1_init_dd()
15092 * obtain the hardware ID - NOT related to unit, which is a software enumeration in hfi1_init_dd()
15096 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT) in hfi1_init_dd()
15099 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT; in hfi1_init_dd()
15100 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT; in hfi1_init_dd()
15102 dd->icode < ARRAY_SIZE(inames) ? in hfi1_init_dd()
15103 inames[dd->icode] : "unknown", (int)dd->irev); in hfi1_init_dd()
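majrev/minrev, hfi1_id, icode and irev are all pulled out of the CCE revision CSRs with the same shift-and-mask idiom. A generic sketch of that extraction; the shift and mask below describe a hypothetical 4-bit field at bits 11:8, not the real register layout:

#include <stdint.h>

#define CHIP_REV_MAJOR_SHIFT	8	/* assumed field position */
#define CHIP_REV_MAJOR_MASK	0xFULL	/* assumed field width */

uint64_t get_field(uint64_t csr, unsigned int shift, uint64_t mask)
{
	return (csr >> shift) & mask;	/* shift the field down, mask it off */
}

uint64_t majrev(uint64_t revision_csr)
{
	return get_field(revision_csr, CHIP_REV_MAJOR_SHIFT,
			 CHIP_REV_MAJOR_MASK);
}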
15106 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15108 dd->pport->link_speed_enabled = dd->pport->link_speed_supported; in hfi1_init_dd()
15110 dd->pport->link_speed_active = OPA_LINK_SPEED_25G; in hfi1_init_dd()
15113 ppd = dd->pport; in hfi1_init_dd()
15114 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) { in hfi1_init_dd()
15115 ppd->link_width_supported = in hfi1_init_dd()
15116 ppd->link_width_enabled = in hfi1_init_dd()
15117 ppd->link_width_downgrade_supported = in hfi1_init_dd()
15118 ppd->link_width_downgrade_enabled = in hfi1_init_dd()
15126 ppd->vls_supported = sdma_engines; in hfi1_init_dd()
15127 ppd->vls_operational = ppd->vls_supported; in hfi1_init_dd()
15133 * non-zero, then the calculated field will be at least 1. in hfi1_init_dd()
15135 * Must be after icode is set up - the cclock rate depends on knowing the hardware being used in hfi1_init_dd()
15138 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64; in hfi1_init_dd()
15139 if (dd->rcv_intr_timeout_csr > in hfi1_init_dd()
15141 dd->rcv_intr_timeout_csr = in hfi1_init_dd()
15143 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout) in hfi1_init_dd()
15144 dd->rcv_intr_timeout_csr = 1; in hfi1_init_dd()
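The receive-interrupt timeout is converted from nanoseconds to the CSR's units of 64 cclocks, saturated at the field maximum, and rounded up to 1 so that a non-zero request never becomes zero. A sketch of that conversion under an assumed cclock rate and field width:

#include <stdint.h>

#define CCLOCK_HZ		805000000ULL	/* assumed cclock rate */
#define TIMEOUT_RELOAD_MASK	0xFFFFULL	/* assumed field width */

uint64_t ns_to_timeout_csr(uint64_t ns)
{
	uint64_t cclocks = ns * CCLOCK_HZ / 1000000000ULL;
	uint64_t csr = cclocks / 64;		/* the CSR counts 64-cclock units */

	if (csr > TIMEOUT_RELOAD_MASK)
		csr = TIMEOUT_RELOAD_MASK;	/* saturate at the field maximum */
	else if (csr == 0 && ns)
		csr = 1;			/* a non-zero request never rounds to 0 */
	return csr;
}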
15184 * - init_chip() - the chip will not initiate any PCIe transactions in hfi1_init_dd()
15185 * - pcie_speeds() - reads the current link speed in hfi1_init_dd()
15186 * - hfi1_firmware_init() - the needed firmware is ready to be downloaded in hfi1_init_dd()
15208 snprintf(dd->boardversion, BOARD_VERS_MAX, in hfi1_init_dd()
15211 (u32)dd->majrev, in hfi1_init_dd()
15212 (u32)dd->minrev, in hfi1_init_dd()
15213 (dd->revision >> CCE_REVISION_SW_SHIFT) in hfi1_init_dd()
15232 /* set initial non-RXE, non-TXE CSRs */ in hfi1_init_dd()
15261 for (i = 0; i < dd->num_pports; ++i) { in hfi1_init_dd()
15276 /* set up LCB access - must be after set_up_interrupts() */ in hfi1_init_dd()
15284 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n", in hfi1_init_dd()
15285 (dd->base_guid & 0xFFFFFF) | in hfi1_init_dd()
15286 ((dd->base_guid >> 11) & 0xF000000)); in hfi1_init_dd()
15288 dd->oui1 = dd->base_guid >> 56 & 0xFF; in hfi1_init_dd()
15289 dd->oui2 = dd->base_guid >> 48 & 0xFF; in hfi1_init_dd()
15290 dd->oui3 = dd->base_guid >> 40 & 0xFF; in hfi1_init_dd()
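The three assignments above treat the top three bytes of the EUI-64-style base GUID as the OUI. A worked sketch with a made-up GUID:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base_guid = 0x0011750123456789ULL;	/* made-up GUID */
	uint8_t oui1 = base_guid >> 56 & 0xFF;
	uint8_t oui2 = base_guid >> 48 & 0xFF;
	uint8_t oui3 = base_guid >> 40 & 0xFF;

	printf("OUI %02x:%02x:%02x\n", oui1, oui2, oui3);	/* 00:11:75 */
	return 0;
}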
15306 init_completion(&dd->user_comp); in hfi1_init_dd()
15309 refcount_set(&dd->user_refcount, 1); in hfi1_init_dd()
15333 u32 current_egress_rate = ppd->current_egress_rate; in delay_cycles()
15336 if (desired_egress_rate == -1) in delay_cycles()
15342 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) - in delay_cycles()
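delay_cycles() implements static-rate pacing: when the requested egress rate is slower than the link's current rate, the extra delay is the difference between the cycles the same packet occupies at each rate. A sketch under an assumed fabric clock; egress_cycles() here is a model, not the driver's helper, and rates are in units of 10^6 bits/sec as the driver comments state:

#include <stdint.h>

#define FABRIC_CLK_MHZ	805	/* assumed clock: cycles per microsecond */

/* cycles needed to emit 'len' bytes at 'rate' (units of 10^6 bits/sec) */
uint32_t egress_cycles(uint32_t len, uint32_t rate)
{
	return (uint32_t)((uint64_t)len * 8 * FABRIC_CLK_MHZ / rate);
}

uint32_t pacing_delay(uint32_t cur_rate, uint32_t want_rate, uint32_t dw_len)
{
	if (want_rate >= cur_rate)
		return 0;	/* already at or below the desired rate: no delay */
	return egress_cycles(dw_len * 4, want_rate) -
	       egress_cycles(dw_len * 4, cur_rate);
}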
15349 * create_pbc - build a pbc for transmission
15351 * @flags: special case flags or-ed in built pbc
15358 * NOTE: The PBC created will not insert any HCRC - all callers but one are for verbs, which does not use this feature in create_pbc()
15403 if (dd->icode != ICODE_RTL_SILICON || in thermal_init()
15432 /* Step 3: Write clock divider value (100MHz -> 2MHz) */ in thermal_init()
15447 /* Step 5: De-assert block reset and start conversion */ in thermal_init()
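Step 3's comment implies a divide-by-50 (100 MHz SBus input, 2 MHz sensor clock). A trivial check of that arithmetic; the SBus register encoding of the divider is not shown here:

#include <stdio.h>

int main(void)
{
	unsigned int in_khz = 100000, sensor_khz = 2000;

	/* 100 MHz / 2 MHz = 50 */
	printf("thermal clock divider = %u\n", in_khz / sensor_khz);
	return 0;
}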
15472 struct hfi1_pportdata *ppd = &dd->pport[0]; in handle_temp_err()
15480 dd->flags |= HFI1_FORCED_FREEZE; in handle_temp_err()
15493 ppd->driver_link_ready = 0; in handle_temp_err()
15494 ppd->link_enabled = 0; in handle_temp_err()