Lines Matching +full:chg +full:- +full:int

14  *	- Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
38 #include <linux/radix-tree.h>
93 /* base qkey for use in sriov tunnel-qp/proxy-qp communication.
95 * and to test for violation, we use the mask (protect against future chg).
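The violation test the comment describes can be sketched as follows. This is a minimal illustration only: it assumes the MLX4_RESERVED_QKEY_BASE and MLX4_RESERVED_QKEY_MASK defines that accompany this comment in the full header (they fall on unmatched lines), and mlx4_qkey_is_reserved() is an invented helper name.

static inline int mlx4_qkey_is_reserved(u32 qkey)
{
	/* a qkey is off-limits for general use when masking it with the
	 * reserved-range mask yields the reserved SR-IOV base
	 */
	return (qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE;
}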
112 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
116 * -device managed - High level API for ib and eth flow steering. FW is
118 * - B0 steering mode - Common low level API for ib and (if supported) eth.
119 * - A0 steering mode - Limited low level API for eth. In case of IB,
136 static inline const char *mlx4_steering_mode_str(int steering_mode) in mlx4_steering_mode_str()
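A usage sketch for the helper above, from a hypothetical caller: report_steering_mode() is an invented name and the print assumes the usual kernel printk helpers; mlx4_steering_mode_str() and dev->caps.steering_mode are the header's own API.

static void report_steering_mode(struct mlx4_dev *dev)
{
	/* log the steering mode negotiated with firmware at probe time */
	pr_info("mlx4: flow steering mode: %s\n",
		mlx4_steering_mode_str(dev->caps.steering_mode));
}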
242 /* bit enums for an 8-bit flags field indicating special use
447 * - ctrl segment (16 bytes)
448 * - rdma segment (16 bytes)
449 * - scatter elements (16 bytes each)
451 MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
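The arithmetic behind the constant, spelled out as a standalone compile-time check (illustrative; the sizes are taken from the comment above):

/* a 512-byte RDMA-read WQE minus the 16-byte ctrl segment and the
 * 16-byte rdma segment leaves 480 bytes, i.e. 480 / 16 = 30 scatter entries
 */
_Static_assert((512 - 16 - 16) / 16 == 30, "MLX4_MAX_SGE_RD evaluates to 30");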
527 int num_ports;
528 int vl_cap[MLX4_MAX_PORTS + 1];
529 int ib_mtu_cap[MLX4_MAX_PORTS + 1];
532 int eth_mtu_cap[MLX4_MAX_PORTS + 1];
533 int gid_table_len[MLX4_MAX_PORTS + 1];
534 int pkey_table_len[MLX4_MAX_PORTS + 1];
535 int trans_type[MLX4_MAX_PORTS + 1];
536 int vendor_oui[MLX4_MAX_PORTS + 1];
537 int wavelength[MLX4_MAX_PORTS + 1];
539 int local_ca_ack_delay;
540 int num_uars;
542 int bf_reg_size;
543 int bf_regs_per_page;
544 int max_sq_sg;
545 int max_rq_sg;
546 int num_qps;
547 int max_wqes;
548 int max_sq_desc_sz;
549 int max_rq_desc_sz;
550 int max_qp_init_rdma;
551 int max_qp_dest_rdma;
552 int max_tc_eth;
558 int num_srqs;
559 int max_srq_wqes;
560 int max_srq_sge;
561 int reserved_srqs;
562 int num_cqs;
563 int max_cqes;
564 int reserved_cqs;
565 int num_sys_eqs;
566 int num_eqs;
567 int reserved_eqs;
568 int num_comp_vectors;
569 int num_mpts;
570 int max_fmr_maps;
571 int num_mtts;
572 int fmr_reserved_mtts;
573 int reserved_mtts;
574 int reserved_mrws;
575 int reserved_uars;
576 int num_mgms;
577 int num_amgms;
578 int reserved_mcgs;
579 int num_qp_per_mgm;
580 int steering_mode;
581 int dmfs_high_steer_mode;
582 int fs_log_max_ucast_qp_range_size;
583 int num_pds;
584 int reserved_pds;
585 int max_xrcds;
586 int reserved_xrcds;
587 int mtt_entry_sz;
596 int max_gso_sz;
597 int max_rss_tbl_sz;
598 int reserved_qps_cnt[MLX4_NUM_QP_REGION];
599 int reserved_qps;
600 int reserved_qps_base[MLX4_NUM_QP_REGION];
601 int log_num_macs;
602 int log_num_vlans;
619 int tunnel_offload_mode;
638 int nbufs;
639 int npages;
640 int page_shift;
645 int order;
646 int page_shift;
671 int index;
672 int order;
688 int enabled;
700 int enabled;
708 int max_pages;
709 int max_maps;
710 int maps;
716 int index;
724 unsigned int offset;
725 int buf_size;
741 int arm_sn;
743 int cqn;
748 int reset_notify_added;
755 int qpn;
764 int srqn;
765 int max;
766 int max_gs;
767 int wqe_shift;
828 int qp;
829 int cq;
830 int srq;
831 int mpt;
832 int mtt;
833 int counter;
834 int xrcd;
850 int nvfs[MLX4_MAX_PORTS + 1];
851 int num_vfs;
875 int numa_node;
876 int oper_log_mgm_entry_size;
975 int set_guid0;
976 int set_node_guid;
977 int set_si_guid;
979 int port_width_cap;
1009 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
1010 if ((type) == (dev)->caps.port_mask[(port)])
1013 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
1014 if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
1015 ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) || \
1016 ((dev)->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2))
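A usage sketch for the mlx4_foreach_port() iterator whose body appears above, from a hypothetical caller: count_eth_ports() is an invented name, and MLX4_PORT_TYPE_ETH is assumed from the port-type enum defined elsewhere in this header.

static int count_eth_ports(struct mlx4_dev *dev)
{
	int port, n = 0;

	/* visits ports 1..num_ports whose port_mask entry matches the type */
	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
		n++;
	return n;
}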
1019 #define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
1023 static inline int mlx4_master_func_num(struct mlx4_dev *dev) in mlx4_master_func_num()
1025 return dev->caps.function; in mlx4_master_func_num()
1028 static inline int mlx4_is_master(struct mlx4_dev *dev) in mlx4_is_master()
1030 return dev->flags & MLX4_FLAG_MASTER; in mlx4_is_master()
1033 static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) in mlx4_num_reserved_sqps()
1035 return dev->phys_caps.base_sqpn + 8 + in mlx4_num_reserved_sqps()
1039 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) in mlx4_is_qp_reserved()
1041 return (qpn < dev->phys_caps.base_sqpn + 8 + in mlx4_is_qp_reserved()
1043 qpn >= dev->phys_caps.base_sqpn) || in mlx4_is_qp_reserved()
1044 (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); in mlx4_is_qp_reserved()
1047 static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) in mlx4_is_guest_proxy()
1049 int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8; in mlx4_is_guest_proxy()
1057 static inline int mlx4_is_mfunc(struct mlx4_dev *dev) in mlx4_is_mfunc()
1059 return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER); in mlx4_is_mfunc()
1062 static inline int mlx4_is_slave(struct mlx4_dev *dev) in mlx4_is_slave()
1064 return dev->flags & MLX4_FLAG_SLAVE; in mlx4_is_slave()
1067 static inline int mlx4_is_eth(struct mlx4_dev *dev, int port) in mlx4_is_eth()
1069 return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1; in mlx4_is_eth()
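A sketch of how the multi-function predicates above combine, from a hypothetical caller; describe_function() and the returned strings are illustrative only.

static const char *describe_function(struct mlx4_dev *dev)
{
	if (!mlx4_is_mfunc(dev))	/* neither the SLAVE nor the MASTER flag */
		return "single-function PF";
	if (mlx4_is_master(dev))
		return "SR-IOV master (PF)";
	return mlx4_is_slave(dev) ? "SR-IOV slave (VF)" : "unknown";
}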
1072 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
1074 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
1075 static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset) in mlx4_buf_offset()
1077 if (BITS_PER_LONG == 64 || buf->nbufs == 1) in mlx4_buf_offset()
1078 return (u8 *)buf->direct.buf + offset; in mlx4_buf_offset()
1080 return (u8 *)buf->page_list[offset >> PAGE_SHIFT].buf + in mlx4_buf_offset()
1081 (offset & (PAGE_SIZE - 1)); in mlx4_buf_offset()
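A usage sketch for mlx4_buf_offset() above, from a hypothetical caller: get_wqe() and entry_sz are invented, and the buffer is assumed to have been allocated with mlx4_buf_alloc() beforehand.

static void *get_wqe(struct mlx4_buf *buf, int entry_sz, int n)
{
	/* works whether the buffer is one direct allocation or a page list:
	 * the helper selects the page and the in-page remainder
	 */
	return mlx4_buf_offset(buf, n * entry_sz);
}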
1084 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
1086 int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
1089 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
1091 int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
1094 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
1099 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
1100 int npages, int page_shift, struct mlx4_mr *mr);
1101 int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
1102 int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
1103 int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
1106 int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
1107 int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
1108 int start_index, int npages, u64 *page_list);
1109 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
1112 int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
1116 int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
1117 int size, int max_direct);
1119 int size);
1121 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
1123 unsigned vector, int collapsed, int timestamp_en);
1125 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
1126 int *base, u8 flags);
1127 void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
1129 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
1133 int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
1136 int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
1137 int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);
1139 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
1140 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
1142 int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1143 int block_mcast_loopback, enum mlx4_protocol prot);
1144 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1146 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1147 u8 port, int block_mcast_loopback,
1149 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
1168 MLX4_NET_TRANS_RULE_DUMMY = -1, /* force enum to be signed */
1173 static inline int map_hw_to_sw_id(u16 header_id) in map_hw_to_sw_id()
1176 int i; in map_hw_to_sw_id()
1181 return -EINVAL; in map_hw_to_sw_id()
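The fragment above is a linear lookup that returns -EINVAL on a miss; a self-contained sketch of the same pattern follows. hw_to_sw_table[] and lookup_sw_id() are illustrative stand-ins for the header's own static mapping, which falls on unmatched lines.

static inline int lookup_sw_id(const u16 *hw_to_sw_table, int n, u16 header_id)
{
	int i;

	for (i = 0; i < n; i++)
		if (hw_to_sw_table[i] == header_id)
			return i;	/* software rule id */
	return -EINVAL;			/* unknown hardware header id */
}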
1193 MLX4_FS_MODE_DUMMY = -1, /* force enum to be signed */
1365 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
1367 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
1369 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
1370 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
1371 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
1372 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
1373 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
1375 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
1377 int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
1378 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
1379 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1381 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1383 int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
1384 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
1386 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
1387 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
1388 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
1389 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
1391 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
1392 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
1393 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1396 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
1397 int npages, u64 iova, u32 *lkey, u32 *rkey);
1398 int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
1399 int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
1400 int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1403 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
1404 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
1405 int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
1406 int mlx4_test_async(struct mlx4_dev *dev);
1407 int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
1411 bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
1412 int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
1413 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1415 int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
1416 int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
1418 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
1419 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
1420 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
1422 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
1424 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
1426 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
1427 int port);
1428 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port);
1429 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port);
1430 int mlx4_flow_attach(struct mlx4_dev *dev,
1432 int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
1433 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
1435 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
1437 int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
1439 int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
1440 int port, int qpn, u16 prio, u64 *reg_id);
1442 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
1443 int i, int val);
1445 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
1447 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
1448 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
1449 int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
1450 int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr);
1451 int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change…
1452 enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port);
1453 int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_p…
1455 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
1456 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
1458 int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1459 int *slave_id);
1460 int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1463 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
1472 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave);
1477 int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port);
1484 int port);
1493 int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
1495 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
1497 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
1498 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
1499 int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port);
1500 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
1501 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
1502 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
1503 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
1504 int enable);
1505 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
1507 int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
1509 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
1511 int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
1517 int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
1518 u64 iova, u64 size, int npages,
1519 int page_shift, struct mlx4_mpt_entry *mpt_entry);
1521 int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1523 int mlx4_max_tc(struct mlx4_dev *dev);
1564 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1568 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1571 static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index) in mlx4_to_hw_uar_index()
1573 return (index << (PAGE_SHIFT - dev->uar_page_shift)); in mlx4_to_hw_uar_index()
1576 static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev) in mlx4_get_num_reserved_uar()
1579 return (128 >> (PAGE_SHIFT - dev->uar_page_shift)); in mlx4_get_num_reserved_uar()
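Worked arithmetic for the two UAR helpers above (illustrative, standalone checks): with 64 KB kernel pages (PAGE_SHIFT = 16) over 4 KB UAR pages (uar_page_shift = 12) the shift is 4, so software UAR index 3 maps to hardware index 48 and only 128 >> 4 = 8 UARs remain reserved; with 4 KB kernel pages the shift is 0 and the helpers reduce to the identity and the constant 128.

_Static_assert((3 << (16 - 12)) == 48, "sw UAR index 3 -> hw UAR index 48");
_Static_assert((128 >> (16 - 12)) == 8, "8 reserved UARs with 64 KB pages");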