Lines matching full:dev (identifier search over drivers/net/ethernet/mellanox/mlx4/main.c; each entry shows the source line number, the matched line, and its enclosing function)
195 struct mlx4_dev *dev = &priv->dev; in mlx4_devlink_crdump_snapshot_get() local
197 ctx->val.vbool = dev->persist->crdump.snapshot_enable; in mlx4_devlink_crdump_snapshot_get()
206 struct mlx4_dev *dev = &priv->dev; in mlx4_devlink_crdump_snapshot_set() local
208 dev->persist->crdump.snapshot_enable = ctx->val.vbool; in mlx4_devlink_crdump_snapshot_set()
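The two fragments above come from the crdump devlink parameter accessors. A minimal sketch of the callback pair they belong to, reconstructed around the matched lines (the devlink_priv() lookup and the older three-argument devlink_param_gset_ctx callback signature are assumptions, not part of the match):

    static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
                                                struct devlink_param_gset_ctx *ctx)
    {
        struct mlx4_priv *priv = devlink_priv(devlink);
        struct mlx4_dev *dev = &priv->dev;

        /* report the current crdump snapshot setting as a bool param */
        ctx->val.vbool = dev->persist->crdump.snapshot_enable;
        return 0;
    }

    static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
                                                struct devlink_param_gset_ctx *ctx)
    {
        struct mlx4_priv *priv = devlink_priv(devlink);
        struct mlx4_dev *dev = &priv->dev;

        /* latch the new value; the crdump code reads it at dump time */
        dev->persist->crdump.snapshot_enable = ctx->val.vbool;
        return 0;
    }

Note the setting lives under dev->persist, the container for state that outlives a device reset, which is presumably why it was placed there rather than in dev->caps.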
290 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev, in mlx4_set_num_reserved_uars() argument
297 dev->caps.reserved_uars = in mlx4_set_num_reserved_uars()
299 mlx4_get_num_reserved_uar(dev), in mlx4_set_num_reserved_uars()
301 (1 << (PAGE_SHIFT - dev->uar_page_shift))); in mlx4_set_num_reserved_uars()
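A sketch of the whole helper around the three matched lines: FW reports reserved_uars in system-page units, so it is rescaled when the UAR page (dev->uar_page_shift) is smaller than the system page. The max_t() in the middle is an assumption inferred from the visible operands:

    static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
                                                  struct mlx4_dev_cap *dev_cap)
    {
        /* keep at least the driver's own minimum, and at least the
         * FW-reported reservation converted to UAR-page units
         */
        dev->caps.reserved_uars =
            max_t(int,
                  mlx4_get_num_reserved_uar(dev),
                  dev_cap->reserved_uars /
                  (1 << (PAGE_SHIFT - dev->uar_page_shift)));
    }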
304 int mlx4_check_port_params(struct mlx4_dev *dev, in mlx4_check_port_params() argument
309 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { in mlx4_check_port_params()
310 for (i = 0; i < dev->caps.num_ports - 1; i++) { in mlx4_check_port_params()
312 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); in mlx4_check_port_params()
318 for (i = 0; i < dev->caps.num_ports; i++) { in mlx4_check_port_params()
319 if (!(port_type[i] & dev->caps.supported_type[i+1])) { in mlx4_check_port_params()
320 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", in mlx4_check_port_params()
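The mlx4_check_port_params() fragments fit together as below; the inner comparison and the return codes are assumptions filled in from the surrounding error messages:

    int mlx4_check_port_params(struct mlx4_dev *dev,
                               enum mlx4_port_type *port_type)
    {
        int i;

        /* without the dual-port-different-protocol (DPDP) cap, all
         * ports must be configured to the same type
         */
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
            for (i = 0; i < dev->caps.num_ports - 1; i++) {
                if (port_type[i] != port_type[i + 1]) {
                    mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
                    return -EOPNOTSUPP;
                }
            }
        }

        /* port_type[] is 0-based, caps.supported_type[] is 1-based */
        for (i = 0; i < dev->caps.num_ports; i++) {
            if (!(port_type[i] & dev->caps.supported_type[i + 1])) {
                mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
                         i + 1);
                return -EOPNOTSUPP;
            }
        }
        return 0;
    }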
328 static void mlx4_set_port_mask(struct mlx4_dev *dev) in mlx4_set_port_mask() argument
332 for (i = 1; i <= dev->caps.num_ports; ++i) in mlx4_set_port_mask()
333 dev->caps.port_mask[i] = dev->caps.port_type[i]; in mlx4_set_port_mask()
340 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) in mlx4_query_func() argument
345 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { in mlx4_query_func()
346 err = mlx4_QUERY_FUNC(dev, &func, 0); in mlx4_query_func()
348 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); in mlx4_query_func()
359 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) in mlx4_enable_cqe_eqe_stride() argument
361 struct mlx4_caps *dev_cap = &dev->caps; in mlx4_enable_cqe_eqe_stride()
379 mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); in mlx4_enable_cqe_eqe_stride()
384 if (mlx4_is_master(dev)) in mlx4_enable_cqe_eqe_stride()
388 mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); in mlx4_enable_cqe_eqe_stride()
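Only a few lines of mlx4_enable_cqe_eqe_stride() matched, but the shape is recoverable: strided 64B CQEs/EQEs are kept only on CPUs with a cache line wider than 64 bytes, and a master additionally advertises the stride in function_caps. A rough sketch under those assumptions (the flag names and the 64B-flag handling are assumptions):

    static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
    {
        struct mlx4_caps *dev_cap = &dev->caps;

        /* stride needs FW support for both strided CQEs and EQEs */
        if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
            !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
            return;

        if (cache_line_size() == 128 || cache_line_size() == 256) {
            mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
            /* real payload inside the strided CQE shrinks to 32B */
            dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
            dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

            if (mlx4_is_master(dev))
                dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
        } else {
            mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
            dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
            dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
        }
    }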
394 static int _mlx4_dev_port(struct mlx4_dev *dev, int port, in _mlx4_dev_port() argument
397 dev->caps.vl_cap[port] = port_cap->max_vl; in _mlx4_dev_port()
398 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu; in _mlx4_dev_port()
399 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids; in _mlx4_dev_port()
400 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys; in _mlx4_dev_port()
404 dev->caps.gid_table_len[port] = port_cap->max_gids; in _mlx4_dev_port()
405 dev->caps.pkey_table_len[port] = port_cap->max_pkeys; in _mlx4_dev_port()
406 dev->caps.port_width_cap[port] = port_cap->max_port_width; in _mlx4_dev_port()
407 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; in _mlx4_dev_port()
408 dev->caps.max_tc_eth = port_cap->max_tc_eth; in _mlx4_dev_port()
409 dev->caps.def_mac[port] = port_cap->def_mac; in _mlx4_dev_port()
410 dev->caps.supported_type[port] = port_cap->supported_port_types; in _mlx4_dev_port()
411 dev->caps.suggested_type[port] = port_cap->suggested_type; in _mlx4_dev_port()
412 dev->caps.default_sense[port] = port_cap->default_sense; in _mlx4_dev_port()
413 dev->caps.trans_type[port] = port_cap->trans_type; in _mlx4_dev_port()
414 dev->caps.vendor_oui[port] = port_cap->vendor_oui; in _mlx4_dev_port()
415 dev->caps.wavelength[port] = port_cap->wavelength; in _mlx4_dev_port()
416 dev->caps.trans_code[port] = port_cap->trans_code; in _mlx4_dev_port()
421 static int mlx4_dev_port(struct mlx4_dev *dev, int port, in mlx4_dev_port() argument
426 err = mlx4_QUERY_PORT(dev, port, port_cap); in mlx4_dev_port()
429 mlx4_err(dev, "QUERY_PORT command failed.\n"); in mlx4_dev_port()
434 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev) in mlx4_enable_ignore_fcs() argument
436 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)) in mlx4_enable_ignore_fcs()
439 if (mlx4_is_mfunc(dev)) { in mlx4_enable_ignore_fcs()
440 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS"); in mlx4_enable_ignore_fcs()
441 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; in mlx4_enable_ignore_fcs()
445 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { in mlx4_enable_ignore_fcs()
446 mlx4_dbg(dev, in mlx4_enable_ignore_fcs()
448 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS; in mlx4_enable_ignore_fcs()
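mlx4_enable_ignore_fcs() is almost fully visible in the match; a sketch with the one hidden debug message paraphrased (its exact wording is an assumption):

    static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
    {
        if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
            return;

        /* a multi-function (SR-IOV) setup cannot honour Ignore-FCS */
        if (mlx4_is_mfunc(dev)) {
            mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
            dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
            return;
        }

        /* Ignore-FCS also requires the keep-FCS device capability */
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
            mlx4_dbg(dev,
                     "Keep FCS is not supported - Disabling Ignore FCS");
            dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
        }
    }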
454 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) in mlx4_dev_cap() argument
459 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); in mlx4_dev_cap()
461 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); in mlx4_dev_cap()
464 mlx4_dev_cap_dump(dev, dev_cap); in mlx4_dev_cap()
467 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", in mlx4_dev_cap()
472 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", in mlx4_dev_cap()
477 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) { in mlx4_dev_cap()
478 …mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n… in mlx4_dev_cap()
481 pci_resource_len(dev->persist->pdev, 2)); in mlx4_dev_cap()
485 dev->caps.num_ports = dev_cap->num_ports; in mlx4_dev_cap()
486 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; in mlx4_dev_cap()
487 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? in mlx4_dev_cap()
488 dev->caps.num_sys_eqs : in mlx4_dev_cap()
490 for (i = 1; i <= dev->caps.num_ports; ++i) { in mlx4_dev_cap()
491 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i); in mlx4_dev_cap()
493 mlx4_err(dev, "QUERY_PORT command failed, aborting\n"); in mlx4_dev_cap()
498 dev->caps.map_clock_to_user = dev_cap->map_clock_to_user; in mlx4_dev_cap()
499 dev->caps.uar_page_size = PAGE_SIZE; in mlx4_dev_cap()
500 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; in mlx4_dev_cap()
501 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; in mlx4_dev_cap()
502 dev->caps.bf_reg_size = dev_cap->bf_reg_size; in mlx4_dev_cap()
503 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; in mlx4_dev_cap()
504 dev->caps.max_sq_sg = dev_cap->max_sq_sg; in mlx4_dev_cap()
505 dev->caps.max_rq_sg = dev_cap->max_rq_sg; in mlx4_dev_cap()
506 dev->caps.max_wqes = dev_cap->max_qp_sz; in mlx4_dev_cap()
507 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; in mlx4_dev_cap()
508 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; in mlx4_dev_cap()
509 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; in mlx4_dev_cap()
510 dev->caps.reserved_srqs = dev_cap->reserved_srqs; in mlx4_dev_cap()
511 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; in mlx4_dev_cap()
512 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; in mlx4_dev_cap()
517 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; in mlx4_dev_cap()
518 dev->caps.reserved_cqs = dev_cap->reserved_cqs; in mlx4_dev_cap()
519 dev->caps.reserved_eqs = dev_cap->reserved_eqs; in mlx4_dev_cap()
520 dev->caps.reserved_mtts = dev_cap->reserved_mtts; in mlx4_dev_cap()
521 dev->caps.reserved_mrws = dev_cap->reserved_mrws; in mlx4_dev_cap()
523 dev->caps.reserved_pds = dev_cap->reserved_pds; in mlx4_dev_cap()
524 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? in mlx4_dev_cap()
526 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? in mlx4_dev_cap()
528 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; in mlx4_dev_cap()
530 dev->caps.max_msg_sz = dev_cap->max_msg_sz; in mlx4_dev_cap()
531 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); in mlx4_dev_cap()
532 dev->caps.flags = dev_cap->flags; in mlx4_dev_cap()
533 dev->caps.flags2 = dev_cap->flags2; in mlx4_dev_cap()
534 dev->caps.bmme_flags = dev_cap->bmme_flags; in mlx4_dev_cap()
535 dev->caps.reserved_lkey = dev_cap->reserved_lkey; in mlx4_dev_cap()
536 dev->caps.stat_rate_support = dev_cap->stat_rate_support; in mlx4_dev_cap()
537 dev->caps.max_gso_sz = dev_cap->max_gso_sz; in mlx4_dev_cap()
538 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; in mlx4_dev_cap()
539 dev->caps.wol_port[1] = dev_cap->wol_port[1]; in mlx4_dev_cap()
540 dev->caps.wol_port[2] = dev_cap->wol_port[2]; in mlx4_dev_cap()
541 dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs; in mlx4_dev_cap()
544 if (!mlx4_is_slave(dev)) { in mlx4_dev_cap()
548 if (enable_4k_uar || !dev->persist->num_vfs) in mlx4_dev_cap()
549 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; in mlx4_dev_cap()
551 dev->uar_page_shift = PAGE_SHIFT; in mlx4_dev_cap()
553 mlx4_set_num_reserved_uars(dev, dev_cap); in mlx4_dev_cap()
556 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { in mlx4_dev_cap()
560 err = mlx4_QUERY_HCA(dev, &hca_param); in mlx4_dev_cap()
568 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN; in mlx4_dev_cap()
572 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) in mlx4_dev_cap()
573 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; in mlx4_dev_cap()
575 if (mlx4_is_mfunc(dev)) in mlx4_dev_cap()
576 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; in mlx4_dev_cap()
579 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; in mlx4_dev_cap()
580 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; in mlx4_dev_cap()
582 dev->caps.log_num_macs = log_num_mac; in mlx4_dev_cap()
583 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; in mlx4_dev_cap()
586 for (i = 1; i <= dev->caps.num_ports; ++i) { in mlx4_dev_cap()
587 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; in mlx4_dev_cap()
588 if (dev->caps.supported_type[i]) { in mlx4_dev_cap()
590 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) in mlx4_dev_cap()
591 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; in mlx4_dev_cap()
593 else if (dev->caps.supported_type[i] == in mlx4_dev_cap()
595 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; in mlx4_dev_cap()
601 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? in mlx4_dev_cap()
604 dev->caps.port_type[i] = port_type_array[i - 1]; in mlx4_dev_cap()
613 mlx4_priv(dev)->sense.sense_allowed[i] = in mlx4_dev_cap()
614 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && in mlx4_dev_cap()
615 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && in mlx4_dev_cap()
616 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); in mlx4_dev_cap()
623 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { in mlx4_dev_cap()
625 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; in mlx4_dev_cap()
626 mlx4_SENSE_PORT(dev, i, &sensed_port); in mlx4_dev_cap()
628 dev->caps.port_type[i] = sensed_port; in mlx4_dev_cap()
630 dev->caps.possible_type[i] = dev->caps.port_type[i]; in mlx4_dev_cap()
633 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) { in mlx4_dev_cap()
634 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs; in mlx4_dev_cap()
635 mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", in mlx4_dev_cap()
636 i, 1 << dev->caps.log_num_macs); in mlx4_dev_cap()
638 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) { in mlx4_dev_cap()
639 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans; in mlx4_dev_cap()
640 mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", in mlx4_dev_cap()
641 i, 1 << dev->caps.log_num_vlans); in mlx4_dev_cap()
645 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) && in mlx4_dev_cap()
648 mlx4_warn(dev, in mlx4_dev_cap()
650 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP; in mlx4_dev_cap()
653 dev->caps.max_counters = dev_cap->max_counters; in mlx4_dev_cap()
655 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; in mlx4_dev_cap()
656 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = in mlx4_dev_cap()
657 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = in mlx4_dev_cap()
658 (1 << dev->caps.log_num_macs) * in mlx4_dev_cap()
659 (1 << dev->caps.log_num_vlans) * in mlx4_dev_cap()
660 dev->caps.num_ports; in mlx4_dev_cap()
661 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; in mlx4_dev_cap()
664 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) in mlx4_dev_cap()
665 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base; in mlx4_dev_cap()
667 dev->caps.dmfs_high_rate_qpn_base = in mlx4_dev_cap()
668 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; in mlx4_dev_cap()
671 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { in mlx4_dev_cap()
672 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range; in mlx4_dev_cap()
673 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT; in mlx4_dev_cap()
674 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0; in mlx4_dev_cap()
676 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED; in mlx4_dev_cap()
677 dev->caps.dmfs_high_rate_qpn_base = in mlx4_dev_cap()
678 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; in mlx4_dev_cap()
679 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE; in mlx4_dev_cap()
682 dev->caps.rl_caps = dev_cap->rl_caps; in mlx4_dev_cap()
684 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] = in mlx4_dev_cap()
685 dev->caps.dmfs_high_rate_qpn_range; in mlx4_dev_cap()
687 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + in mlx4_dev_cap()
688 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + in mlx4_dev_cap()
689 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + in mlx4_dev_cap()
690 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; in mlx4_dev_cap()
692 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0; in mlx4_dev_cap()
694 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) { in mlx4_dev_cap()
697 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n"); in mlx4_dev_cap()
698 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; in mlx4_dev_cap()
699 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; in mlx4_dev_cap()
705 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); in mlx4_dev_cap()
711 if ((dev->caps.flags & in mlx4_dev_cap()
713 mlx4_is_master(dev)) in mlx4_dev_cap()
714 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; in mlx4_dev_cap()
716 if (!mlx4_is_slave(dev)) { in mlx4_dev_cap()
717 mlx4_enable_cqe_eqe_stride(dev); in mlx4_dev_cap()
718 dev->caps.alloc_res_qp_mask = in mlx4_dev_cap()
719 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) | in mlx4_dev_cap()
722 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) && in mlx4_dev_cap()
723 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { in mlx4_dev_cap()
724 mlx4_warn(dev, "Old device ETS support detected\n"); in mlx4_dev_cap()
725 mlx4_warn(dev, "Consider upgrading device FW.\n"); in mlx4_dev_cap()
726 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG; in mlx4_dev_cap()
730 dev->caps.alloc_res_qp_mask = 0; in mlx4_dev_cap()
733 mlx4_enable_ignore_fcs(dev); in mlx4_dev_cap()
739 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) in mlx4_how_many_lives_vf() argument
741 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_how_many_lives_vf()
746 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { in mlx4_how_many_lives_vf()
750 mlx4_warn(dev, "%s: slave: %d is still active\n", in mlx4_how_many_lives_vf()
758 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) in mlx4_get_parav_qkey() argument
762 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || in mlx4_get_parav_qkey()
763 qpn < dev->phys_caps.base_proxy_sqpn) in mlx4_get_parav_qkey()
766 if (qpn >= dev->phys_caps.base_tunnel_sqpn) in mlx4_get_parav_qkey()
768 qk += qpn - dev->phys_caps.base_tunnel_sqpn; in mlx4_get_parav_qkey()
770 qk += qpn - dev->phys_caps.base_proxy_sqpn; in mlx4_get_parav_qkey()
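The paravirtual qkey helper is nearly complete in the match; sketching it end to end (MLX4_RESERVED_QKEY_BASE and the final *qkey store are assumptions consistent with the fragments):

    int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
    {
        u32 qk = MLX4_RESERVED_QKEY_BASE;

        /* only proxy and tunnel special QPs own a paravirtual qkey */
        if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
            qpn < dev->phys_caps.base_proxy_sqpn)
            return -EINVAL;

        if (qpn >= dev->phys_caps.base_tunnel_sqpn)
            /* tunnel QP range sits above the proxy QP range */
            qk += qpn - dev->phys_caps.base_tunnel_sqpn;
        else
            qk += qpn - dev->phys_caps.base_proxy_sqpn;
        *qkey = qk;
        return 0;
    }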
776 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val) in mlx4_sync_pkey_table() argument
778 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); in mlx4_sync_pkey_table()
780 if (!mlx4_is_master(dev)) in mlx4_sync_pkey_table()
787 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid) in mlx4_put_slave_node_guid() argument
789 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); in mlx4_put_slave_node_guid()
791 if (!mlx4_is_master(dev)) in mlx4_put_slave_node_guid()
798 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave) in mlx4_get_slave_node_guid() argument
800 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); in mlx4_get_slave_node_guid()
802 if (!mlx4_is_master(dev)) in mlx4_get_slave_node_guid()
809 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) in mlx4_is_slave_active() argument
811 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_is_slave_active()
814 if (!mlx4_is_master(dev)) in mlx4_is_slave_active()
839 static void slave_adjust_steering_mode(struct mlx4_dev *dev, in slave_adjust_steering_mode() argument
843 dev->caps.steering_mode = hca_param->steering_mode; in slave_adjust_steering_mode()
844 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { in slave_adjust_steering_mode()
845 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; in slave_adjust_steering_mode()
846 dev->caps.fs_log_max_ucast_qp_range_size = in slave_adjust_steering_mode()
849 dev->caps.num_qp_per_mgm = in slave_adjust_steering_mode()
852 mlx4_dbg(dev, "Steering mode is: %s\n", in slave_adjust_steering_mode()
853 mlx4_steering_mode_str(dev->caps.steering_mode)); in slave_adjust_steering_mode()
856 static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev) in mlx4_slave_destroy_special_qp_cap() argument
858 kfree(dev->caps.spec_qps); in mlx4_slave_destroy_special_qp_cap()
859 dev->caps.spec_qps = NULL; in mlx4_slave_destroy_special_qp_cap()
862 static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev) in mlx4_slave_special_qp_cap() argument
865 struct mlx4_caps *caps = &dev->caps; in mlx4_slave_special_qp_cap()
872 mlx4_err(dev, "Failed to allocate memory for special qps cap\n"); in mlx4_slave_special_qp_cap()
878 err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap); in mlx4_slave_special_qp_cap()
880 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", in mlx4_slave_special_qp_cap()
887 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i, in mlx4_slave_special_qp_cap()
891 mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n", in mlx4_slave_special_qp_cap()
899 mlx4_slave_destroy_special_qp_cap(dev); in mlx4_slave_special_qp_cap()
904 static int mlx4_slave_cap(struct mlx4_dev *dev) in mlx4_slave_cap() argument
916 mlx4_err(dev, "Failed to allocate memory for slave_cap\n"); in mlx4_slave_cap()
921 err = mlx4_QUERY_HCA(dev, hca_param); in mlx4_slave_cap()
923 mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); in mlx4_slave_cap()
931 mlx4_err(dev, "Unknown hca global capabilities\n"); in mlx4_slave_cap()
936 dev->caps.hca_core_clock = hca_param->hca_core_clock; in mlx4_slave_cap()
938 dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp; in mlx4_slave_cap()
939 err = mlx4_dev_cap(dev, dev_cap); in mlx4_slave_cap()
941 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); in mlx4_slave_cap()
945 err = mlx4_QUERY_FW(dev); in mlx4_slave_cap()
947 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n"); in mlx4_slave_cap()
949 page_size = ~dev->caps.page_size_cap + 1; in mlx4_slave_cap()
950 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); in mlx4_slave_cap()
952 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", in mlx4_slave_cap()
959 dev->uar_page_shift = hca_param->uar_page_sz + 12; in mlx4_slave_cap()
962 if (dev->uar_page_shift > PAGE_SHIFT) { in mlx4_slave_cap()
963 mlx4_err(dev, in mlx4_slave_cap()
970 mlx4_set_num_reserved_uars(dev, dev_cap); in mlx4_slave_cap()
976 dev->caps.uar_page_size = PAGE_SIZE; in mlx4_slave_cap()
978 err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap); in mlx4_slave_cap()
980 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", in mlx4_slave_cap()
987 mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n", in mlx4_slave_cap()
994 dev->caps.num_ports = func_cap->num_ports; in mlx4_slave_cap()
995 dev->quotas.qp = func_cap->qp_quota; in mlx4_slave_cap()
996 dev->quotas.srq = func_cap->srq_quota; in mlx4_slave_cap()
997 dev->quotas.cq = func_cap->cq_quota; in mlx4_slave_cap()
998 dev->quotas.mpt = func_cap->mpt_quota; in mlx4_slave_cap()
999 dev->quotas.mtt = func_cap->mtt_quota; in mlx4_slave_cap()
1000 dev->caps.num_qps = 1 << hca_param->log_num_qps; in mlx4_slave_cap()
1001 dev->caps.num_srqs = 1 << hca_param->log_num_srqs; in mlx4_slave_cap()
1002 dev->caps.num_cqs = 1 << hca_param->log_num_cqs; in mlx4_slave_cap()
1003 dev->caps.num_mpts = 1 << hca_param->log_mpt_sz; in mlx4_slave_cap()
1004 dev->caps.num_eqs = func_cap->max_eq; in mlx4_slave_cap()
1005 dev->caps.reserved_eqs = func_cap->reserved_eq; in mlx4_slave_cap()
1006 dev->caps.reserved_lkey = func_cap->reserved_lkey; in mlx4_slave_cap()
1007 dev->caps.num_pds = MLX4_NUM_PDS; in mlx4_slave_cap()
1008 dev->caps.num_mgms = 0; in mlx4_slave_cap()
1009 dev->caps.num_amgms = 0; in mlx4_slave_cap()
1011 if (dev->caps.num_ports > MLX4_MAX_PORTS) { in mlx4_slave_cap()
1012 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", in mlx4_slave_cap()
1013 dev->caps.num_ports, MLX4_MAX_PORTS); in mlx4_slave_cap()
1018 mlx4_replace_zero_macs(dev); in mlx4_slave_cap()
1020 err = mlx4_slave_special_qp_cap(dev); in mlx4_slave_cap()
1022 mlx4_err(dev, "Set special QP caps failed. aborting\n"); in mlx4_slave_cap()
1026 if (dev->caps.uar_page_size * (dev->caps.num_uars - in mlx4_slave_cap()
1027 dev->caps.reserved_uars) > in mlx4_slave_cap()
1028 pci_resource_len(dev->persist->pdev, in mlx4_slave_cap()
1030 …mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, abo… in mlx4_slave_cap()
1031 dev->caps.uar_page_size * dev->caps.num_uars, in mlx4_slave_cap()
1033 pci_resource_len(dev->persist->pdev, 2)); in mlx4_slave_cap()
1039 dev->caps.eqe_size = 64; in mlx4_slave_cap()
1040 dev->caps.eqe_factor = 1; in mlx4_slave_cap()
1042 dev->caps.eqe_size = 32; in mlx4_slave_cap()
1043 dev->caps.eqe_factor = 0; in mlx4_slave_cap()
1047 dev->caps.cqe_size = 64; in mlx4_slave_cap()
1048 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; in mlx4_slave_cap()
1050 dev->caps.cqe_size = 32; in mlx4_slave_cap()
1054 dev->caps.eqe_size = hca_param->eqe_size; in mlx4_slave_cap()
1055 dev->caps.eqe_factor = 0; in mlx4_slave_cap()
1059 dev->caps.cqe_size = hca_param->cqe_size; in mlx4_slave_cap()
1061 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; in mlx4_slave_cap()
1064 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; in mlx4_slave_cap()
1065 mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); in mlx4_slave_cap()
1067 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN; in mlx4_slave_cap()
1068 mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n"); in mlx4_slave_cap()
1070 slave_adjust_steering_mode(dev, dev_cap, hca_param); in mlx4_slave_cap()
1071 mlx4_dbg(dev, "RSS support for IP fragments is %s\n", in mlx4_slave_cap()
1075 dev->caps.bf_reg_size) in mlx4_slave_cap()
1076 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP; in mlx4_slave_cap()
1079 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP; in mlx4_slave_cap()
1083 mlx4_slave_destroy_special_qp_cap(dev); in mlx4_slave_cap()
1095 int mlx4_change_port_types(struct mlx4_dev *dev, in mlx4_change_port_types() argument
1102 for (port = 0; port < dev->caps.num_ports; port++) { in mlx4_change_port_types()
1105 if (port_types[port] != dev->caps.port_type[port + 1]) in mlx4_change_port_types()
1109 mlx4_unregister_device(dev); in mlx4_change_port_types()
1110 for (port = 1; port <= dev->caps.num_ports; port++) { in mlx4_change_port_types()
1111 mlx4_CLOSE_PORT(dev, port); in mlx4_change_port_types()
1112 dev->caps.port_type[port] = port_types[port - 1]; in mlx4_change_port_types()
1113 err = mlx4_SET_PORT(dev, port, -1); in mlx4_change_port_types()
1115 mlx4_err(dev, "Failed to set port %d, aborting\n", in mlx4_change_port_types()
1120 mlx4_set_port_mask(dev); in mlx4_change_port_types()
1121 err = mlx4_register_device(dev); in mlx4_change_port_types()
1123 mlx4_err(dev, "Failed to register device\n"); in mlx4_change_port_types()
1132 static ssize_t show_port_type(struct device *dev, in show_port_type() argument
1138 struct mlx4_dev *mdev = info->dev; in show_port_type()
1155 struct mlx4_dev *mdev = info->dev; in __set_port_type()
1218 static ssize_t set_port_type(struct device *dev, in set_port_type() argument
1224 struct mlx4_dev *mdev = info->dev; in set_port_type()
1275 static ssize_t show_port_ib_mtu(struct device *dev, in show_port_ib_mtu() argument
1281 struct mlx4_dev *mdev = info->dev; in show_port_ib_mtu()
1291 static ssize_t set_port_ib_mtu(struct device *dev, in set_port_ib_mtu() argument
1297 struct mlx4_dev *mdev = info->dev; in set_port_ib_mtu()
1338 static int mlx4_mf_bond(struct mlx4_dev *dev) in mlx4_mf_bond() argument
1345 slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1); in mlx4_mf_bond()
1346 slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2); in mlx4_mf_bond()
1350 dev->persist->num_vfs + 1) > 1) { in mlx4_mf_bond()
1351 mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n"); in mlx4_mf_bond()
1358 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + in mlx4_mf_bond()
1359 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2; in mlx4_mf_bond()
1363 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n", in mlx4_mf_bond()
1368 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { in mlx4_mf_bond()
1369 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); in mlx4_mf_bond()
1373 err = mlx4_bond_mac_table(dev); in mlx4_mf_bond()
1376 err = mlx4_bond_vlan_table(dev); in mlx4_mf_bond()
1379 err = mlx4_bond_fs_rules(dev); in mlx4_mf_bond()
1385 (void)mlx4_unbond_vlan_table(dev); in mlx4_mf_bond()
1387 (void)mlx4_unbond_mac_table(dev); in mlx4_mf_bond()
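The mlx4_mf_bond() tail shows a classic unwind ladder: bond the MAC table, then the VLAN table, then the flow-steering rules, undoing the earlier steps when a later one fails. A sketch of that tail (label names are assumptions):

    /* tail of mlx4_mf_bond(), sketched */
    err = mlx4_bond_mac_table(dev);
    if (err)
        return err;
    err = mlx4_bond_vlan_table(dev);
    if (err)
        goto err_mac;
    err = mlx4_bond_fs_rules(dev);
    if (err)
        goto err_vlan;
    return 0;

    err_vlan:
        (void)mlx4_unbond_vlan_table(dev);
    err_mac:
        (void)mlx4_unbond_mac_table(dev);
        return err;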
1391 static int mlx4_mf_unbond(struct mlx4_dev *dev) in mlx4_mf_unbond() argument
1395 ret = mlx4_unbond_fs_rules(dev); in mlx4_mf_unbond()
1397 mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret); in mlx4_mf_unbond()
1398 ret1 = mlx4_unbond_mac_table(dev); in mlx4_mf_unbond()
1400 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1); in mlx4_mf_unbond()
1403 ret1 = mlx4_unbond_vlan_table(dev); in mlx4_mf_unbond()
1405 mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1); in mlx4_mf_unbond()
1411 static int mlx4_bond(struct mlx4_dev *dev) in mlx4_bond() argument
1414 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_bond()
1418 if (!mlx4_is_bonded(dev)) { in mlx4_bond()
1419 ret = mlx4_do_bond(dev, true); in mlx4_bond()
1421 mlx4_err(dev, "Failed to bond device: %d\n", ret); in mlx4_bond()
1422 if (!ret && mlx4_is_master(dev)) { in mlx4_bond()
1423 ret = mlx4_mf_bond(dev); in mlx4_bond()
1425 mlx4_err(dev, "bond for multifunction failed\n"); in mlx4_bond()
1426 mlx4_do_bond(dev, false); in mlx4_bond()
1433 mlx4_dbg(dev, "Device is bonded\n"); in mlx4_bond()
1438 static int mlx4_unbond(struct mlx4_dev *dev) in mlx4_unbond() argument
1441 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_unbond()
1445 if (mlx4_is_bonded(dev)) { in mlx4_unbond()
1448 ret = mlx4_do_bond(dev, false); in mlx4_unbond()
1450 mlx4_err(dev, "Failed to unbond device: %d\n", ret); in mlx4_unbond()
1451 if (mlx4_is_master(dev)) in mlx4_unbond()
1452 ret2 = mlx4_mf_unbond(dev); in mlx4_unbond()
1454 mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2); in mlx4_unbond()
1461 mlx4_dbg(dev, "Device is unbonded\n"); in mlx4_unbond()
1466 static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) in mlx4_port_map_set() argument
1470 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_port_map_set()
1473 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) in mlx4_port_map_set()
1494 err = mlx4_virt2phy_port_map(dev, port1, port2); in mlx4_port_map_set()
1496 mlx4_dbg(dev, "port map changed: [%d][%d]\n", in mlx4_port_map_set()
1501 mlx4_err(dev, "Failed to change port map: %d\n", err); in mlx4_port_map_set()
1511 struct mlx4_dev *dev; member
1522 if (!mlx4_is_bonded(bond->dev)) { in mlx4_bond_work()
1523 err = mlx4_bond(bond->dev); in mlx4_bond_work()
1525 mlx4_err(bond->dev, "Fail to bond device\n"); in mlx4_bond_work()
1528 err = mlx4_port_map_set(bond->dev, &bond->port_map); in mlx4_bond_work()
1530 mlx4_err(bond->dev, in mlx4_bond_work()
1535 } else if (mlx4_is_bonded(bond->dev)) { in mlx4_bond_work()
1536 err = mlx4_unbond(bond->dev); in mlx4_bond_work()
1538 mlx4_err(bond->dev, "Fail to unbond device\n"); in mlx4_bond_work()
1540 put_device(&bond->dev->persist->pdev->dev); in mlx4_bond_work()
1544 int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1, in mlx4_queue_bond_work() argument
1554 get_device(&dev->persist->pdev->dev); in mlx4_queue_bond_work()
1555 bond->dev = dev; in mlx4_queue_bond_work()
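mlx4_queue_bond_work() defers the (un)bond to a workqueue while pinning the PCI device with get_device(); the matching put_device() is visible at the end of mlx4_bond_work(). A sketch of the queueing side (the mlx4_bond struct layout beyond the visible dev member, the GFP flags, and the mlx4_wq workqueue name are assumptions):

    struct mlx4_bond {
        struct work_struct work;
        struct mlx4_dev *dev;
        int is_bonded;
        struct mlx4_port_map port_map;
    };

    int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
                             u8 v2p_p2)
    {
        struct mlx4_bond *bond;

        bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
        if (!bond)
            return -ENOMEM;

        INIT_WORK(&bond->work, mlx4_bond_work);
        get_device(&dev->persist->pdev->dev); /* dropped in mlx4_bond_work() */
        bond->dev = dev;
        bond->is_bonded = is_bonded;
        bond->port_map.port1 = v2p_p1;
        bond->port_map.port2 = v2p_p2;
        queue_work(mlx4_wq, &bond->work);
        return 0;
    }

The reference keeps the PCI device alive for as long as a bond request sits unprocessed on the queue.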
1564 static int mlx4_load_fw(struct mlx4_dev *dev) in mlx4_load_fw() argument
1566 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_load_fw()
1569 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, in mlx4_load_fw()
1572 mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); in mlx4_load_fw()
1576 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); in mlx4_load_fw()
1578 mlx4_err(dev, "MAP_FA command failed, aborting\n"); in mlx4_load_fw()
1582 err = mlx4_RUN_FW(dev); in mlx4_load_fw()
1584 mlx4_err(dev, "RUN_FW command failed, aborting\n"); in mlx4_load_fw()
1591 mlx4_UNMAP_FA(dev); in mlx4_load_fw()
1594 mlx4_free_icm(dev, priv->fw.fw_icm, 0); in mlx4_load_fw()
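mlx4_load_fw() is fully recoverable from its fragments: allocate ICM backing for the firmware area, MAP_FA it, then RUN_FW, unwinding in reverse order on failure. The GFP flags on the allocation are an assumption:

    static int mlx4_load_fw(struct mlx4_dev *dev)
    {
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        /* back the FW area with ICM pages, then map and start the FW */
        priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
        if (!priv->fw.fw_icm) {
            mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
            return -ENOMEM;
        }

        err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
        if (err) {
            mlx4_err(dev, "MAP_FA command failed, aborting\n");
            goto err_free;
        }

        err = mlx4_RUN_FW(dev);
        if (err) {
            mlx4_err(dev, "RUN_FW command failed, aborting\n");
            goto err_unmap_fa;
        }

        return 0;

    err_unmap_fa:
        mlx4_UNMAP_FA(dev);
    err_free:
        mlx4_free_icm(dev, priv->fw.fw_icm, 0);
        return err;
    }

The reverse-order unwind mirrors mlx4_close_fw() further down, which issues UNMAP_FA and then frees the same ICM chunk.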
1598 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, in mlx4_init_cmpt_table() argument
1601 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_cmpt_table()
1605 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, in mlx4_init_cmpt_table()
1609 cmpt_entry_sz, dev->caps.num_qps, in mlx4_init_cmpt_table()
1610 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], in mlx4_init_cmpt_table()
1615 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, in mlx4_init_cmpt_table()
1619 cmpt_entry_sz, dev->caps.num_srqs, in mlx4_init_cmpt_table()
1620 dev->caps.reserved_srqs, 0, 0); in mlx4_init_cmpt_table()
1624 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, in mlx4_init_cmpt_table()
1628 cmpt_entry_sz, dev->caps.num_cqs, in mlx4_init_cmpt_table()
1629 dev->caps.reserved_cqs, 0, 0); in mlx4_init_cmpt_table()
1633 num_eqs = dev->phys_caps.num_phys_eqs; in mlx4_init_cmpt_table()
1634 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, in mlx4_init_cmpt_table()
1645 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_init_cmpt_table()
1648 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); in mlx4_init_cmpt_table()
1651 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); in mlx4_init_cmpt_table()
1657 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, in mlx4_init_icm() argument
1660 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_icm()
1665 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); in mlx4_init_icm()
1667 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); in mlx4_init_icm()
1671 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", in mlx4_init_icm()
1675 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, in mlx4_init_icm()
1678 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); in mlx4_init_icm()
1682 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); in mlx4_init_icm()
1684 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); in mlx4_init_icm()
1688 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); in mlx4_init_icm()
1690 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); in mlx4_init_icm()
1695 num_eqs = dev->phys_caps.num_phys_eqs; in mlx4_init_icm()
1696 err = mlx4_init_icm_table(dev, &priv->eq_table.table, in mlx4_init_icm()
1700 mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); in mlx4_init_icm()
1708 * dev->caps.mtt_entry_sz below is really the MTT segment in mlx4_init_icm()
1711 dev->caps.reserved_mtts = in mlx4_init_icm()
1712 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, in mlx4_init_icm()
1713 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; in mlx4_init_icm()
1715 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, in mlx4_init_icm()
1717 dev->caps.mtt_entry_sz, in mlx4_init_icm()
1718 dev->caps.num_mtts, in mlx4_init_icm()
1719 dev->caps.reserved_mtts, 1, 0); in mlx4_init_icm()
1721 mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); in mlx4_init_icm()
1725 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, in mlx4_init_icm()
1728 dev->caps.num_mpts, in mlx4_init_icm()
1729 dev->caps.reserved_mrws, 1, 1); in mlx4_init_icm()
1731 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); in mlx4_init_icm()
1735 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, in mlx4_init_icm()
1738 dev->caps.num_qps, in mlx4_init_icm()
1739 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], in mlx4_init_icm()
1742 mlx4_err(dev, "Failed to map QP context memory, aborting\n"); in mlx4_init_icm()
1746 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, in mlx4_init_icm()
1749 dev->caps.num_qps, in mlx4_init_icm()
1750 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], in mlx4_init_icm()
1753 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); in mlx4_init_icm()
1757 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, in mlx4_init_icm()
1760 dev->caps.num_qps, in mlx4_init_icm()
1761 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], in mlx4_init_icm()
1764 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); in mlx4_init_icm()
1768 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, in mlx4_init_icm()
1771 dev->caps.num_qps, in mlx4_init_icm()
1772 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], in mlx4_init_icm()
1775 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); in mlx4_init_icm()
1779 err = mlx4_init_icm_table(dev, &priv->cq_table.table, in mlx4_init_icm()
1782 dev->caps.num_cqs, in mlx4_init_icm()
1783 dev->caps.reserved_cqs, 0, 0); in mlx4_init_icm()
1785 mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); in mlx4_init_icm()
1789 err = mlx4_init_icm_table(dev, &priv->srq_table.table, in mlx4_init_icm()
1792 dev->caps.num_srqs, in mlx4_init_icm()
1793 dev->caps.reserved_srqs, 0, 0); in mlx4_init_icm()
1795 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); in mlx4_init_icm()
1806 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, in mlx4_init_icm()
1808 mlx4_get_mgm_entry_size(dev), in mlx4_init_icm()
1809 dev->caps.num_mgms + dev->caps.num_amgms, in mlx4_init_icm()
1810 dev->caps.num_mgms + dev->caps.num_amgms, in mlx4_init_icm()
1813 mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); in mlx4_init_icm()
1820 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); in mlx4_init_icm()
1823 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); in mlx4_init_icm()
1826 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); in mlx4_init_icm()
1829 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); in mlx4_init_icm()
1832 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); in mlx4_init_icm()
1835 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); in mlx4_init_icm()
1838 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); in mlx4_init_icm()
1841 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); in mlx4_init_icm()
1844 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); in mlx4_init_icm()
1847 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); in mlx4_init_icm()
1848 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_init_icm()
1849 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); in mlx4_init_icm()
1850 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); in mlx4_init_icm()
1853 mlx4_UNMAP_ICM_AUX(dev); in mlx4_init_icm()
1856 mlx4_free_icm(dev, priv->fw.aux_icm, 0); in mlx4_init_icm()
1861 static void mlx4_free_icms(struct mlx4_dev *dev) in mlx4_free_icms() argument
1863 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_free_icms()
1865 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); in mlx4_free_icms()
1866 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); in mlx4_free_icms()
1867 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); in mlx4_free_icms()
1868 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); in mlx4_free_icms()
1869 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); in mlx4_free_icms()
1870 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); in mlx4_free_icms()
1871 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); in mlx4_free_icms()
1872 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); in mlx4_free_icms()
1873 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); in mlx4_free_icms()
1874 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); in mlx4_free_icms()
1875 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); in mlx4_free_icms()
1876 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_free_icms()
1877 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); in mlx4_free_icms()
1878 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); in mlx4_free_icms()
1880 mlx4_UNMAP_ICM_AUX(dev); in mlx4_free_icms()
1881 mlx4_free_icm(dev, priv->fw.aux_icm, 0); in mlx4_free_icms()
1884 static void mlx4_slave_exit(struct mlx4_dev *dev) in mlx4_slave_exit() argument
1886 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_slave_exit()
1889 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, in mlx4_slave_exit()
1891 mlx4_warn(dev, "Failed to close slave function\n"); in mlx4_slave_exit()
1895 static int map_bf_area(struct mlx4_dev *dev) in map_bf_area() argument
1897 struct mlx4_priv *priv = mlx4_priv(dev); in map_bf_area()
1902 if (!dev->caps.bf_reg_size) in map_bf_area()
1905 bf_start = pci_resource_start(dev->persist->pdev, 2) + in map_bf_area()
1906 (dev->caps.num_uars << PAGE_SHIFT); in map_bf_area()
1907 bf_len = pci_resource_len(dev->persist->pdev, 2) - in map_bf_area()
1908 (dev->caps.num_uars << PAGE_SHIFT); in map_bf_area()
1916 static void unmap_bf_area(struct mlx4_dev *dev) in unmap_bf_area() argument
1918 if (mlx4_priv(dev)->bf_mapping) in unmap_bf_area()
1919 io_mapping_free(mlx4_priv(dev)->bf_mapping); in unmap_bf_area()
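map_bf_area()/unmap_bf_area() carve the blue-flame registers out of BAR 2: everything past the UAR pages is wrapped in a write-combining io_mapping. A sketch (the io_mapping_create_wc() call and error values are assumptions consistent with the fragments):

    static int map_bf_area(struct mlx4_dev *dev)
    {
        struct mlx4_priv *priv = mlx4_priv(dev);
        resource_size_t bf_start;
        resource_size_t bf_len;
        int err = 0;

        if (!dev->caps.bf_reg_size)
            return -ENXIO;

        /* blue-flame pages follow the UAR pages inside BAR 2 */
        bf_start = pci_resource_start(dev->persist->pdev, 2) +
                   (dev->caps.num_uars << PAGE_SHIFT);
        bf_len = pci_resource_len(dev->persist->pdev, 2) -
                 (dev->caps.num_uars << PAGE_SHIFT);
        priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
        if (!priv->bf_mapping)
            err = -ENOMEM;

        return err;
    }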
1922 u64 mlx4_read_clock(struct mlx4_dev *dev) in mlx4_read_clock() argument
1927 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_read_clock()
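mlx4_read_clock() itself barely matched, but the usual mlx4 pattern for reading the 64-bit free-running counter is a hi/lo/hi re-read of the mapped clock page until the high word is stable. A sketch under that assumption (the register offsets, byte swapping, and mask are assumptions):

    u64 mlx4_read_clock(struct mlx4_dev *dev)
    {
        struct mlx4_priv *priv = mlx4_priv(dev);
        u32 clockhi, clocklo, clockhi1;
        int i;

        /* re-read until the high word did not wrap mid-read */
        for (i = 0; i < 10; i++) {
            clockhi  = swab32(readl(priv->clock_mapping));
            clocklo  = swab32(readl(priv->clock_mapping + 4));
            clockhi1 = swab32(readl(priv->clock_mapping));
            if (clockhi == clockhi1)
                break;
        }

        return ((u64)clockhi << 32 | clocklo) & CORE_CLOCK_MASK;
    }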
1944 static int map_internal_clock(struct mlx4_dev *dev) in map_internal_clock() argument
1946 struct mlx4_priv *priv = mlx4_priv(dev); in map_internal_clock()
1949 ioremap(pci_resource_start(dev->persist->pdev, in map_internal_clock()
1959 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, in mlx4_get_internal_clock_params() argument
1962 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_get_internal_clock_params()
1964 if (mlx4_is_slave(dev)) in mlx4_get_internal_clock_params()
1967 if (!dev->caps.map_clock_to_user) { in mlx4_get_internal_clock_params()
1968 mlx4_dbg(dev, "Map clock to user is not supported.\n"); in mlx4_get_internal_clock_params()
1983 static void unmap_internal_clock(struct mlx4_dev *dev) in unmap_internal_clock() argument
1985 struct mlx4_priv *priv = mlx4_priv(dev); in unmap_internal_clock()
1991 static void mlx4_close_hca(struct mlx4_dev *dev) in mlx4_close_hca() argument
1993 unmap_internal_clock(dev); in mlx4_close_hca()
1994 unmap_bf_area(dev); in mlx4_close_hca()
1995 if (mlx4_is_slave(dev)) in mlx4_close_hca()
1996 mlx4_slave_exit(dev); in mlx4_close_hca()
1998 mlx4_CLOSE_HCA(dev, 0); in mlx4_close_hca()
1999 mlx4_free_icms(dev); in mlx4_close_hca()
2003 static void mlx4_close_fw(struct mlx4_dev *dev) in mlx4_close_fw() argument
2005 if (!mlx4_is_slave(dev)) { in mlx4_close_fw()
2006 mlx4_UNMAP_FA(dev); in mlx4_close_fw()
2007 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); in mlx4_close_fw()
2011 static int mlx4_comm_check_offline(struct mlx4_dev *dev) in mlx4_comm_check_offline() argument
2018 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_comm_check_offline()
2032 if (dev->persist->interface_state & in mlx4_comm_check_offline()
2043 mlx4_err(dev, "Communication channel is offline.\n"); in mlx4_comm_check_offline()
2047 static void mlx4_reset_vf_support(struct mlx4_dev *dev) in mlx4_reset_vf_support() argument
2051 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_reset_vf_support()
2060 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; in mlx4_reset_vf_support()
2063 static int mlx4_init_slave(struct mlx4_dev *dev) in mlx4_init_slave() argument
2065 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_slave()
2072 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); in mlx4_init_slave()
2078 if (mlx4_comm_check_offline(dev)) { in mlx4_init_slave()
2079 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); in mlx4_init_slave()
2083 mlx4_reset_vf_support(dev); in mlx4_init_slave()
2084 mlx4_warn(dev, "Sending reset\n"); in mlx4_init_slave()
2085 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, in mlx4_init_slave()
2091 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); in mlx4_init_slave()
2105 mlx4_err(dev, "slave driver version is not supported by the master\n"); in mlx4_init_slave()
2109 mlx4_warn(dev, "Sending vhcr0\n"); in mlx4_init_slave()
2110 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, in mlx4_init_slave()
2113 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, in mlx4_init_slave()
2116 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, in mlx4_init_slave()
2119 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, in mlx4_init_slave()
2127 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); in mlx4_init_slave()
2133 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) in mlx4_parav_master_pf_caps() argument
2137 for (i = 1; i <= dev->caps.num_ports; i++) { in mlx4_parav_master_pf_caps()
2138 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) in mlx4_parav_master_pf_caps()
2139 dev->caps.gid_table_len[i] = in mlx4_parav_master_pf_caps()
2140 mlx4_get_slave_num_gids(dev, 0, i); in mlx4_parav_master_pf_caps()
2142 dev->caps.gid_table_len[i] = 1; in mlx4_parav_master_pf_caps()
2143 dev->caps.pkey_table_len[i] = in mlx4_parav_master_pf_caps()
2144 dev->phys_caps.pkey_phys_table_len[i] - 1; in mlx4_parav_master_pf_caps()
2186 static void choose_steering_mode(struct mlx4_dev *dev, in choose_steering_mode() argument
2191 if (dev->caps.dmfs_high_steer_mode == in choose_steering_mode()
2193 mlx4_err(dev, "DMFS high rate mode not supported\n"); in choose_steering_mode()
2195 dev->caps.dmfs_high_steer_mode = in choose_steering_mode()
2202 (!mlx4_is_mfunc(dev) || in choose_steering_mode()
2204 (dev->persist->num_vfs + 1))) && in choose_steering_mode()
2207 dev->oper_log_mgm_entry_size = in choose_steering_mode()
2209 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; in choose_steering_mode()
2210 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; in choose_steering_mode()
2211 dev->caps.fs_log_max_ucast_qp_range_size = in choose_steering_mode()
2214 if (dev->caps.dmfs_high_steer_mode != in choose_steering_mode()
2216 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; in choose_steering_mode()
2217 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && in choose_steering_mode()
2218 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) in choose_steering_mode()
2219 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; in choose_steering_mode()
2221 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; in choose_steering_mode()
2223 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || in choose_steering_mode()
2224 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) in choose_steering_mode()
2225 …mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back t… in choose_steering_mode()
2227 dev->oper_log_mgm_entry_size = in choose_steering_mode()
2231 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); in choose_steering_mode()
2233 …mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size… in choose_steering_mode()
2234 mlx4_steering_mode_str(dev->caps.steering_mode), in choose_steering_mode()
2235 dev->oper_log_mgm_entry_size, in choose_steering_mode()
2239 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, in choose_tunnel_offload_mode() argument
2242 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && in choose_tunnel_offload_mode()
2244 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; in choose_tunnel_offload_mode()
2246 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; in choose_tunnel_offload_mode()
2248 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode in choose_tunnel_offload_mode()
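choose_tunnel_offload_mode() reduces to a single condition; a sketch with the VXLAN capability flag name assumed:

    static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
                                           struct mlx4_dev_cap *dev_cap)
    {
        /* VXLAN offload needs device-managed flow steering plus FW support */
        if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
            dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
            dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
        else
            dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

        mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
                 dev->caps.tunnel_offload_mode ==
                 MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ? "vxlan" : "none");
    }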
2252 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) in mlx4_validate_optimized_steering() argument
2257 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) in mlx4_validate_optimized_steering()
2260 for (i = 1; i <= dev->caps.num_ports; i++) { in mlx4_validate_optimized_steering()
2261 if (mlx4_dev_port(dev, i, &port_cap)) { in mlx4_validate_optimized_steering()
2262 mlx4_err(dev, in mlx4_validate_optimized_steering()
2264 } else if ((dev->caps.dmfs_high_steer_mode != in mlx4_validate_optimized_steering()
2267 !!(dev->caps.dmfs_high_steer_mode == in mlx4_validate_optimized_steering()
2269 mlx4_err(dev, in mlx4_validate_optimized_steering()
2272 dev->caps.dmfs_high_steer_mode), in mlx4_validate_optimized_steering()
2281 static int mlx4_init_fw(struct mlx4_dev *dev) in mlx4_init_fw() argument
2286 if (!mlx4_is_slave(dev)) { in mlx4_init_fw()
2287 err = mlx4_QUERY_FW(dev); in mlx4_init_fw()
2290 mlx4_info(dev, "non-primary physical function, skipping\n"); in mlx4_init_fw()
2292 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); in mlx4_init_fw()
2296 err = mlx4_load_fw(dev); in mlx4_init_fw()
2298 mlx4_err(dev, "Failed to start FW, aborting\n"); in mlx4_init_fw()
2304 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); in mlx4_init_fw()
2306 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); in mlx4_init_fw()
2312 static int mlx4_init_hca(struct mlx4_dev *dev) in mlx4_init_hca() argument
2314 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_hca()
2323 if (!mlx4_is_slave(dev)) { in mlx4_init_hca()
2332 err = mlx4_dev_cap(dev, dev_cap); in mlx4_init_hca()
2334 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); in mlx4_init_hca()
2338 choose_steering_mode(dev, dev_cap); in mlx4_init_hca()
2339 choose_tunnel_offload_mode(dev, dev_cap); in mlx4_init_hca()
2341 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && in mlx4_init_hca()
2342 mlx4_is_master(dev)) in mlx4_init_hca()
2343 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; in mlx4_init_hca()
2345 err = mlx4_get_phys_port_id(dev); in mlx4_init_hca()
2347 mlx4_err(dev, "Fail to get physical port id\n"); in mlx4_init_hca()
2349 if (mlx4_is_master(dev)) in mlx4_init_hca()
2350 mlx4_parav_master_pf_caps(dev); in mlx4_init_hca()
2353 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n"); in mlx4_init_hca()
2358 if (dev->caps.steering_mode == in mlx4_init_hca()
2362 icm_size = mlx4_make_profile(dev, &profile, dev_cap, in mlx4_init_hca()
2369 if (enable_4k_uar || !dev->persist->num_vfs) { in mlx4_init_hca()
2370 init_hca->log_uar_sz = ilog2(dev->caps.num_uars) + in mlx4_init_hca()
2374 init_hca->log_uar_sz = ilog2(dev->caps.num_uars); in mlx4_init_hca()
2379 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || in mlx4_init_hca()
2380 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) in mlx4_init_hca()
2383 err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size); in mlx4_init_hca()
2387 err = mlx4_INIT_HCA(dev, init_hca); in mlx4_init_hca()
2389 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); in mlx4_init_hca()
2394 err = mlx4_query_func(dev, dev_cap); in mlx4_init_hca()
2396 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); in mlx4_init_hca()
2399 dev->caps.num_eqs = dev_cap->max_eqs; in mlx4_init_hca()
2400 dev->caps.reserved_eqs = dev_cap->reserved_eqs; in mlx4_init_hca()
2401 dev->caps.reserved_uars = dev_cap->reserved_uars; in mlx4_init_hca()
2409 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { in mlx4_init_hca()
2410 err = mlx4_QUERY_HCA(dev, init_hca); in mlx4_init_hca()
2412 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); in mlx4_init_hca()
2413 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; in mlx4_init_hca()
2415 dev->caps.hca_core_clock = in mlx4_init_hca()
2422 if (!dev->caps.hca_core_clock) { in mlx4_init_hca()
2423 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; in mlx4_init_hca()
2424 mlx4_err(dev, in mlx4_init_hca()
2426 } else if (map_internal_clock(dev)) { in mlx4_init_hca()
2431 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; in mlx4_init_hca()
2432 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); in mlx4_init_hca()
2436 if (dev->caps.dmfs_high_steer_mode != in mlx4_init_hca()
2438 if (mlx4_validate_optimized_steering(dev)) in mlx4_init_hca()
2439 mlx4_warn(dev, "Optimized steering validation failed\n"); in mlx4_init_hca()
2441 if (dev->caps.dmfs_high_steer_mode == in mlx4_init_hca()
2443 dev->caps.dmfs_high_rate_qpn_base = in mlx4_init_hca()
2444 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; in mlx4_init_hca()
2445 dev->caps.dmfs_high_rate_qpn_range = in mlx4_init_hca()
2449 mlx4_info(dev, "DMFS high rate steer mode is: %s\n", in mlx4_init_hca()
2451 dev->caps.dmfs_high_steer_mode)); in mlx4_init_hca()
2454 err = mlx4_init_slave(dev); in mlx4_init_hca()
2457 mlx4_err(dev, "Failed to initialize slave\n"); in mlx4_init_hca()
2461 err = mlx4_slave_cap(dev); in mlx4_init_hca()
2463 mlx4_err(dev, "Failed to obtain slave caps\n"); in mlx4_init_hca()
2468 if (map_bf_area(dev)) in mlx4_init_hca()
2469 mlx4_dbg(dev, "Failed to map blue flame area\n"); in mlx4_init_hca()
2472 if (!mlx4_is_slave(dev)) in mlx4_init_hca()
2473 mlx4_set_port_mask(dev); in mlx4_init_hca()
2475 err = mlx4_QUERY_ADAPTER(dev, &adapter); in mlx4_init_hca()
2477 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); in mlx4_init_hca()
2482 err = mlx4_config_dev_retrieval(dev, ¶ms); in mlx4_init_hca()
2484 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); in mlx4_init_hca()
2486 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; in mlx4_init_hca()
2487 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; in mlx4_init_hca()
2490 memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id)); in mlx4_init_hca()
2496 unmap_internal_clock(dev); in mlx4_init_hca()
2497 unmap_bf_area(dev); in mlx4_init_hca()
2499 if (mlx4_is_slave(dev)) in mlx4_init_hca()
2500 mlx4_slave_destroy_special_qp_cap(dev); in mlx4_init_hca()
2503 if (mlx4_is_slave(dev)) in mlx4_init_hca()
2504 mlx4_slave_exit(dev); in mlx4_init_hca()
2506 mlx4_CLOSE_HCA(dev, 0); in mlx4_init_hca()
2509 if (!mlx4_is_slave(dev)) in mlx4_init_hca()
2510 mlx4_free_icms(dev); in mlx4_init_hca()
2519 static int mlx4_init_counters_table(struct mlx4_dev *dev) in mlx4_init_counters_table() argument
2521 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_counters_table()
2524 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) in mlx4_init_counters_table()
2527 if (!dev->caps.max_counters) in mlx4_init_counters_table()
2530 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); in mlx4_init_counters_table()
2534 nent_pow2 - dev->caps.max_counters + 1); in mlx4_init_counters_table()
2537 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) in mlx4_cleanup_counters_table() argument
2539 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) in mlx4_cleanup_counters_table()
2542 if (!dev->caps.max_counters) in mlx4_cleanup_counters_table()
2545 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); in mlx4_cleanup_counters_table()
2548 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) in mlx4_cleanup_default_counters() argument
2550 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_cleanup_default_counters()
2553 for (port = 0; port < dev->caps.num_ports; port++) in mlx4_cleanup_default_counters()
2555 mlx4_counter_free(dev, priv->def_counter[port]); in mlx4_cleanup_default_counters()
2558 static int mlx4_allocate_default_counters(struct mlx4_dev *dev) in mlx4_allocate_default_counters() argument
2560 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_allocate_default_counters()
2564 for (port = 0; port < dev->caps.num_ports; port++) in mlx4_allocate_default_counters()
2567 for (port = 0; port < dev->caps.num_ports; port++) { in mlx4_allocate_default_counters()
2568 err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER); in mlx4_allocate_default_counters()
2576 } else if (mlx4_is_slave(dev) && err == -EINVAL) { in mlx4_allocate_default_counters()
2577 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); in mlx4_allocate_default_counters()
2578 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", in mlx4_allocate_default_counters()
2579 MLX4_SINK_COUNTER_INDEX(dev)); in mlx4_allocate_default_counters()
2582 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", in mlx4_allocate_default_counters()
2584 mlx4_cleanup_default_counters(dev); in mlx4_allocate_default_counters()
2588 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", in mlx4_allocate_default_counters()
2595 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) in __mlx4_counter_alloc() argument
2597 struct mlx4_priv *priv = mlx4_priv(dev); in __mlx4_counter_alloc()
2599 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) in __mlx4_counter_alloc()
2604 *idx = MLX4_SINK_COUNTER_INDEX(dev); in __mlx4_counter_alloc()
2611 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage) in mlx4_counter_alloc() argument
2617 if (mlx4_is_mfunc(dev)) { in mlx4_counter_alloc()
2618 err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier, in mlx4_counter_alloc()
2627 return __mlx4_counter_alloc(dev, idx); in mlx4_counter_alloc()
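__mlx4_counter_alloc() draws from the counters bitmap and falls back to the shared sink counter index when the pool is exhausted; the multi-function mlx4_counter_alloc() wrapper instead asks the PF via mlx4_cmd_imm(), as the fragments show. A sketch of the native path (the bitmap call and return codes are assumptions):

    int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
    {
        struct mlx4_priv *priv = mlx4_priv(dev);

        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
            return -ENOENT;

        *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
        if (*idx == -1) {
            /* pool exhausted: report the shared "sink" counter */
            *idx = MLX4_SINK_COUNTER_INDEX(dev);
            return -ENOSPC;
        }

        return 0;
    }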
2631 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, in __mlx4_clear_if_stat() argument
2638 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); in __mlx4_clear_if_stat()
2642 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, in __mlx4_clear_if_stat()
2646 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); in __mlx4_clear_if_stat()
2650 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) in __mlx4_counter_free() argument
2652 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) in __mlx4_counter_free()
2655 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) in __mlx4_counter_free()
2658 __mlx4_clear_if_stat(dev, idx); in __mlx4_counter_free()
2660 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); in __mlx4_counter_free()
2664 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) in mlx4_counter_free() argument
2668 if (mlx4_is_mfunc(dev)) { in mlx4_counter_free()
2670 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, in mlx4_counter_free()
2675 __mlx4_counter_free(dev, idx); in mlx4_counter_free()
2679 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) in mlx4_get_default_counter_index() argument
2681 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_get_default_counter_index()
2687 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port) in mlx4_set_admin_guid() argument
2689 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_set_admin_guid()
2695 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) in mlx4_get_admin_guid() argument
2697 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_get_admin_guid()
2703 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) in mlx4_set_random_admin_guid() argument
2705 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_set_random_admin_guid()
2718 static int mlx4_setup_hca(struct mlx4_dev *dev) in mlx4_setup_hca() argument
2720 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_setup_hca()
2725 err = mlx4_init_uar_table(dev); in mlx4_setup_hca()
2727 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); in mlx4_setup_hca()
2731 err = mlx4_uar_alloc(dev, &priv->driver_uar); in mlx4_setup_hca()
2733 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); in mlx4_setup_hca()
2739 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); in mlx4_setup_hca()
2744 err = mlx4_init_pd_table(dev); in mlx4_setup_hca()
2746 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); in mlx4_setup_hca()
2750 err = mlx4_init_xrcd_table(dev); in mlx4_setup_hca()
2752 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); in mlx4_setup_hca()
2756 err = mlx4_init_mr_table(dev); in mlx4_setup_hca()
2758 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); in mlx4_setup_hca()
2762 if (!mlx4_is_slave(dev)) { in mlx4_setup_hca()
2763 err = mlx4_init_mcg_table(dev); in mlx4_setup_hca()
2765 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); in mlx4_setup_hca()
2768 err = mlx4_config_mad_demux(dev); in mlx4_setup_hca()
2770 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); in mlx4_setup_hca()
2775 err = mlx4_init_eq_table(dev); in mlx4_setup_hca()
2777 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); in mlx4_setup_hca()
2781 err = mlx4_cmd_use_events(dev); in mlx4_setup_hca()
2783 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); in mlx4_setup_hca()
2787 err = mlx4_NOP(dev); in mlx4_setup_hca()
2789 if (dev->flags & MLX4_FLAG_MSI_X) { in mlx4_setup_hca()
2790 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", in mlx4_setup_hca()
2792 mlx4_warn(dev, "Trying again without MSI-X\n"); in mlx4_setup_hca()
2794 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", in mlx4_setup_hca()
2796 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); in mlx4_setup_hca()
2802 mlx4_dbg(dev, "NOP command IRQ test passed\n"); in mlx4_setup_hca()
2804 err = mlx4_init_cq_table(dev); in mlx4_setup_hca()
2806 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); in mlx4_setup_hca()
2810 err = mlx4_init_srq_table(dev); in mlx4_setup_hca()
2812 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); in mlx4_setup_hca()
2816 err = mlx4_init_qp_table(dev); in mlx4_setup_hca()
2818 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); in mlx4_setup_hca()
2822 if (!mlx4_is_slave(dev)) { in mlx4_setup_hca()
2823 err = mlx4_init_counters_table(dev); in mlx4_setup_hca()
2825 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); in mlx4_setup_hca()
2830 err = mlx4_allocate_default_counters(dev); in mlx4_setup_hca()
2832 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); in mlx4_setup_hca()
2836 if (!mlx4_is_slave(dev)) { in mlx4_setup_hca()
2837 for (port = 1; port <= dev->caps.num_ports; port++) { in mlx4_setup_hca()
2839 err = mlx4_get_port_ib_caps(dev, port, in mlx4_setup_hca()
2842 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", in mlx4_setup_hca()
2844 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; in mlx4_setup_hca()
2847 if (mlx4_is_master(dev)) { in mlx4_setup_hca()
2849 for (i = 0; i < dev->num_slaves; i++) { in mlx4_setup_hca()
2850 if (i == mlx4_master_func_num(dev)) in mlx4_setup_hca()
2857 if (mlx4_is_mfunc(dev)) in mlx4_setup_hca()
2858 dev->caps.port_ib_mtu[port] = IB_MTU_2048; in mlx4_setup_hca()
2860 dev->caps.port_ib_mtu[port] = IB_MTU_4096; in mlx4_setup_hca()
2862 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? in mlx4_setup_hca()
2863 dev->caps.pkey_table_len[port] : -1); in mlx4_setup_hca()
2865 mlx4_err(dev, "Failed to set port %d, aborting\n", in mlx4_setup_hca()
2875 mlx4_cleanup_default_counters(dev); in mlx4_setup_hca()
2878 if (!mlx4_is_slave(dev)) in mlx4_setup_hca()
2879 mlx4_cleanup_counters_table(dev); in mlx4_setup_hca()
2882 mlx4_cleanup_qp_table(dev); in mlx4_setup_hca()
2885 mlx4_cleanup_srq_table(dev); in mlx4_setup_hca()
2888 mlx4_cleanup_cq_table(dev); in mlx4_setup_hca()
2891 mlx4_cmd_use_polling(dev); in mlx4_setup_hca()
2894 mlx4_cleanup_eq_table(dev); in mlx4_setup_hca()
2897 if (!mlx4_is_slave(dev)) in mlx4_setup_hca()
2898 mlx4_cleanup_mcg_table(dev); in mlx4_setup_hca()
2901 mlx4_cleanup_mr_table(dev); in mlx4_setup_hca()
2904 mlx4_cleanup_xrcd_table(dev); in mlx4_setup_hca()
2907 mlx4_cleanup_pd_table(dev); in mlx4_setup_hca()
2913 mlx4_uar_free(dev, &priv->driver_uar); in mlx4_setup_hca()
2916 mlx4_cleanup_uar_table(dev); in mlx4_setup_hca()
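mlx4_setup_hca() is a textbook goto-unwind ladder: every init step that fails jumps to the label that tears down exactly the steps already completed, in reverse order, so the error labels at the bottom mirror the setup sequence at the top. A minimal sketch of the pattern, with illustrative init_a/init_b/init_c names that are not driver functions:

static int example_setup(struct mlx4_dev *dev)
{
        int err;

        err = init_a(dev);
        if (err)
                return err;             /* nothing to unwind yet */

        err = init_b(dev);
        if (err)
                goto err_a;

        err = init_c(dev);
        if (err)
                goto err_b;

        return 0;

err_b:
        cleanup_b(dev);                 /* teardown in reverse order */
err_a:
        cleanup_a(dev);
        return err;
}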
2920 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) in mlx4_init_affinity_hint() argument
2923 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_affinity_hint()
2928 if (eqn > dev->caps.num_comp_vectors) in mlx4_init_affinity_hint()
2932 off += mlx4_get_eqs_per_port(dev, i); in mlx4_init_affinity_hint()
2950 static void mlx4_enable_msi_x(struct mlx4_dev *dev) in mlx4_enable_msi_x() argument
2952 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_enable_msi_x()
2958 int nreq = min3(dev->caps.num_ports * in mlx4_enable_msi_x()
2960 dev->caps.num_eqs - dev->caps.reserved_eqs, in mlx4_enable_msi_x()
2973 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, in mlx4_enable_msi_x()
2981 dev->caps.num_comp_vectors = nreq - 1; in mlx4_enable_msi_x()
2985 dev->caps.num_ports); in mlx4_enable_msi_x()
2987 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { in mlx4_enable_msi_x()
2994 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { in mlx4_enable_msi_x()
2996 dev->caps.num_ports); in mlx4_enable_msi_x()
3003 if (mlx4_init_affinity_hint(dev, port + 1, i)) in mlx4_enable_msi_x()
3004 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", in mlx4_enable_msi_x()
3008 * (dev->caps.num_comp_vectors / dev->caps.num_ports) in mlx4_enable_msi_x()
3016 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && in mlx4_enable_msi_x()
3018 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == in mlx4_enable_msi_x()
3020 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, in mlx4_enable_msi_x()
3026 dev->flags |= MLX4_FLAG_MSI_X; in mlx4_enable_msi_x()
3033 dev->caps.num_comp_vectors = 1; in mlx4_enable_msi_x()
3037 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; in mlx4_enable_msi_x()
3040 dev->caps.num_ports); in mlx4_enable_msi_x()
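The MSI-X negotiation above requests one vector per completion queue plus one for the async EQ, accepts as few as two, and falls back to legacy INTx otherwise. A hedged sketch of that negotiation, with the EQ-table plumbing and affinity hints elided:

static void example_enable_msi_x(struct mlx4_dev *dev, int nreq)
{
        struct msix_entry *entries;
        int i;

        entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                goto no_msi;

        for (i = 0; i < nreq; i++)
                entries[i].entry = i;

        /* Accept anything from 2 vectors (async EQ + one completion EQ)
         * up to the full request; a negative return means no MSI-X. */
        nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, nreq);
        if (nreq < 0) {
                kfree(entries);
                goto no_msi;
        }

        dev->caps.num_comp_vectors = nreq - 1;  /* one vector feeds the async EQ */
        dev->flags |= MLX4_FLAG_MSI_X;
        /* The real function copies entries[i].vector into the EQ table
         * before freeing the array; elided here. */
        kfree(entries);
        return;

no_msi:
        dev->caps.num_comp_vectors = 1;
        dev->flags &= ~MLX4_FLAG_MSI_X;
        /* All EQs then share the legacy line, as quoted above:
         * priv->eq_table.eq[i].irq = dev->persist->pdev->irq */
}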
3074 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) in mlx4_init_port_info() argument
3076 struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); in mlx4_init_port_info()
3077 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; in mlx4_init_port_info()
3090 dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) in mlx4_init_port_info()
3093 dev->caps.port_type[port] == MLX4_PORT_TYPE_IB) in mlx4_init_port_info()
3096 info->dev = dev; in mlx4_init_port_info()
3098 if (!mlx4_is_slave(dev)) { in mlx4_init_port_info()
3099 mlx4_init_mac_table(dev, &info->mac_table); in mlx4_init_port_info()
3100 mlx4_init_vlan_table(dev, &info->vlan_table); in mlx4_init_port_info()
3101 mlx4_init_roce_gid_table(dev, &info->gid_table); in mlx4_init_port_info()
3102 info->base_qpn = mlx4_get_base_qpn(dev, port); in mlx4_init_port_info()
3107 if (mlx4_is_mfunc(dev)) { in mlx4_init_port_info()
3116 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); in mlx4_init_port_info()
3118 mlx4_err(dev, "Failed to create file for port %d\n", port); in mlx4_init_port_info()
3127 if (mlx4_is_mfunc(dev)) { in mlx4_init_port_info()
3136 err = device_create_file(&dev->persist->pdev->dev, in mlx4_init_port_info()
3139 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); in mlx4_init_port_info()
3140 device_remove_file(&info->dev->persist->pdev->dev, in mlx4_init_port_info()
3156 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); in mlx4_cleanup_port_info()
3157 device_remove_file(&info->dev->persist->pdev->dev, in mlx4_cleanup_port_info()
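mlx4_init_port_info() publishes two sysfs files per port and unwinds the first when the second fails, as the device_remove_file() call in the quoted error path shows. A sketch of that pairing; the port_mtu_attr field name is assumed from the driver's private headers, and the attribute initialization itself is elided:

static int example_create_port_files(struct mlx4_port_info *info)
{
        struct device *d = &info->dev->persist->pdev->dev;
        int err;

        err = device_create_file(d, &info->port_attr);
        if (err)
                return err;

        err = device_create_file(d, &info->port_mtu_attr);
        if (err) {
                /* unwind the first attribute, as the quoted lines do */
                device_remove_file(d, &info->port_attr);
                return err;
        }
        return 0;
}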
3168 static int mlx4_init_steering(struct mlx4_dev *dev) in mlx4_init_steering() argument
3170 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_init_steering()
3171 int num_entries = dev->caps.num_ports; in mlx4_init_steering()
3187 static void mlx4_clear_steering(struct mlx4_dev *dev) in mlx4_clear_steering() argument
3189 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_clear_steering()
3192 int num_entries = dev->caps.num_ports; in mlx4_clear_steering()
3228 static int mlx4_get_ownership(struct mlx4_dev *dev) in mlx4_get_ownership() argument
3233 if (pci_channel_offline(dev->persist->pdev)) in mlx4_get_ownership()
3236 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + in mlx4_get_ownership()
3240 mlx4_err(dev, "Failed to obtain ownership bit\n"); in mlx4_get_ownership()
3249 static void mlx4_free_ownership(struct mlx4_dev *dev) in mlx4_free_ownership() argument
3253 if (pci_channel_offline(dev->persist->pdev)) in mlx4_free_ownership()
3256 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + in mlx4_free_ownership()
3260 mlx4_err(dev, "Failed to obtain ownership bit\n"); in mlx4_free_ownership()
3271 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, in mlx4_enable_sriov() argument
3274 u64 dev_flags = dev->flags; in mlx4_enable_sriov()
3280 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), in mlx4_enable_sriov()
3282 if (!dev->dev_vfs) in mlx4_enable_sriov()
3288 if (dev->flags & MLX4_FLAG_SRIOV) { in mlx4_enable_sriov()
3290 … mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", in mlx4_enable_sriov()
3296 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL); in mlx4_enable_sriov()
3297 if (!dev->dev_vfs) { in mlx4_enable_sriov()
3298 mlx4_err(dev, "Failed to allocate memory for VFs\n"); in mlx4_enable_sriov()
3302 if (!(dev->flags & MLX4_FLAG_SRIOV)) { in mlx4_enable_sriov()
3304 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n", in mlx4_enable_sriov()
3309 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); in mlx4_enable_sriov()
3313 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", in mlx4_enable_sriov()
3317 mlx4_warn(dev, "Running in master mode\n"); in mlx4_enable_sriov()
3321 dev->persist->num_vfs = total_vfs; in mlx4_enable_sriov()
3328 dev->persist->num_vfs = 0; in mlx4_enable_sriov()
3329 kfree(dev->dev_vfs); in mlx4_enable_sriov()
3330 dev->dev_vfs = NULL; in mlx4_enable_sriov()
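Between the quoted lines, mlx4_enable_sriov() makes the actual pci_enable_sriov() call: on failure it keeps running as a plain PF, on success it switches to master mode and records the VF count. A sketch of that elided step, assembled from the surrounding lines:

        err = pci_enable_sriov(pdev, total_vfs);
        if (err) {
                mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
                         err);
        } else {
                mlx4_warn(dev, "Running in master mode\n");
                dev_flags |= MLX4_FLAG_SRIOV | MLX4_FLAG_MASTER;
                dev->persist->num_vfs = total_vfs;
        }
        return dev_flags;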
3338 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, in mlx4_check_dev_cap() argument
3345 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", in mlx4_check_dev_cap()
3352 static int mlx4_pci_enable_device(struct mlx4_dev *dev) in mlx4_pci_enable_device() argument
3354 struct pci_dev *pdev = dev->persist->pdev; in mlx4_pci_enable_device()
3357 mutex_lock(&dev->persist->pci_status_mutex); in mlx4_pci_enable_device()
3358 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { in mlx4_pci_enable_device()
3361 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; in mlx4_pci_enable_device()
3363 mutex_unlock(&dev->persist->pci_status_mutex); in mlx4_pci_enable_device()
3368 static void mlx4_pci_disable_device(struct mlx4_dev *dev) in mlx4_pci_disable_device() argument
3370 struct pci_dev *pdev = dev->persist->pdev; in mlx4_pci_disable_device()
3372 mutex_lock(&dev->persist->pci_status_mutex); in mlx4_pci_disable_device()
3373 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { in mlx4_pci_disable_device()
3375 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; in mlx4_pci_disable_device()
3377 mutex_unlock(&dev->persist->pci_status_mutex); in mlx4_pci_disable_device()
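mlx4_pci_enable_device()/mlx4_pci_disable_device() serialize PCI state changes behind pci_status_mutex so the AER handlers, shutdown, and remove paths can race safely; the status enum makes the toggle idempotent. The elided middle of the enable path, reconstructed from the quoted lines:

        int ret = 0;

        mutex_lock(&dev->persist->pci_status_mutex);
        if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
                ret = pci_enable_device(pdev);
                if (!ret)
                        dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
        }
        mutex_unlock(&dev->persist->pci_status_mutex);
        return ret;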
3385 struct mlx4_dev *dev; in mlx4_load_one() local
3394 dev = &priv->dev; in mlx4_load_one()
3396 err = mlx4_adev_init(dev); in mlx4_load_one()
3412 dev->rev_id = pdev->revision; in mlx4_load_one()
3413 dev->numa_node = dev_to_node(&pdev->dev); in mlx4_load_one()
3417 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); in mlx4_load_one()
3418 dev->flags |= MLX4_FLAG_SLAVE; in mlx4_load_one()
3423 err = mlx4_get_ownership(dev); in mlx4_load_one()
3428 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); in mlx4_load_one()
3442 err = mlx4_reset(dev); in mlx4_load_one()
3444 mlx4_err(dev, "Failed to reset HCA, aborting\n"); in mlx4_load_one()
3449 dev->flags = MLX4_FLAG_MASTER; in mlx4_load_one()
3452 dev->flags |= MLX4_FLAG_SRIOV; in mlx4_load_one()
3453 dev->persist->num_vfs = total_vfs; in mlx4_load_one()
3460 dev->persist->state = MLX4_DEVICE_STATE_UP; in mlx4_load_one()
3463 err = mlx4_cmd_init(dev); in mlx4_load_one()
3465 mlx4_err(dev, "Failed to init command interface, aborting\n"); in mlx4_load_one()
3472 if (mlx4_is_mfunc(dev)) { in mlx4_load_one()
3473 if (mlx4_is_master(dev)) { in mlx4_load_one()
3474 dev->num_slaves = MLX4_MAX_NUM_SLAVES; in mlx4_load_one()
3477 dev->num_slaves = 0; in mlx4_load_one()
3478 err = mlx4_multi_func_init(dev); in mlx4_load_one()
3480 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); in mlx4_load_one()
3486 err = mlx4_init_fw(dev); in mlx4_load_one()
3488 mlx4_err(dev, "Failed to init fw, aborting.\n"); in mlx4_load_one()
3492 if (mlx4_is_master(dev)) { in mlx4_load_one()
3502 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); in mlx4_load_one()
3504 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); in mlx4_load_one()
3508 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) in mlx4_load_one()
3512 u64 dev_flags = mlx4_enable_sriov(dev, pdev, in mlx4_load_one()
3517 mlx4_close_fw(dev); in mlx4_load_one()
3518 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); in mlx4_load_one()
3519 dev->flags = dev_flags; in mlx4_load_one()
3520 if (!SRIOV_VALID_STATE(dev->flags)) { in mlx4_load_one()
3521 mlx4_err(dev, "Invalid SRIOV state\n"); in mlx4_load_one()
3524 err = mlx4_reset(dev); in mlx4_load_one()
3526 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); in mlx4_load_one()
3537 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); in mlx4_load_one()
3539 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); in mlx4_load_one()
3543 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) in mlx4_load_one()
3548 err = mlx4_init_hca(dev); in mlx4_load_one()
3553 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); in mlx4_load_one()
3555 if (dev->flags & MLX4_FLAG_SRIOV) { in mlx4_load_one()
3558 if (mlx4_is_master(dev) && !reset_flow) in mlx4_load_one()
3560 dev->flags &= ~MLX4_FLAG_SRIOV; in mlx4_load_one()
3562 if (!mlx4_is_slave(dev)) in mlx4_load_one()
3563 mlx4_free_ownership(dev); in mlx4_load_one()
3564 dev->flags |= MLX4_FLAG_SLAVE; in mlx4_load_one()
3565 dev->flags &= ~MLX4_FLAG_MASTER; in mlx4_load_one()
3571 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { in mlx4_load_one()
3572 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, in mlx4_load_one()
3575 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { in mlx4_load_one()
3576 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); in mlx4_load_one()
3577 dev->flags = dev_flags; in mlx4_load_one()
3578 err = mlx4_cmd_init(dev); in mlx4_load_one()
3583 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); in mlx4_load_one()
3587 dev->flags = dev_flags; in mlx4_load_one()
3590 if (!SRIOV_VALID_STATE(dev->flags)) { in mlx4_load_one()
3591 mlx4_err(dev, "Invalid SRIOV state\n"); in mlx4_load_one()
3601 if (!mlx4_is_slave(dev)) in mlx4_load_one()
3602 pcie_print_link_status(dev->persist->pdev); in mlx4_load_one()
3606 if (mlx4_is_master(dev)) { in mlx4_load_one()
3607 if (dev->caps.num_ports < 2 && in mlx4_load_one()
3610 mlx4_err(dev, in mlx4_load_one()
3612 dev->caps.num_ports); in mlx4_load_one()
3615 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); in mlx4_load_one()
3618 i < sizeof(dev->persist->nvfs)/ in mlx4_load_one()
3619 sizeof(dev->persist->nvfs[0]); i++) { in mlx4_load_one()
3622 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { in mlx4_load_one()
3623 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; in mlx4_load_one()
3624 dev->dev_vfs[sum].n_ports = i < 2 ? 1 : in mlx4_load_one()
3625 dev->caps.num_ports; in mlx4_load_one()
3632 err = mlx4_multi_func_init(dev); in mlx4_load_one()
3634 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); in mlx4_load_one()
3639 err = mlx4_alloc_eq_table(dev); in mlx4_load_one()
3646 mlx4_enable_msi_x(dev); in mlx4_load_one()
3647 if ((mlx4_is_mfunc(dev)) && in mlx4_load_one()
3648 !(dev->flags & MLX4_FLAG_MSI_X)) { in mlx4_load_one()
3650 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); in mlx4_load_one()
3654 if (!mlx4_is_slave(dev)) { in mlx4_load_one()
3655 err = mlx4_init_steering(dev); in mlx4_load_one()
3660 mlx4_init_quotas(dev); in mlx4_load_one()
3662 err = mlx4_setup_hca(dev); in mlx4_load_one()
3663 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && in mlx4_load_one()
3664 !mlx4_is_mfunc(dev)) { in mlx4_load_one()
3665 dev->flags &= ~MLX4_FLAG_MSI_X; in mlx4_load_one()
3666 dev->caps.num_comp_vectors = 1; in mlx4_load_one()
3668 err = mlx4_setup_hca(dev); in mlx4_load_one()
3677 if (mlx4_is_master(dev)) { in mlx4_load_one()
3678 err = mlx4_ARM_COMM_CHANNEL(dev); in mlx4_load_one()
3680 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", in mlx4_load_one()
3686 for (port = 1; port <= dev->caps.num_ports; port++) { in mlx4_load_one()
3687 err = mlx4_init_port_info(dev, port); in mlx4_load_one()
3695 err = mlx4_register_device(dev); in mlx4_load_one()
3699 mlx4_sense_init(dev); in mlx4_load_one()
3700 mlx4_start_sense(dev); in mlx4_load_one()
3704 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) in mlx4_load_one()
3714 mlx4_cleanup_default_counters(dev); in mlx4_load_one()
3715 if (!mlx4_is_slave(dev)) in mlx4_load_one()
3716 mlx4_cleanup_counters_table(dev); in mlx4_load_one()
3717 mlx4_cleanup_qp_table(dev); in mlx4_load_one()
3718 mlx4_cleanup_srq_table(dev); in mlx4_load_one()
3719 mlx4_cleanup_cq_table(dev); in mlx4_load_one()
3720 mlx4_cmd_use_polling(dev); in mlx4_load_one()
3721 mlx4_cleanup_eq_table(dev); in mlx4_load_one()
3722 mlx4_cleanup_mcg_table(dev); in mlx4_load_one()
3723 mlx4_cleanup_mr_table(dev); in mlx4_load_one()
3724 mlx4_cleanup_xrcd_table(dev); in mlx4_load_one()
3725 mlx4_cleanup_pd_table(dev); in mlx4_load_one()
3726 mlx4_cleanup_uar_table(dev); in mlx4_load_one()
3729 if (!mlx4_is_slave(dev)) in mlx4_load_one()
3730 mlx4_clear_steering(dev); in mlx4_load_one()
3733 if (dev->flags & MLX4_FLAG_MSI_X) in mlx4_load_one()
3737 mlx4_free_eq_table(dev); in mlx4_load_one()
3740 if (mlx4_is_master(dev)) { in mlx4_load_one()
3741 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); in mlx4_load_one()
3742 mlx4_multi_func_cleanup(dev); in mlx4_load_one()
3745 if (mlx4_is_slave(dev)) in mlx4_load_one()
3746 mlx4_slave_destroy_special_qp_cap(dev); in mlx4_load_one()
3749 mlx4_close_hca(dev); in mlx4_load_one()
3752 mlx4_close_fw(dev); in mlx4_load_one()
3755 if (mlx4_is_slave(dev)) in mlx4_load_one()
3756 mlx4_multi_func_cleanup(dev); in mlx4_load_one()
3759 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); in mlx4_load_one()
3762 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { in mlx4_load_one()
3764 dev->flags &= ~MLX4_FLAG_SRIOV; in mlx4_load_one()
3767 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) in mlx4_load_one()
3770 kfree(priv->dev.dev_vfs); in mlx4_load_one()
3772 if (!mlx4_is_slave(dev)) in mlx4_load_one()
3773 mlx4_free_ownership(dev); in mlx4_load_one()
3778 mlx4_adev_cleanup(dev); in mlx4_load_one()
3795 err = mlx4_pci_enable_device(&priv->dev); in __mlx4_init_one()
3797 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); in __mlx4_init_one()
3809 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); in __mlx4_init_one()
3818 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); in __mlx4_init_one()
3824 dev_err(&pdev->dev, in __mlx4_init_one()
3833 dev_err(&pdev->dev, in __mlx4_init_one()
3845 …dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\… in __mlx4_init_one()
3851 dev_err(&pdev->dev, "Missing UAR, aborting\n"); in __mlx4_init_one()
3858 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); in __mlx4_init_one()
3864 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in __mlx4_init_one()
3866 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); in __mlx4_init_one()
3867 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in __mlx4_init_one()
3869 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); in __mlx4_init_one()
3875 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); in __mlx4_init_one()
3894 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", in __mlx4_init_one()
3902 err = mlx4_crdump_init(&priv->dev); in __mlx4_init_one()
3906 err = mlx4_catas_init(&priv->dev); in __mlx4_init_one()
3917 mlx4_catas_end(&priv->dev); in __mlx4_init_one()
3920 mlx4_crdump_end(&priv->dev); in __mlx4_init_one()
3926 mlx4_pci_disable_device(&priv->dev); in __mlx4_init_one()
3933 struct mlx4_dev *dev = &priv->dev; in mlx4_devlink_param_load_driverinit_values() local
3934 struct mlx4_fw_crdump *crdump = &dev->persist->crdump; in mlx4_devlink_param_load_driverinit_values()
3982 struct mlx4_dev *dev = &priv->dev; in mlx4_devlink_reload_down() local
3983 struct mlx4_dev_persistent *persist = dev->persist; in mlx4_devlink_reload_down()
3990 …mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n… in mlx4_devlink_reload_down()
4000 struct mlx4_dev *dev = &priv->dev; in mlx4_devlink_reload_up() local
4001 struct mlx4_dev_persistent *persist = dev->persist; in mlx4_devlink_reload_up()
4007 mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n", in mlx4_devlink_reload_up()
4023 struct mlx4_dev *dev; in mlx4_init_one() local
4028 devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev); in mlx4_init_one()
4034 dev = &priv->dev; in mlx4_init_one()
4035 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); in mlx4_init_one()
4036 if (!dev->persist) { in mlx4_init_one()
4040 dev->persist->pdev = pdev; in mlx4_init_one()
4041 dev->persist->dev = dev; in mlx4_init_one()
4042 pci_set_drvdata(pdev, dev->persist); in mlx4_init_one()
4044 mutex_init(&dev->persist->device_state_mutex); in mlx4_init_one()
4045 mutex_init(&dev->persist->interface_state_mutex); in mlx4_init_one()
4046 mutex_init(&dev->persist->pci_status_mutex); in mlx4_init_one()
4066 kfree(dev->persist); in mlx4_init_one()
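mlx4_init_one() uses a two-level allocation: the bulk of the driver state is embedded in the devlink instance, while the small persistent part, which must survive device resets, is a separate kzalloc linked both ways. A sketch assembling the quoted pieces (devlink_free() as the matching teardown is an assumption from the devlink API, not a quoted line):

        devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
        if (!devlink)
                return -ENOMEM;
        priv = devlink_priv(devlink);           /* state embedded in devlink */

        dev = &priv->dev;
        dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
        if (!dev->persist) {
                devlink_free(devlink);
                return -ENOMEM;
        }
        dev->persist->pdev = pdev;
        dev->persist->dev = dev;                /* back-pointer used at unload */
        pci_set_drvdata(pdev, dev->persist);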
4073 static void mlx4_clean_dev(struct mlx4_dev *dev) in mlx4_clean_dev() argument
4075 struct mlx4_dev_persistent *persist = dev->persist; in mlx4_clean_dev()
4076 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_clean_dev()
4077 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); in mlx4_clean_dev()
4080 priv->dev.persist = persist; in mlx4_clean_dev()
4081 priv->dev.flags = flags; in mlx4_clean_dev()
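The single line the matcher elides in mlx4_clean_dev() is a memset of the whole priv structure; the two assignments that follow restore the only state allowed to survive a reset. Reconstructed:

        memset(priv, 0, sizeof(*priv));
        priv->dev.persist = persist;            /* survives the wipe */
        priv->dev.flags = flags;                /* RESET_PERSIST_MASK_FLAGS only */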
4087 struct mlx4_dev *dev = persist->dev; in mlx4_unload_one() local
4088 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_unload_one()
4099 for (i = 0; i < dev->caps.num_ports; i++) { in mlx4_unload_one()
4100 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; in mlx4_unload_one()
4101 dev->persist->curr_port_poss_type[i] = dev->caps. in mlx4_unload_one()
4107 mlx4_stop_sense(dev); in mlx4_unload_one()
4108 mlx4_unregister_device(dev); in mlx4_unload_one()
4110 for (p = 1; p <= dev->caps.num_ports; p++) { in mlx4_unload_one()
4112 mlx4_CLOSE_PORT(dev, p); in mlx4_unload_one()
4115 if (mlx4_is_master(dev)) in mlx4_unload_one()
4116 mlx4_free_resource_tracker(dev, in mlx4_unload_one()
4119 mlx4_cleanup_default_counters(dev); in mlx4_unload_one()
4120 if (!mlx4_is_slave(dev)) in mlx4_unload_one()
4121 mlx4_cleanup_counters_table(dev); in mlx4_unload_one()
4122 mlx4_cleanup_qp_table(dev); in mlx4_unload_one()
4123 mlx4_cleanup_srq_table(dev); in mlx4_unload_one()
4124 mlx4_cleanup_cq_table(dev); in mlx4_unload_one()
4125 mlx4_cmd_use_polling(dev); in mlx4_unload_one()
4126 mlx4_cleanup_eq_table(dev); in mlx4_unload_one()
4127 mlx4_cleanup_mcg_table(dev); in mlx4_unload_one()
4128 mlx4_cleanup_mr_table(dev); in mlx4_unload_one()
4129 mlx4_cleanup_xrcd_table(dev); in mlx4_unload_one()
4130 mlx4_cleanup_pd_table(dev); in mlx4_unload_one()
4132 if (mlx4_is_master(dev)) in mlx4_unload_one()
4133 mlx4_free_resource_tracker(dev, in mlx4_unload_one()
4137 mlx4_uar_free(dev, &priv->driver_uar); in mlx4_unload_one()
4138 mlx4_cleanup_uar_table(dev); in mlx4_unload_one()
4139 if (!mlx4_is_slave(dev)) in mlx4_unload_one()
4140 mlx4_clear_steering(dev); in mlx4_unload_one()
4141 mlx4_free_eq_table(dev); in mlx4_unload_one()
4142 if (mlx4_is_master(dev)) in mlx4_unload_one()
4143 mlx4_multi_func_cleanup(dev); in mlx4_unload_one()
4144 mlx4_close_hca(dev); in mlx4_unload_one()
4145 mlx4_close_fw(dev); in mlx4_unload_one()
4146 if (mlx4_is_slave(dev)) in mlx4_unload_one()
4147 mlx4_multi_func_cleanup(dev); in mlx4_unload_one()
4148 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); in mlx4_unload_one()
4150 if (dev->flags & MLX4_FLAG_MSI_X) in mlx4_unload_one()
4153 if (!mlx4_is_slave(dev)) in mlx4_unload_one()
4154 mlx4_free_ownership(dev); in mlx4_unload_one()
4156 mlx4_slave_destroy_special_qp_cap(dev); in mlx4_unload_one()
4157 kfree(dev->dev_vfs); in mlx4_unload_one()
4159 mlx4_adev_cleanup(dev); in mlx4_unload_one()
4161 mlx4_clean_dev(dev); in mlx4_unload_one()
4169 struct mlx4_dev *dev = persist->dev; in mlx4_remove_one() local
4170 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_remove_one()
4177 if (mlx4_is_slave(dev)) in mlx4_remove_one()
4185 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { in mlx4_remove_one()
4186 active_vfs = mlx4_how_many_lives_vf(dev); in mlx4_remove_one()
4199 mlx4_info(dev, "%s: interface is down\n", __func__); in mlx4_remove_one()
4200 mlx4_catas_end(dev); in mlx4_remove_one()
4201 mlx4_crdump_end(dev); in mlx4_remove_one()
4202 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { in mlx4_remove_one()
4203 mlx4_warn(dev, "Disabling SR-IOV\n"); in mlx4_remove_one()
4208 mlx4_pci_disable_device(dev); in mlx4_remove_one()
4211 kfree(dev->persist); in mlx4_remove_one()
4216 static int restore_current_port_types(struct mlx4_dev *dev, in restore_current_port_types() argument
4220 struct mlx4_priv *priv = mlx4_priv(dev); in restore_current_port_types()
4223 mlx4_stop_sense(dev); in restore_current_port_types()
4226 for (i = 0; i < dev->caps.num_ports; i++) in restore_current_port_types()
4227 dev->caps.possible_type[i + 1] = poss_types[i]; in restore_current_port_types()
4228 err = mlx4_change_port_types(dev, types); in restore_current_port_types()
4229 mlx4_start_sense(dev); in restore_current_port_types()
4244 struct mlx4_dev *dev = persist->dev; in mlx4_restart_one_up() local
4245 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_restart_one_up()
4250 total_vfs = dev->persist->num_vfs; in mlx4_restart_one_up()
4251 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); in mlx4_restart_one_up()
4257 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n", in mlx4_restart_one_up()
4262 err = restore_current_port_types(dev, dev->persist->curr_port_type, in mlx4_restart_one_up()
4263 dev->persist->curr_port_poss_type); in mlx4_restart_one_up()
4265 mlx4_err(dev, "could not restore original port types (%d)\n", in mlx4_restart_one_up()
4333 struct mlx4_dev *dev = persist->dev; in mlx4_pci_err_detected() local
4336 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n"); in mlx4_pci_err_detected()
4339 devlink = priv_to_devlink(mlx4_priv(dev)); in mlx4_pci_err_detected()
4350 mlx4_pci_disable_device(persist->dev); in mlx4_pci_err_detected()
4357 struct mlx4_dev *dev = persist->dev; in mlx4_pci_slot_reset() local
4360 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); in mlx4_pci_slot_reset()
4361 err = mlx4_pci_enable_device(dev); in mlx4_pci_slot_reset()
4363 mlx4_err(dev, "Can not re-enable device, err=%d\n", err); in mlx4_pci_slot_reset()
4376 struct mlx4_dev *dev = persist->dev; in mlx4_pci_resume() local
4377 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_pci_resume()
4383 mlx4_err(dev, "%s was called\n", __func__); in mlx4_pci_resume()
4384 total_vfs = dev->persist->num_vfs; in mlx4_pci_resume()
4385 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); in mlx4_pci_resume()
4394 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", in mlx4_pci_resume()
4399 err = restore_current_port_types(dev, dev->persist-> in mlx4_pci_resume()
4400 curr_port_type, dev->persist-> in mlx4_pci_resume()
4403 mlx4_err(dev, "could not restore original port types (%d)\n", err); in mlx4_pci_resume()
4413 struct mlx4_dev *dev = persist->dev; in mlx4_shutdown() local
4416 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); in mlx4_shutdown()
4417 devlink = priv_to_devlink(mlx4_priv(dev)); in mlx4_shutdown()
4424 mlx4_pci_disable_device(dev); in mlx4_shutdown()
4437 struct mlx4_dev *dev = persist->dev; in mlx4_suspend() local
4440 mlx4_err(dev, "suspend was called\n"); in mlx4_suspend()
4441 devlink = priv_to_devlink(mlx4_priv(dev)); in mlx4_suspend()
4456 struct mlx4_dev *dev = persist->dev; in mlx4_resume() local
4457 struct mlx4_priv *priv = mlx4_priv(dev); in mlx4_resume()
4463 mlx4_err(dev, "resume was called\n"); in mlx4_resume()
4464 total_vfs = dev->persist->num_vfs; in mlx4_resume()
4465 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); in mlx4_resume()
4474 ret = restore_current_port_types(dev, in mlx4_resume()
4475 dev->persist->curr_port_type, in mlx4_resume()
4476 dev->persist->curr_port_poss_type); in mlx4_resume()
4478 mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret); in mlx4_resume()