Searched refs:vport (Results 1 – 25 of 194) sorted by relevance

/linux/net/openvswitch/
vport-netdev.c 31 struct vport *vport; in netdev_port_receive() local
33 vport = ovs_netdev_get_vport(skb->dev); in netdev_port_receive()
34 if (unlikely(!vport)) in netdev_port_receive()
50 ovs_vport_receive(vport, skb, skb_tunnel_info(skb)); in netdev_port_receive()
70 struct vport *local; in get_dpdev()
76 struct vport *ovs_netdev_link(struct vport *vport, const char *name) in ovs_netdev_link() argument
80 vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name); in ovs_netdev_link()
81 if (!vport->dev) { in ovs_netdev_link()
88 if (strcmp(name, ovs_vport_name(vport))) { in ovs_netdev_link()
92 netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL); in ovs_netdev_link()
[all …]
vport.c 95 struct vport *ovs_vport_locate(const struct net *net, const char *name) in ovs_vport_locate()
98 struct vport *vport; in ovs_vport_locate() local
100 hlist_for_each_entry_rcu(vport, bucket, hash_node, in ovs_vport_locate()
102 if (!strcmp(name, ovs_vport_name(vport)) && in ovs_vport_locate()
103 net_eq(ovs_dp_get_net(vport->dp), net)) in ovs_vport_locate()
104 return vport; in ovs_vport_locate()
122 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, in ovs_vport_alloc()
125 struct vport *vport; in ovs_vport_alloc() local
129 alloc_size = sizeof(struct vport); in ovs_vport_alloc()
135 vport = kzalloc(alloc_size, GFP_KERNEL); in ovs_vport_alloc()
[all …]
vport.h 20 struct vport;
28 struct vport *ovs_vport_add(const struct vport_parms *);
29 void ovs_vport_del(struct vport *);
31 struct vport *ovs_vport_locate(const struct net *net, const char *name);
33 void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
35 int ovs_vport_get_upcall_stats(struct vport *vport, struct sk_buff *skb);
37 int ovs_vport_set_options(struct vport *, struct nlattr *options);
38 int ovs_vport_get_options(const struct vport *, struct sk_buff *);
40 int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids);
41 int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
[all …]
vport-internal_dev.c 22 struct vport *vport; member
42 err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL); in internal_dev_xmit()
78 struct vport *vport = ovs_internal_dev_get_vport(dev); in internal_dev_destructor() local
80 ovs_vport_free(vport); in internal_dev_destructor()
123 static struct vport *internal_dev_create(const struct vport_parms *parms) in internal_dev_create()
125 struct vport *vport; in internal_dev_create() local
130 vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms); in internal_dev_create()
131 if (IS_ERR(vport)) { in internal_dev_create()
132 err = PTR_ERR(vport); in internal_dev_create()
138 vport->dev = dev; in internal_dev_create()
[all …]
vport-geneve.c 36 static inline struct geneve_port *geneve_vport(const struct vport *vport) in geneve_vport() argument
38 return vport_priv(vport); in geneve_vport()
41 static int geneve_get_options(const struct vport *vport, in geneve_get_options() argument
44 struct geneve_port *geneve_port = geneve_vport(vport); in geneve_get_options()
51 static struct vport *geneve_tnl_create(const struct vport_parms *parms) in geneve_tnl_create()
57 struct vport *vport; in geneve_tnl_create() local
76 vport = ovs_vport_alloc(sizeof(struct geneve_port), in geneve_tnl_create()
78 if (IS_ERR(vport)) in geneve_tnl_create()
79 return vport; in geneve_tnl_create()
81 geneve_port = geneve_vport(vport); in geneve_tnl_create()
[all …]
vport-vxlan.c 22 static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb) in vxlan_get_options() argument
24 struct vxlan_dev *vxlan = netdev_priv(vport->dev); in vxlan_get_options()
51 static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr, in vxlan_configure_exts() argument
71 static struct vport *vxlan_tnl_create(const struct vport_parms *parms) in vxlan_tnl_create()
76 struct vport *vport; in vxlan_tnl_create() local
100 vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms); in vxlan_tnl_create()
101 if (IS_ERR(vport)) in vxlan_tnl_create()
102 return vport; in vxlan_tnl_create()
106 err = vxlan_configure_exts(vport, a, &conf); in vxlan_tnl_create()
108 ovs_vport_free(vport); in vxlan_tnl_create()
[all …]
vport-gre.c 39 static struct vport *gre_tnl_create(const struct vport_parms *parms) in gre_tnl_create()
43 struct vport *vport; in gre_tnl_create() local
46 vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms); in gre_tnl_create()
47 if (IS_ERR(vport)) in gre_tnl_create()
48 return vport; in gre_tnl_create()
54 ovs_vport_free(vport); in gre_tnl_create()
62 ovs_vport_free(vport); in gre_tnl_create()
67 return vport; in gre_tnl_create()
70 static struct vport *gre_create(const struct vport_parms *parms) in gre_create()
72 struct vport *vport; in gre_create() local
[all …]
dp_notify.c 14 static void dp_detach_port_notify(struct vport *vport) in dp_detach_port_notify() argument
19 dp = vport->dp; in dp_detach_port_notify()
20 notify = ovs_vport_cmd_build_info(vport, ovs_dp_get_net(dp), in dp_detach_port_notify()
22 ovs_dp_detach_port(vport); in dp_detach_port_notify()
44 struct vport *vport; in ovs_dp_notify_wq() local
47 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { in ovs_dp_notify_wq()
48 if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL) in ovs_dp_notify_wq()
51 if (!(netif_is_ovs_port(vport->dev))) in ovs_dp_notify_wq()
52 dp_detach_port_notify(vport); in ovs_dp_notify_wq()
64 struct vport *vport = NULL; in dp_device_event() local
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/
ingress_ofld.c 10 acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
14 const struct mlx5_vport *vport) in esw_acl_ingress_prio_tag_enabled() argument
17 mlx5_eswitch_is_vf_vport(esw, vport->vport)); in esw_acl_ingress_prio_tag_enabled()
21 struct mlx5_vport *vport) in esw_acl_ingress_prio_tag_create() argument
46 if (vport->ingress.offloads.modify_metadata_rule) { in esw_acl_ingress_prio_tag_create()
48 flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; in esw_acl_ingress_prio_tag_create()
51 vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, in esw_acl_ingress_prio_tag_create()
53 if (IS_ERR(vport->ingress.allow_rule)) { in esw_acl_ingress_prio_tag_create()
54 err = PTR_ERR(vport->ingress.allow_rule); in esw_acl_ingress_prio_tag_create()
57 vport->vport, err); in esw_acl_ingress_prio_tag_create()
[all …]
egress_ofld.c 9 static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport) in esw_acl_egress_ofld_fwd2vport_destroy() argument
11 if (!vport->egress.offloads.fwd_rule) in esw_acl_egress_ofld_fwd2vport_destroy()
14 mlx5_del_flow_rules(vport->egress.offloads.fwd_rule); in esw_acl_egress_ofld_fwd2vport_destroy()
15 vport->egress.offloads.fwd_rule = NULL; in esw_acl_egress_ofld_fwd2vport_destroy()
18 void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index) in esw_acl_egress_ofld_bounce_rule_destroy() argument
21 xa_load(&vport->egress.offloads.bounce_rules, rule_index); in esw_acl_egress_ofld_bounce_rule_destroy()
27 xa_erase(&vport->egress.offloads.bounce_rules, rule_index); in esw_acl_egress_ofld_bounce_rule_destroy()
30 static void esw_acl_egress_ofld_bounce_rules_destroy(struct mlx5_vport *vport) in esw_acl_egress_ofld_bounce_rules_destroy() argument
35 xa_for_each(&vport->egress.offloads.bounce_rules, i, bounce_rule) { in esw_acl_egress_ofld_bounce_rules_destroy()
37 xa_erase(&vport->egress.offloads.bounce_rules, i); in esw_acl_egress_ofld_bounce_rules_destroy()
[all …]
ingress_lgcy.c 9 static void esw_acl_ingress_lgcy_rules_destroy(struct mlx5_vport *vport) in esw_acl_ingress_lgcy_rules_destroy() argument
11 if (vport->ingress.legacy.drop_rule) { in esw_acl_ingress_lgcy_rules_destroy()
12 mlx5_del_flow_rules(vport->ingress.legacy.drop_rule); in esw_acl_ingress_lgcy_rules_destroy()
13 vport->ingress.legacy.drop_rule = NULL; in esw_acl_ingress_lgcy_rules_destroy()
15 esw_acl_ingress_allow_rule_destroy(vport); in esw_acl_ingress_lgcy_rules_destroy()
19 struct mlx5_vport *vport) in esw_acl_ingress_lgcy_groups_create() argument
42 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); in esw_acl_ingress_lgcy_groups_create()
46 vport->vport, err); in esw_acl_ingress_lgcy_groups_create()
49 vport->ingress.legacy.allow_untagged_spoofchk_grp = g; in esw_acl_ingress_lgcy_groups_create()
58 g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); in esw_acl_ingress_lgcy_groups_create()
[all …]
egress_lgcy.c 9 static void esw_acl_egress_lgcy_rules_destroy(struct mlx5_vport *vport) in esw_acl_egress_lgcy_rules_destroy() argument
11 esw_acl_egress_vlan_destroy(vport); in esw_acl_egress_lgcy_rules_destroy()
12 if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) { in esw_acl_egress_lgcy_rules_destroy()
13 mlx5_del_flow_rules(vport->egress.legacy.drop_rule); in esw_acl_egress_lgcy_rules_destroy()
14 vport->egress.legacy.drop_rule = NULL; in esw_acl_egress_lgcy_rules_destroy()
19 struct mlx5_vport *vport) in esw_acl_egress_lgcy_groups_create() argument
27 err = esw_acl_egress_vlan_grp_create(esw, vport); in esw_acl_egress_lgcy_groups_create()
39 drop_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in); in esw_acl_egress_lgcy_groups_create()
43 vport->vport, err); in esw_acl_egress_lgcy_groups_create()
47 vport->egress.legacy.drop_grp = drop_grp; in esw_acl_egress_lgcy_groups_create()
[all …]
helper.c 9 esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, int size) in esw_acl_table_create() argument
26 vport_num = vport->vport; in esw_acl_table_create()
30 root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index); in esw_acl_table_create()
50 struct mlx5_vport *vport, in esw_egress_acl_vlan_create() argument
58 if (vport->egress.allowed_vlan) in esw_egress_acl_vlan_create()
72 vport->egress.allowed_vlan = in esw_egress_acl_vlan_create()
73 mlx5_add_flow_rules(vport->egress.acl, spec, in esw_egress_acl_vlan_create()
75 if (IS_ERR(vport->egress.allowed_vlan)) { in esw_egress_acl_vlan_create()
76 err = PTR_ERR(vport->egress.allowed_vlan); in esw_egress_acl_vlan_create()
79 vport->vport, err); in esw_egress_acl_vlan_create()
[all …]
/linux/drivers/scsi/lpfc/
lpfc_vport.c 53 inline void lpfc_vport_set_state(struct lpfc_vport *vport, in lpfc_vport_set_state() argument
56 struct fc_vport *fc_vport = vport->fc_vport; in lpfc_vport_set_state()
76 vport->port_state = LPFC_VPORT_FAILED; in lpfc_vport_set_state()
79 vport->port_state = LPFC_VPORT_UNKNOWN; in lpfc_vport_set_state()
118 lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) in lpfc_vport_sparm() argument
131 rc = lpfc_read_sparam(phba, pmb, vport->vpi); in lpfc_vport_sparm()
146 pmb->vport = vport; in lpfc_vport_sparm()
150 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_sparm()
158 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, in lpfc_vport_sparm()
170 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); in lpfc_vport_sparm()
[all …]
lpfc_vmid.c 47 struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, in lpfc_get_vmid_from_hashtable() argument
52 hash_for_each_possible(vport->hash_table, vmp, hnode, hash) { in lpfc_get_vmid_from_hashtable()
69 lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, in lpfc_put_vmid_in_hashtable() argument
72 hash_add(vport->hash_table, &vmp->hnode, hash); in lpfc_put_vmid_in_hashtable()
107 static void lpfc_vmid_update_entry(struct lpfc_vport *vport, in lpfc_vmid_update_entry() argument
114 if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) in lpfc_vmid_update_entry()
116 else if (vport->phba->cfg_vmid_app_header) in lpfc_vmid_update_entry()
129 static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, in lpfc_vmid_assign_cs_ctl() argument
135 if (vport->port_type == LPFC_PHYSICAL_PORT) { in lpfc_vmid_assign_cs_ctl()
136 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); in lpfc_vmid_assign_cs_ctl()
[all …]
lpfc_ct.c 157 struct lpfc_vport *vport = ndlp->vport; in lpfc_ct_reject_event() local
158 struct lpfc_hba *phba = vport->phba; in lpfc_ct_reject_event()
241 cmdiocbq->vport = vport; in lpfc_ct_reject_event()
267 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, in lpfc_ct_reject_event()
269 rc, vport->fc_flag); in lpfc_ct_reject_event()
286 struct lpfc_vport *vport = ctiocbq->vport; in lpfc_ct_handle_mibreq() local
294 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, in lpfc_ct_handle_mibreq()
301 if (test_bit(FC_UNLOADING, &vport->load_flag)) in lpfc_ct_handle_mibreq()
304 ndlp = lpfc_findnode_did(vport, did); in lpfc_ct_handle_mibreq()
306 lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS, in lpfc_ct_handle_mibreq()
[all …]
lpfc_hbadisc.c 71 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
117 if (!ndlp->vport) { in lpfc_rport_invalid()
131 struct lpfc_vport *vport; in lpfc_terminate_rport_io() local
138 vport = ndlp->vport; in lpfc_terminate_rport_io()
139 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, in lpfc_terminate_rport_io()
144 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); in lpfc_terminate_rport_io()
154 struct lpfc_vport *vport; in lpfc_dev_loss_tmo_callbk() local
163 vport = ndlp->vport; in lpfc_dev_loss_tmo_callbk()
164 phba = vport->phba; in lpfc_dev_loss_tmo_callbk()
166 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, in lpfc_dev_loss_tmo_callbk()
[all …]
lpfc_nportdisc.c 64 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, in lpfc_check_adisc() argument
85 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, in lpfc_check_sparm() argument
88 volatile struct serv_parm *hsp = &vport->fc_sparam; in lpfc_check_sparm()
167 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, in lpfc_check_sparm()
240 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, in lpfc_els_abort()
332 rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI, in lpfc_defer_plogi_acc()
348 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, in lpfc_rcv_plogi() argument
351 struct lpfc_hba *phba = vport->phba; in lpfc_rcv_plogi()
372 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, in lpfc_rcv_plogi()
376 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, in lpfc_rcv_plogi()
[all …]
lpfc_els.c 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
94 lpfc_els_chk_latt(struct lpfc_vport *vport) in lpfc_els_chk_latt() argument
96 struct lpfc_hba *phba = vport->phba; in lpfc_els_chk_latt()
99 if (vport->port_state >= LPFC_VPORT_READY || in lpfc_els_chk_latt()
112 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, in lpfc_els_chk_latt()
123 set_bit(FC_ABORT_DISCOVERY, &vport->fc_flag); in lpfc_els_chk_latt()
[all …]
/linux/drivers/net/ethernet/intel/idpf/
idpf_lib.c 466 static int idpf_del_mac_filter(struct idpf_vport *vport, in idpf_del_mac_filter() argument
489 err = idpf_add_del_mac_filters(vport, np, false, async); in idpf_del_mac_filter()
545 static int idpf_add_mac_filter(struct idpf_vport *vport, in idpf_add_mac_filter() argument
558 err = idpf_add_del_mac_filters(vport, np, true, async); in idpf_add_mac_filter()
569 static void idpf_del_all_mac_filters(struct idpf_vport *vport) in idpf_del_all_mac_filters() argument
574 vport_config = vport->adapter->vport_config[vport->idx]; in idpf_del_all_mac_filters()
593 static void idpf_restore_mac_filters(struct idpf_vport *vport) in idpf_restore_mac_filters() argument
598 vport_config = vport->adapter->vport_config[vport->idx]; in idpf_restore_mac_filters()
606 idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev), in idpf_restore_mac_filters()
617 static void idpf_remove_mac_filters(struct idpf_vport *vport) in idpf_remove_mac_filters() argument
[all …]
idpf_virtchnl.h 19 int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
21 int idpf_queue_reg_init(struct idpf_vport *vport);
22 int idpf_vport_queue_ids_init(struct idpf_vport *vport);
28 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
29 u32 idpf_get_vport_id(struct idpf_vport *vport);
32 int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
33 int idpf_send_enable_vport_msg(struct idpf_vport *vport);
34 int idpf_send_disable_vport_msg(struct idpf_vport *vport);
36 int idpf_vport_adjust_qs(struct idpf_vport *vport);
41 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
[all …]
idpf_txrx.c 171 static void idpf_tx_desc_rel_all(struct idpf_vport *vport) in idpf_tx_desc_rel_all() argument
175 if (!vport->txq_grps) in idpf_tx_desc_rel_all()
178 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_rel_all()
179 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; in idpf_tx_desc_rel_all()
184 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_rel_all()
242 static int idpf_tx_desc_alloc(const struct idpf_vport *vport, in idpf_tx_desc_alloc() argument
284 static int idpf_compl_desc_alloc(const struct idpf_vport *vport, in idpf_compl_desc_alloc() argument
308 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) in idpf_tx_desc_alloc_all() argument
316 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_alloc_all()
317 for (j = 0; j < vport->txq_grps[i].num_txq; j++) { in idpf_tx_desc_alloc_all()
[all …]
/linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/
hclge_mbx.c 31 static int hclge_gen_resp_to_vf(struct hclge_vport *vport, in hclge_gen_resp_to_vf() argument
36 struct hclge_dev *hdev = vport->back; in hclge_gen_resp_to_vf()
91 static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, in hclge_send_mbx_msg() argument
95 struct hclge_dev *hdev = vport->back; in hclge_send_mbx_msg()
127 int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) in hclge_inform_vf_reset() argument
132 dest_vfid = (u8)vport->vport_id; in hclge_inform_vf_reset()
136 return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), in hclge_inform_vf_reset()
140 int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) in hclge_inform_reset_assert_to_vf() argument
142 struct hclge_dev *hdev = vport->back; in hclge_inform_reset_assert_to_vf()
154 return hclge_inform_vf_reset(vport, reset_type); in hclge_inform_reset_assert_to_vf()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
eswitch.c 63 u16 vport; member
109 struct mlx5_vport *vport; in mlx5_eswitch_get_vport() local
114 vport = xa_load(&esw->vports, vport_num); in mlx5_eswitch_get_vport()
115 if (!vport) { in mlx5_eswitch_get_vport()
119 return vport; in mlx5_eswitch_get_vport()
122 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, in arm_vport_context_events_cmd() argument
131 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); in arm_vport_context_events_cmd()
132 if (vport || mlx5_core_is_ecpf(dev)) in arm_vport_context_events_cmd()
153 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, in mlx5_eswitch_modify_esw_vport_context() argument
158 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); in mlx5_eswitch_modify_esw_vport_context()
[all …]
/linux/sound/isa/
sc6000.c 183 static int sc6000_wait_data(char __iomem *vport) in sc6000_wait_data() argument
189 val = ioread8(vport + DSP_DATAVAIL); in sc6000_wait_data()
198 static int sc6000_read(char __iomem *vport) in sc6000_read() argument
200 if (sc6000_wait_data(vport)) in sc6000_read()
203 return ioread8(vport + DSP_READ); in sc6000_read()
207 static int sc6000_write(struct device *devptr, char __iomem *vport, int cmd) in sc6000_write() argument
213 val = ioread8(vport + DSP_STATUS); in sc6000_write()
218 iowrite8(cmd, vport + DSP_COMMAND); in sc6000_write()
230 char __iomem *vport, int command, in sc6000_dsp_get_answer() argument
235 if (sc6000_write(devptr, vport, command)) { in sc6000_dsp_get_answer()
[all …]
