
Searched refs:tb (Results 1 – 25 of 327) sorted by relevance


/linux/drivers/thunderbolt/
domain.c
124 struct tb *tb = container_of(dev, struct tb, dev); in boot_acl_show() local
129 uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL); in boot_acl_show()
133 pm_runtime_get_sync(&tb->dev); in boot_acl_show()
135 if (mutex_lock_interruptible(&tb->lock)) { in boot_acl_show()
139 ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl); in boot_acl_show()
141 mutex_unlock(&tb->lock); in boot_acl_show()
144 mutex_unlock(&tb->lock); in boot_acl_show()
146 for (ret = 0, i = 0; i < tb->nboot_acl; i++) { in boot_acl_show()
150 ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n"); in boot_acl_show()
154 pm_runtime_mark_last_busy(&tb->dev); in boot_acl_show()
[all …]
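
Note: the boot_acl_show() hits above share a shape common to Thunderbolt sysfs handlers: take a runtime-PM reference so the hardware is powered, take the domain lock interruptibly so a signal can abort a stuck read, then release both in reverse order. A minimal kernel-style sketch of that shape (struct demo_domain and demo_show are illustrative names, not from the source):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>

struct demo_domain {
	struct device dev;
	struct mutex lock;
	size_t nitems;
};

static ssize_t demo_show(struct demo_domain *d)
{
	ssize_t ret;

	/* Wake the (possibly runtime-suspended) device first. */
	pm_runtime_get_sync(&d->dev);

	if (mutex_lock_interruptible(&d->lock)) {
		ret = -ERESTARTSYS;	/* interrupted by a signal */
		goto out;
	}
	/* ... read d->nitems entries while holding the lock ... */
	ret = d->nitems;
	mutex_unlock(&d->lock);
out:
	pm_runtime_mark_last_busy(&d->dev);
	pm_runtime_put_autosuspend(&d->dev);
	return ret;
}

mutex_lock_interruptible() returns nonzero when a signal arrives, which is why the error path still drops the PM reference before returning.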
tb.c
72 static inline struct tb *tcm_to_tb(struct tb_cm *tcm) in tcm_to_tb()
74 return ((void *)tcm - sizeof(struct tb)); in tcm_to_tb()
79 struct tb *tb; member
88 static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
90 static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
93 static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) in tb_queue_hotplug() argument
101 ev->tb = tb; in tb_queue_hotplug()
106 queue_delayed_work(tb->wq, &ev->work, 0); in tb_queue_hotplug()
113 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources()
140 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources()
[all …]
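
Note: tb_queue_hotplug() above defers hotplug handling to the domain workqueue so the handler runs in process context. A hypothetical reduction of that pattern, assuming a caller-supplied workqueue (all demo_* names are illustrative):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event {
	struct delayed_work work;
	u64 route;
	u8 port;
	bool unplug;
};

static void demo_handle_event(struct work_struct *work)
{
	struct demo_event *ev = container_of(to_delayed_work(work),
					     struct demo_event, work);

	/* ... look up the switch by ev->route and handle plug/unplug ... */
	kfree(ev);	/* the event owns itself; the consumer frees it */
}

static void demo_queue_hotplug(struct workqueue_struct *wq, u64 route,
			       u8 port, bool unplug)
{
	struct demo_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return;		/* drop the event on allocation failure */
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_DELAYED_WORK(&ev->work, demo_handle_event);
	queue_delayed_work(wq, &ev->work, 0);	/* delay 0: run ASAP */
}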
icm.c
106 bool (*is_supported)(struct tb *tb);
107 int (*cio_reset)(struct tb *tb);
108 int (*get_mode)(struct tb *tb);
109 int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
110 void (*save_devices)(struct tb *tb);
111 int (*driver_ready)(struct tb *tb,
114 void (*set_uuid)(struct tb *tb);
115 void (*device_connected)(struct tb *tb,
117 void (*device_disconnected)(struct tb *tb,
119 void (*xdomain_connected)(struct tb *tb,
[all …]
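
Note: the icm.c hits outline a function-pointer table: one ops struct per controller generation, with hooks that all take the owning struct tb. A reduced sketch of that vtable style, where a NULL hook means "not implemented on this hardware" (demo_* names are illustrative):

#include <linux/errno.h>

struct demo_domain;

struct demo_ops {
	bool (*is_supported)(struct demo_domain *d);
	int  (*get_mode)(struct demo_domain *d);
	void (*save_devices)(struct demo_domain *d);
};

/* Dispatch helper: optional hooks are tested before the call. */
static int demo_get_mode(struct demo_domain *d, const struct demo_ops *ops)
{
	if (!ops->get_mode)
		return -EOPNOTSUPP;
	return ops->get_mode(d);
}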
tb.h
177 struct tb *tb; member
239 struct tb *tb; member
341 struct tb *tb; member
431 struct tb *tb; member
507 int (*driver_ready)(struct tb *tb);
508 int (*start)(struct tb *tb, bool reset);
509 void (*stop)(struct tb *tb);
510 void (*deinit)(struct tb *tb);
511 int (*suspend_noirq)(struct tb *tb);
512 int (*resume_noirq)(struct tb *tb);
[all …]
debugfs.c
226 struct tb *tb = sw->tb; in regs_write() local
236 if (mutex_lock_interruptible(&tb->lock)) { in regs_write()
263 mutex_unlock(&tb->lock); in regs_write()
387 struct tb *tb = sw->tb; in port_sb_regs_write() local
397 if (mutex_lock_interruptible(&tb->lock)) { in port_sb_regs_write()
405 mutex_unlock(&tb->lock); in port_sb_regs_write()
420 struct tb *tb = rt->tb; in retimer_sb_regs_write() local
430 if (mutex_lock_interruptible(&tb->lock)) { in retimer_sb_regs_write()
438 mutex_unlock(&tb->lock); in retimer_sb_regs_write()
621 struct tb *tb = margining->port->sw->tb; in margining_ber_level_write() local
[all …]
/linux/tools/bpf/bpftool/
netlink_dumper.c
14 static void xdp_dump_prog_id(struct nlattr **tb, int attr, in xdp_dump_prog_id() argument
18 if (!tb[attr]) in xdp_dump_prog_id()
24 NET_DUMP_UINT("id", " id %u", libbpf_nla_getattr_u32(tb[attr])) in xdp_dump_prog_id()
32 struct nlattr *tb[IFLA_XDP_MAX + 1]; in do_xdp_dump_one() local
35 if (libbpf_nla_parse_nested(tb, IFLA_XDP_MAX, attr, NULL) < 0) in do_xdp_dump_one()
38 if (!tb[IFLA_XDP_ATTACHED]) in do_xdp_dump_one()
41 mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]); in do_xdp_dump_one()
55 xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true); in do_xdp_dump_one()
56 xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true); in do_xdp_dump_one()
57 xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_I in do_xdp_dump_one()
72 do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb) do_xdp_dump() argument
83 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; do_bpf_dump_one_act() local
104 struct nlattr *tb[TCA_ACT_MAX + 1]; do_dump_one_act() local
121 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; do_bpf_act_dump() local
140 struct nlattr *tb[TCA_BPF_MAX + 1]; do_bpf_filter_dump() local
161 do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind, const char *devname, int ifindex) do_filter_dump() argument
[all …]
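
Note: the bpftool dumper parses nested rtnetlink attributes in userspace with libbpf's internal nla helpers. The sketch below assumes those internal signatures (libbpf_nla_getattr_u8/u32 are not a public API, so treat the prototypes as approximations):

#include <linux/if_link.h>
#include <linux/netlink.h>
#include <stdint.h>
#include <stdio.h>

/* Approximate prototypes of bpftool's internal helpers. */
uint8_t  libbpf_nla_getattr_u8(const struct nlattr *nla);
uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla);

/* tb[] is assumed to have been filled by libbpf_nla_parse_nested();
 * absent attributes stay NULL, so every slot is checked before use. */
static void demo_dump_xdp(struct nlattr **tb)
{
	uint8_t mode;

	if (!tb[IFLA_XDP_ATTACHED])
		return;
	mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]);
	if (mode == XDP_ATTACHED_NONE)
		return;
	if (tb[IFLA_XDP_PROG_ID])
		printf("id %u\n",
		       libbpf_nla_getattr_u32(tb[IFLA_XDP_PROG_ID]));
}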
/linux/net/bridge/
br_cfm_netlink.c
93 struct nlattr *tb[IFLA_BRIDGE_CFM_MEP_CREATE_MAX + 1]; in br_mep_create_parse() local
98 err = nla_parse_nested(tb, IFLA_BRIDGE_CFM_MEP_CREATE_MAX, attr, in br_mep_create_parse()
103 if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]) { in br_mep_create_parse()
107 if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]) { in br_mep_create_parse()
111 if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]) { in br_mep_create_parse()
115 if (!tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]) { in br_mep_create_parse()
122 instance = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_INSTANCE]); in br_mep_create_parse()
123 create.domain = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DOMAIN]); in br_mep_create_parse()
124 create.direction = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_DIRECTION]); in br_mep_create_parse()
125 create.ifindex = nla_get_u32(tb[IFLA_BRIDGE_CFM_MEP_CREATE_IFINDEX]); in br_mep_create_parse()
[all …]
br_mrp_netlink.c
34 struct nlattr *tb[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1]; in br_mrp_instance_parse() local
38 err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_INSTANCE_MAX, attr, in br_mrp_instance_parse()
43 if (!tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID] || in br_mrp_instance_parse()
44 !tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX] || in br_mrp_instance_parse()
45 !tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]) { in br_mrp_instance_parse()
53 inst.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID]); in br_mrp_instance_parse()
54 inst.p_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX]); in br_mrp_instance_parse()
55 inst.s_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]); in br_mrp_instance_parse()
58 if (tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO]) in br_mrp_instance_parse()
59 inst.prio = nla_get_u16(tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO]); in br_mrp_instance_parse()
[all …]
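
Note: br_cfm_netlink.c and br_mrp_netlink.c above follow the standard kernel netlink recipe: parse the nested blob into tb[], fail with an extack message if a mandatory attribute is missing, and read optional attributes with a default. A self-contained sketch of that recipe (the DEMO_* attribute space is invented for illustration):

#include <net/netlink.h>

enum {
	DEMO_UNSPEC,
	DEMO_RING_ID,	/* u32, mandatory */
	DEMO_PRIO,	/* u16, optional */
	__DEMO_MAX
};
#define DEMO_MAX (__DEMO_MAX - 1)

static const struct nla_policy demo_policy[DEMO_MAX + 1] = {
	[DEMO_RING_ID]	= { .type = NLA_U32 },
	[DEMO_PRIO]	= { .type = NLA_U16 },
};

static int demo_parse(struct nlattr *attr, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[DEMO_MAX + 1];
	u16 prio = 0x8000;	/* default when the attribute is absent */
	u32 ring_id;
	int err;

	err = nla_parse_nested(tb, DEMO_MAX, attr, demo_policy, extack);
	if (err)
		return err;

	if (!tb[DEMO_RING_ID]) {		/* mandatory */
		NL_SET_ERR_MSG(extack, "Missing ring id attribute");
		return -EINVAL;
	}
	ring_id = nla_get_u32(tb[DEMO_RING_ID]);

	if (tb[DEMO_PRIO])			/* optional */
		prio = nla_get_u16(tb[DEMO_PRIO]);

	pr_debug("ring_id %u prio %u\n", ring_id, prio);
	return 0;
}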
br_vlan_options.c
138 struct nlattr **tb, in br_vlan_modify_tunnel() argument
156 attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]; in br_vlan_modify_tunnel()
176 vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]); in br_vlan_modify_tunnel()
197 struct nlattr **tb, in br_vlan_process_one_opts() argument
204 if (tb[BRIDGE_VLANDB_ENTRY_STATE]) { in br_vlan_process_one_opts()
205 u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]); in br_vlan_process_one_opts()
211 if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) { in br_vlan_process_one_opts()
212 err = br_vlan_modify_tunnel(p, v, tb, changed, extack); in br_vlan_process_one_opts()
218 if (tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]) { in br_vlan_process_one_opts()
221 val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]); in br_vlan_process_one_opts()
[all …]
/linux/tools/testing/selftests/powerpc/pmu/ebb/
trace.c
17 struct trace_buffer *tb; in trace_buffer_allocate() local
19 if (size < sizeof(*tb)) { in trace_buffer_allocate()
24 tb = mmap(NULL, size, PROT_READ | PROT_WRITE, in trace_buffer_allocate()
26 if (tb == MAP_FAILED) { in trace_buffer_allocate()
31 tb->size = size; in trace_buffer_allocate()
32 tb->tail = tb->data; in trace_buffer_allocate()
33 tb->overflow = false; in trace_buffer_allocate()
35 return tb; in trace_buffer_allocate()
38 static bool trace_check_bounds(struct trace_buffer *tb, void *p) in trace_check_bounds() argument
40 return p < ((void *)tb + tb->size); in trace_check_bounds()
[all …]
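
Note: trace_buffer_allocate() above carves a trace buffer out of an anonymous mapping and uses the leading struct as an in-band header, with tail pointing at the first free byte of data[]. A userspace sketch of the same layout (field layout approximates the selftest's struct; the real code picks mmap flags to suit the EBB tests):

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

struct trace_buffer {
	size_t size;
	bool   overflow;
	void  *tail;
	unsigned char data[];	/* flexible array: entries live here */
};

static struct trace_buffer *trace_buffer_allocate(size_t size)
{
	struct trace_buffer *tb;

	if (size < sizeof(*tb))
		return NULL;	/* too small to hold even the header */

	tb = mmap(NULL, size, PROT_READ | PROT_WRITE,
		  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tb == MAP_FAILED)
		return NULL;

	tb->size = size;
	tb->tail = tb->data;	/* next write goes right after the header */
	tb->overflow = false;
	return tb;
}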
/linux/include/drm/
task_barrier.h
55 static inline void task_barrier_init(struct task_barrier *tb) in task_barrier_init() argument
57 tb->n = 0; in task_barrier_init()
58 atomic_set(&tb->count, 0); in task_barrier_init()
59 sema_init(&tb->enter_turnstile, 0); in task_barrier_init()
60 sema_init(&tb->exit_turnstile, 0); in task_barrier_init()
63 static inline void task_barrier_add_task(struct task_barrier *tb) in task_barrier_add_task() argument
65 tb->n++; in task_barrier_add_task()
68 static inline void task_barrier_rem_task(struct task_barrier *tb) in task_barrier_rem_task() argument
70 tb->n--; in task_barrier_rem_task()
78 static inline void task_barrier_enter(struct task_barrier *tb) in task_barrier_enter() argument
[all …]
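
Note: task_barrier.h implements a classic two-turnstile barrier (an enter and an exit semaphore pair). A usage sketch against that header: register every participant once, then bracket the lockstep section with enter/exit (demo_* names are illustrative):

#include <drm/task_barrier.h>

static struct task_barrier demo_barrier;

/* Called once at init time by whoever owns the barrier. */
static void demo_setup(int nr_tasks)
{
	int i;

	task_barrier_init(&demo_barrier);
	for (i = 0; i < nr_tasks; i++)
		task_barrier_add_task(&demo_barrier);	/* bumps tb->n */
}

/* Body run by each of the nr_tasks participants. */
static void demo_worker(void)
{
	task_barrier_enter(&demo_barrier);	/* blocks until all n arrive */
	/* ... code that all tasks must start together ... */
	task_barrier_exit(&demo_barrier);	/* blocks until all leave */
}

The header also provides task_barrier_full() for the common enter-then-exit case.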
/linux/net/netfilter/
nfnetlink_cthelper.c
76 struct nlattr *tb[NFCTH_TUPLE_MAX+1]; in nfnl_cthelper_parse_tuple() local
78 err = nla_parse_nested_deprecated(tb, NFCTH_TUPLE_MAX, attr, in nfnl_cthelper_parse_tuple()
83 if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) in nfnl_cthelper_parse_tuple()
89 tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); in nfnl_cthelper_parse_tuple()
90 tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); in nfnl_cthelper_parse_tuple()
141 struct nlattr *tb[NFCTH_POLICY_MAX+1]; in nfnl_cthelper_expect_policy() local
143 err = nla_parse_nested_deprecated(tb, NFCTH_POLICY_MAX, attr, in nfnl_cthelper_expect_policy()
148 if (!tb[NFCTH_POLICY_NAME] || in nfnl_cthelper_expect_policy()
149 !tb[NFCTH_POLICY_EXPECT_MAX] || in nfnl_cthelper_expect_policy()
150 !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) in nfnl_cthelper_expect_policy()
[all …]
nft_tunnel.c
76 const struct nlattr * const tb[]) in nft_tunnel_get_init() argument
81 if (!tb[NFTA_TUNNEL_KEY] || in nft_tunnel_get_init()
82 !tb[NFTA_TUNNEL_DREG]) in nft_tunnel_get_init()
85 priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY])); in nft_tunnel_get_init()
97 if (tb[NFTA_TUNNEL_MODE]) { in nft_tunnel_get_init()
98 priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE])); in nft_tunnel_get_init()
106 return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg, in nft_tunnel_get_init()
195 struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1]; in nft_tunnel_obj_ip_init() local
198 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr, in nft_tunnel_obj_ip_init()
203 if (!tb[NFTA_TUNNEL_KEY_IP_DS in nft_tunnel_obj_ip_init()
224 struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1]; nft_tunnel_obj_ip6_init() local
260 struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1]; nft_tunnel_obj_vxlan_init() local
290 struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1]; nft_tunnel_obj_erspan_init() local
345 struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1]; nft_tunnel_obj_geneve_init() local
446 nft_tunnel_obj_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_object *obj) nft_tunnel_obj_init() argument
[all …]
nft_bitwise.c
139 const struct nlattr *const tb[]) in nft_bitwise_init_mask_xor() argument
153 if (tb[NFTA_BITWISE_DATA] || in nft_bitwise_init_mask_xor()
154 tb[NFTA_BITWISE_SREG2]) in nft_bitwise_init_mask_xor()
157 if (!tb[NFTA_BITWISE_MASK] || in nft_bitwise_init_mask_xor()
158 !tb[NFTA_BITWISE_XOR]) in nft_bitwise_init_mask_xor()
161 err = nft_data_init(NULL, &priv->mask, &mask, tb[NFTA_BITWISE_MASK]); in nft_bitwise_init_mask_xor()
165 err = nft_data_init(NULL, &priv->xor, &xor, tb[NFTA_BITWISE_XOR]); in nft_bitwise_init_mask_xor()
178 const struct nlattr *const tb[]) in nft_bitwise_init_shift() argument
187 if (tb[NFTA_BITWISE_MASK] || in nft_bitwise_init_shift()
188 tb[NFTA_BITWISE_XOR] || in nft_bitwise_init_shift()
[all …]
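
Note: the mask/xor pair parsed in nft_bitwise_init_mask_xor() is expressive enough to encode all three boolean operations as dst = (src & mask) ^ xor, which is how nftables lowers AND, OR and XOR onto one kernel primitive. A quick userspace check of that identity:

#include <assert.h>
#include <stdint.h>

static uint32_t bw(uint32_t src, uint32_t mask, uint32_t xor)
{
	return (src & mask) ^ xor;	/* the nft_bitwise primitive */
}

int main(void)
{
	uint32_t src = 0x1234abcd;

	assert(bw(src, 0x00ff00ff, 0)   == (src & 0x00ff00ff)); /* AND */
	assert(bw(src, ~0u,       0xff) == (src ^ 0xff));       /* XOR */
	assert(bw(src, ~0xffu,    0xff) == (src | 0xff));       /* OR  */
	return 0;
}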
nft_hash.c
72 const struct nlattr * const tb[]) in nft_jhash_init() argument
78 if (!tb[NFTA_HASH_SREG] || in nft_jhash_init()
79 !tb[NFTA_HASH_DREG] || in nft_jhash_init()
80 !tb[NFTA_HASH_LEN] || in nft_jhash_init()
81 !tb[NFTA_HASH_MODULUS]) in nft_jhash_init()
84 if (tb[NFTA_HASH_OFFSET]) in nft_jhash_init()
85 priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET])); in nft_jhash_init()
87 err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len); in nft_jhash_init()
95 err = nft_parse_register_load(ctx, tb[NFTA_HASH_SREG], &priv->sreg, len); in nft_jhash_init()
99 priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); in nft_jhash_init()
[all …]
/linux/net/ethtool/
bitset.c
330 struct nlattr *tb[ARRAY_SIZE(bitset_policy)]; in ethnl_bitset_is_compact() local
333 ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, bitset, in ethnl_bitset_is_compact()
338 if (tb[ETHTOOL_A_BITSET_BITS]) { in ethnl_bitset_is_compact()
339 if (tb[ETHTOOL_A_BITSET_VALUE] || tb[ETHTOOL_A_BITSET_MASK]) in ethnl_bitset_is_compact()
344 if (!tb[ETHTOOL_A_BITSET_SIZE] || !tb[ETHTOOL_A_BITSET_VALUE]) in ethnl_bitset_is_compact()
382 struct nlattr *tb[ARRAY_SIZE(bit_policy)]; in ethnl_parse_bit() local
385 ret = nla_parse_nested(tb, ARRAY_SIZE(bit_policy) - 1, bit_attr, in ethnl_parse_bit()
390 if (tb[ETHTOOL_A_BITSET_BIT_INDEX]) { in ethnl_parse_bit()
393 idx = nla_get_u32(tb[ETHTOOL_A_BITSET_BIT_INDEX]); in ethnl_parse_bit()
396 tb[ETHTOOL_A_BITSET_BIT_INDEX], in ethnl_parse_bit()
[all …]
rings.c
154 struct nlattr **tb = info->attrs; in ethnl_set_rings_validate() local
156 if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] && in ethnl_set_rings_validate()
159 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], in ethnl_set_rings_validate()
164 if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] && in ethnl_set_rings_validate()
167 tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], in ethnl_set_rings_validate()
172 if (tb[ETHTOOL_A_RINGS_HDS_THRESH] && in ethnl_set_rings_validate()
175 tb[ETHTOOL_A_RINGS_HDS_THRESH], in ethnl_set_rings_validate()
180 if (tb[ETHTOOL_A_RINGS_CQE_SIZE] && in ethnl_set_rings_validate()
183 tb[ETHTOOL_A_RINGS_CQE_SIZE], in ethnl_set_rings_validate()
188 if (tb[ETHTOOL_A_RINGS_TX_PUSH] && in ethnl_set_rings_validate()
[all …]
coalesce.c
353 struct nlattr **tb = info->attrs; in ethnl_set_coalesce_validate() local
369 if (tb[a] && !(supported_params & attr_to_mask(a))) { in ethnl_set_coalesce_validate()
370 NL_SET_ERR_MSG_ATTR(info->extack, tb[a], in ethnl_set_coalesce_validate()
392 struct nlattr **tb, in ethnl_update_irq_moder() argument
399 if (!tb[attr_type]) in ethnl_update_irq_moder()
403 val = nla_get_u32(tb[attr_type]); in ethnl_update_irq_moder()
410 NL_SET_BAD_ATTR(extack, tb[attr_type]); in ethnl_update_irq_moder()
446 struct nlattr *tb[ARRAY_SIZE(coalesce_irq_moderation_policy)]; in ethnl_update_profile() local
466 ret = nla_parse_nested(tb, len_irq_moder - 1, nest, in ethnl_update_profile()
474 tb, DIM_COALESCE_USEC, in ethnl_update_profile()
[all …]
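
Note: ethnl_update_irq_moder() above shows ethtool's update-if-present idiom: an absent attribute means "keep the current value", while a present one is range-checked before it overwrites. A reduced kernel-style sketch (demo_update_u32 is an illustrative name):

#include <net/netlink.h>

/* Returns 0 if nothing changed, 1 if *dest was updated, or a negative
 * errno with the offending attribute marked in extack. */
static int demo_update_u32(u32 *dest, const struct nlattr *attr,
			   u32 max, struct netlink_ext_ack *extack)
{
	u32 val;

	if (!attr)
		return 0;	/* attribute absent: keep current value */

	val = nla_get_u32(attr);
	if (val > max) {
		NL_SET_BAD_ATTR(extack, attr);
		return -EINVAL;
	}
	*dest = val;
	return 1;
}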
/linux/drivers/target/
target_core_hba.c
40 struct target_backend *tb, *old; in transport_backend_register() local
42 tb = kzalloc(sizeof(*tb), GFP_KERNEL); in transport_backend_register()
43 if (!tb) in transport_backend_register()
45 tb->ops = ops; in transport_backend_register()
52 kfree(tb); in transport_backend_register()
56 target_setup_backend_cits(tb); in transport_backend_register()
57 list_add_tail(&tb->list, &backend_list); in transport_backend_register()
68 struct target_backend *tb; in target_backend_unregister() local
71 list_for_each_entry(tb, &backend_list, list) { in target_backend_unregister()
72 if (tb->ops == ops) { in target_backend_unregister()
[all …]
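
Note: transport_backend_register()/target_backend_unregister() above manage a mutex-guarded global list keyed by the ops pointer. A self-contained sketch of that register/unregister pattern (demo_* names are illustrative; the real code also rejects duplicate registrations and builds configfs item types):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_backend_ops;

struct demo_backend {
	const struct demo_backend_ops *ops;
	struct list_head list;
};

static LIST_HEAD(demo_backend_list);
static DEFINE_MUTEX(demo_backend_mutex);

static int demo_backend_register(const struct demo_backend_ops *ops)
{
	struct demo_backend *tb = kzalloc(sizeof(*tb), GFP_KERNEL);

	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&demo_backend_mutex);
	list_add_tail(&tb->list, &demo_backend_list);
	mutex_unlock(&demo_backend_mutex);
	return 0;
}

static void demo_backend_unregister(const struct demo_backend_ops *ops)
{
	struct demo_backend *tb;

	mutex_lock(&demo_backend_mutex);
	list_for_each_entry(tb, &demo_backend_list, list) {
		if (tb->ops == ops) {	/* match on the ops pointer */
			list_del(&tb->list);
			kfree(tb);
			break;
		}
	}
	mutex_unlock(&demo_backend_mutex);
}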
/linux/net/netfilter/ipset/
ip_set_hash_ipportnet.c
160 hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], in hash_ipportnet4_uadt() argument
173 if (tb[IPSET_ATTR_LINENO]) in hash_ipportnet4_uadt()
174 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); in hash_ipportnet4_uadt()
176 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || in hash_ipportnet4_uadt()
177 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || in hash_ipportnet4_uadt()
178 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || in hash_ipportnet4_uadt()
179 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) in hash_ipportnet4_uadt()
182 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); in hash_ipportnet4_uadt()
186 ret = ip_set_get_extensions(set, tb, &ext); in hash_ipportnet4_uadt()
190 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); in hash_ipportnet4_uadt()
[all …]
ip_set_hash_ipportip.c
108 hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], in hash_ipportip4_uadt() argument
119 if (tb[IPSET_ATTR_LINENO]) in hash_ipportip4_uadt()
120 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); in hash_ipportip4_uadt()
122 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || in hash_ipportip4_uadt()
123 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || in hash_ipportip4_uadt()
124 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO))) in hash_ipportip4_uadt()
127 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); in hash_ipportip4_uadt()
131 ret = ip_set_get_extensions(set, tb, &ext); in hash_ipportip4_uadt()
135 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2); in hash_ipportip4_uadt()
139 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); in hash_ipportip4_uadt()
[all …]
ip_set_hash_ipmac.c
110 hash_ipmac4_uadt(struct ip_set *set, struct nlattr *tb[], in hash_ipmac4_uadt() argument
118 if (unlikely(!tb[IPSET_ATTR_IP] || in hash_ipmac4_uadt()
119 !tb[IPSET_ATTR_ETHER] || in hash_ipmac4_uadt()
120 nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN || in hash_ipmac4_uadt()
121 !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || in hash_ipmac4_uadt()
122 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || in hash_ipmac4_uadt()
123 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || in hash_ipmac4_uadt()
124 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || in hash_ipmac4_uadt()
125 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || in hash_ipmac4_uadt()
126 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) in hash_ipmac4_uadt()
[all …]
ip_set_hash_netportnet.c
188 hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], in hash_netportnet4_uadt() argument
200 if (tb[IPSET_ATTR_LINENO]) in hash_netportnet4_uadt()
201 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); in hash_netportnet4_uadt()
204 if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || in hash_netportnet4_uadt()
205 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || in hash_netportnet4_uadt()
206 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || in hash_netportnet4_uadt()
207 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) in hash_netportnet4_uadt()
210 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); in hash_netportnet4_uadt()
214 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); in hash_netportnet4_uadt()
218 ret = ip_set_get_extensions(set, tb, &ext); in hash_netportnet4_uadt()
[all …]
ip_set_hash_netport.c
154 hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], in hash_netport4_uadt() argument
166 if (tb[IPSET_ATTR_LINENO]) in hash_netport4_uadt()
167 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); in hash_netport4_uadt()
169 if (unlikely(!tb[IPSET_ATTR_IP] || in hash_netport4_uadt()
170 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || in hash_netport4_uadt()
171 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || in hash_netport4_uadt()
172 !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) in hash_netport4_uadt()
175 ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); in hash_netport4_uadt()
179 ret = ip_set_get_extensions(set, tb, &ext); in hash_netport4_uadt()
183 if (tb[IPSET_ATTR_CIDR]) { in hash_netport4_uadt()
[all …]
ip_set_hash_ipport.c
112 hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], in hash_ipport4_uadt() argument
123 if (tb[IPSET_ATTR_LINENO]) in hash_ipport4_uadt()
124 *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); in hash_ipport4_uadt()
126 if (unlikely(!tb[IPSET_ATTR_IP] || in hash_ipport4_uadt()
127 !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || in hash_ipport4_uadt()
128 !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO))) in hash_ipport4_uadt()
131 ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip); in hash_ipport4_uadt()
135 ret = ip_set_get_extensions(set, tb, &ext); in hash_ipport4_uadt()
143 e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); in hash_ipport4_uadt()
145 if (tb[IPSET_ATTR_PROTO]) { in hash_ipport4_uadt()
[all …]
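
Note: every hash_*4_uadt() parser above opens with the same gatekeeping: mandatory attributes must be present, and numeric attributes must carry the NLA_F_NET_BYTEORDER flag, which is what ip_set_attr_netorder()/ip_set_optattr_netorder() test. A reduced sketch of that check (demo_uadt_check is an illustrative name):

#include <linux/netfilter/ipset/ip_set.h>

static int demo_uadt_check(struct nlattr *tb[])
{
	/* Presence alone is not enough: PORT must also be flagged as
	 * network byte order; PORT_TO is optional, but if present it
	 * must be flagged the same way. */
	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO)))
		return -IPSET_ERR_PROTOCOL;

	return 0;
}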
