/linux/tools/testing/selftests/tc-testing/tc-tests/actions/
ct.json
      4    "name": "Add simple ct action",
      7    "ct"
     14    "$TC actions flush action ct",
     20    "cmdUnderTest": "$TC actions add action ct index 42",
     22    "verifyCmd": "$TC actions list action ct",
     23    "matchPattern": "action order [0-9]*: ct zone 0 pipe.*index 42 ref",
     26    "$TC actions flush action ct"
     31    "name": "Add simple ct action with cookie",
     34    "ct"
     41    "$TC actions flush action ct",
    [all …]

/linux/drivers/gpu/drm/xe/
xe_guc_ct.c
     61  static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
     63  #define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
     65  #define CT_DEAD(ct, ctb, reason) \
     73  /* Used when a CT send wants to block and / or receive data */
    104  ct_to_guc(struct xe_guc_ct *ct)  in ct_to_guc()
    106  return container_of(ct, struct xe_guc, ct);  in ct_to_guc()
    110  ct_to_gt(struct xe_guc_ct *ct)  in ct_to_gt()
    112  return container_of(ct, struct xe_gt, uc.guc.ct);  in ct_to_gt()
    116  ct_to_xe(struct xe_guc_ct *ct)  in ct_to_xe()
    118  return gt_to_xe(ct_to_gt(ct));  in ct_to_xe()
    [all …]

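The xe_guc_ct.c matches above show the standard kernel idiom of recovering an enclosing object from a pointer to an embedded member via container_of(). A minimal, self-contained sketch of that idiom; the struct names here are illustrative stand-ins, not the real Xe driver layout:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for an embedded "CT" object inside a parent. */
struct guc_ct { int msg_count; };
struct guc    { int id; struct guc_ct ct; };

/* Simplified container_of(): step back from the member to the parent. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct guc *ct_to_guc(struct guc_ct *ct)
{
	return container_of(ct, struct guc, ct);
}

int main(void)
{
	struct guc g = { .id = 7 };
	struct guc_ct *ct = &g.ct;

	/* Recovers &g from the embedded &g.ct. */
	printf("parent id = %d\n", ct_to_guc(ct)->id);
	return 0;
}
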
/linux/drivers/gpu/drm/i915/gt/uc/
intel_guc_ct.c
     29  #define CT_DEAD(ct, reason) \
     31  if (!(ct)->dead_ct_reported) { \
     32  (ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
     33  queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
     37  #define CT_DEAD(ct, reason) do { } while (0)
     40  static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)  in ct_to_guc()
     42  return container_of(ct, struct intel_guc, ct);  in ct_to_guc()
     46  guc_err(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
     49  guc_dbg(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
     54  guc_probe_error(ct_to_guc(ct), "CT: " _fmt, ##__VA_ARGS__)
    [all …]

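The i915 CT_DEAD() macro above is a compact "record the reason, report once, defer the heavy work" pattern: set a reason bit and queue a worker on system_unbound_wq the first time the condition fires. A hedged, kernel-context sketch of the same shape for a hypothetical driver; the dev_dead_* names and DEAD_* reasons are invented for illustration, and the worker is expected to set dead_reported once it has dumped state:

/* Kernel-context sketch only; mirrors the shape of CT_DEAD() above. */
#include <linux/workqueue.h>

enum my_dead_reason {
	DEAD_SETUP,
	DEAD_DEADLOCK,
};

struct my_dev {
	bool dead_reported;		/* set by dead_worker after reporting */
	unsigned long dead_reason;	/* bitmask of enum my_dead_reason */
	struct work_struct dead_worker;
};

#define DEV_DEAD(dev, reason)						\
	do {								\
		if (!(dev)->dead_reported) {				\
			(dev)->dead_reason |= 1 << DEAD_##reason;	\
			queue_work(system_unbound_wq,			\
				   &(dev)->dead_worker);		\
		}							\
	} while (0)

/* Usage at an error site:  DEV_DEAD(dev, DEADLOCK);  */
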
/linux/tools/testing/selftests/bpf/progs/
test_bpf_nf_fail.c
     33  struct nf_conn *ct;  in alloc_release()
     35  ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in alloc_release()
     36  if (!ct)  in alloc_release()
     38  bpf_ct_release(ct);  in alloc_release()
     47  struct nf_conn *ct;  in insert_insert()
     49  ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in insert_insert()
     50  if (!ct)  in insert_insert()
     52  ct = bpf_ct_insert_entry(ct);  in insert_insert()
     53  if (!ct)  in insert_insert()
     55  ct = bpf_ct_insert_entry(ct);  in insert_insert()
    [all …]

test_bpf_nf.c
     98  struct nf_conn *ct;  in nf_ct_test()
    102  ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));  in nf_ct_test()
    103  if (ct)  in nf_ct_test()
    104  bpf_ct_release(ct);  in nf_ct_test()
    109  ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,  in nf_ct_test()
    113  if (ct)  in nf_ct_test()
    114  bpf_ct_release(ct);  in nf_ct_test()
    119  ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,  in nf_ct_test()
    122  if (ct)  in nf_ct_test()
    123  bpf_ct_release(ct);  in nf_ct_test()
    [all …]

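Both BPF selftests above exercise the conntrack kfuncs, and the recurring shape is: every pointer returned by bpf_skb_ct_lookup()/bpf_skb_ct_alloc() is a reference that must either be released with bpf_ct_release() or handed off via bpf_ct_insert_entry(). A sketch of the lookup-and-release half, assuming the bpf_ct_opts layout the selftests declare locally and a vmlinux.h whose BTF includes the conntrack types:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Assumed to mirror the kernel's struct bpf_ct_opts, as the selftests do. */
struct bpf_ct_opts___local {
	__s32 netns_id;
	__s32 error;
	__u8 l4proto;
	__u8 dir;
	__u8 reserved[2];
};

struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx,
				  struct bpf_sock_tuple *bpf_tuple, __u32 tuple__sz,
				  struct bpf_ct_opts___local *opts, __u32 opts__sz) __ksym;
void bpf_ct_release(struct nf_conn *ct) __ksym;

SEC("tc")
int ct_lookup_release(struct __sk_buff *ctx)
{
	struct bpf_ct_opts___local opts = { .netns_id = -1, .l4proto = 6 /* TCP */ };
	struct bpf_sock_tuple tup = {
		.ipv4 = { .sport = bpf_htons(12345), .dport = bpf_htons(80) },
	};
	struct nf_conn *ct;

	ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
	if (ct)
		bpf_ct_release(ct);	/* drop the reference the lookup took */
	return 0;
}

char _license[] SEC("license") = "GPL";
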
/linux/drivers/scsi/bfa/
bfi_reg.h
     18  #define HOSTFN0_INT_STATUS     0x00014000  /* cb/ct */
     19  #define HOSTFN1_INT_STATUS     0x00014100  /* cb/ct */
     20  #define HOSTFN2_INT_STATUS     0x00014300  /* ct */
     21  #define HOSTFN3_INT_STATUS     0x00014400  /* ct */
     22  #define HOSTFN0_INT_MSK        0x00014004  /* cb/ct */
     23  #define HOSTFN1_INT_MSK        0x00014104  /* cb/ct */
     24  #define HOSTFN2_INT_MSK        0x00014304  /* ct */
     25  #define HOSTFN3_INT_MSK        0x00014404  /* ct */
     27  #define HOST_PAGE_NUM_FN0      0x00014008  /* cb/ct */
     28  #define HOST_PAGE_NUM_FN1      0x00014108  /* cb/ct */
    [all …]

/linux/drivers/video/fbdev/aty/
mach64_ct.c
      4   * ATI Mach64 CT/VT/GT/LT Support
     46   * ATI Mach64 CT clock synthesis description.
    103   * PLL programming (Mach64 CT family)
    254  if ((err = aty_valid_pll_ct(info, vclk_per, &pll->ct)))  in aty_var_to_pll_ct()
    256  if (M64_HAS(GTB_DSP) && (err = aty_dsp_gt(info, bpp, &pll->ct)))  in aty_var_to_pll_ct()
    258  /*aty_calc_pll_ct(info, &pll->ct);*/  in aty_var_to_pll_ct()
    266  …ret = par->ref_clk_per * pll->ct.pll_ref_div * pll->ct.vclk_post_div_real / pll->ct.vclk_fb_div / …  in aty_pll_to_var_ct()
    268  if(pll->ct.xres > 0) {  in aty_pll_to_var_ct()
    270  ret /= pll->ct.xres;  in aty_pll_to_var_ct()
    293  pll->ct.pll_ext_cntl, pll->ct.pll_gen_cntl, pll->ct.pll_vclk_cntl);  in aty_set_pll_ct()
    [all …]

/linux/drivers/net/ethernet/brocade/bna/
bfi_reg.h
     19  #define HOSTFN0_INT_STATUS     0x00014000  /* cb/ct */
     20  #define HOSTFN1_INT_STATUS     0x00014100  /* cb/ct */
     21  #define HOSTFN2_INT_STATUS     0x00014300  /* ct */
     22  #define HOSTFN3_INT_STATUS     0x00014400  /* ct */
     23  #define HOSTFN0_INT_MSK        0x00014004  /* cb/ct */
     24  #define HOSTFN1_INT_MSK        0x00014104  /* cb/ct */
     25  #define HOSTFN2_INT_MSK        0x00014304  /* ct */
     26  #define HOSTFN3_INT_MSK        0x00014404  /* ct */
     28  #define HOST_PAGE_NUM_FN0      0x00014008  /* cb/ct */
     29  #define HOST_PAGE_NUM_FN1      0x00014108  /* cb/ct */
    [all …]

/linux/net/netfilter/
nf_nat_sip.c
     41  struct nf_conn *ct = nf_ct_get(skb, &ctinfo);  in mangle_packet()
     45  if (nf_ct_protonum(ct) == IPPROTO_TCP) {  in mangle_packet()
     50  if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,  in mangle_packet()
     58  if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,  in mangle_packet()
     70  static int sip_sprintf_addr(const struct nf_conn *ct, char *buffer,  in sip_sprintf_addr()
     73  if (nf_ct_l3num(ct) == NFPROTO_IPV4)  in sip_sprintf_addr()
     83  static int sip_sprintf_addr_port(const struct nf_conn *ct, char *buffer,  in sip_sprintf_addr_port()
     86  if (nf_ct_l3num(ct) == NFPROTO_IPV4)  in sip_sprintf_addr_port()
     99  struct nf_conn *ct = nf_ct_get(skb, &ctinfo);  in map_addr()
    101  struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);  in map_addr()
    [all …]

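Several of the netfilter files in this listing start from the same two calls seen in mangle_packet() above: nf_ct_get() fetches the conntrack entry (and ctinfo) attached to an skb, and nf_ct_protonum()/nf_ct_l3num() branch on the tracked protocol. A minimal kernel-context sketch of that access pattern:

#include <linux/in.h>
#include <net/netfilter/nf_conntrack.h>

/* Returns true only if the skb carries a conntrack entry for a TCP flow. */
static bool skb_ct_is_tcp(const struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	if (!ct)		/* untracked, or conntrack not attached */
		return false;

	return nf_ct_protonum(ct) == IPPROTO_TCP;
}
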
nf_conntrack_core.c
    487  u32 nf_ct_get_id(const struct nf_conn *ct)  in nf_ct_get_id()
    494  a = (unsigned long)ct;  in nf_ct_get_id()
    495  b = (unsigned long)ct->master;  in nf_ct_get_id()
    496  c = (unsigned long)nf_ct_net(ct);  in nf_ct_get_id()
    497  d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,  in nf_ct_get_id()
    498  sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),  in nf_ct_get_id()
    509  clean_from_lists(struct nf_conn *ct)  in clean_from_lists()
    511  hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);  in clean_from_lists()
    512  hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);  in clean_from_lists()
    515  nf_ct_remove_expectations(ct);  in clean_from_lists()
    [all …]

xt_CT.c
     20  static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)  in xt_ct_target()
     26  if (ct) {  in xt_ct_target()
     27  refcount_inc(&ct->ct_general.use);  in xt_ct_target()
     28  nf_ct_set(skb, ct, IP_CT_NEW);  in xt_ct_target()
     30  nf_ct_set(skb, ct, IP_CT_UNTRACKED);  in xt_ct_target()
     40  struct nf_conn *ct = info->ct;  in xt_ct_target_v0()
     42  return xt_ct_target(skb, ct);  in xt_ct_target_v0()
     49  struct nf_conn *ct = info->ct;  in xt_ct_target_v1()
     51  return xt_ct_target(skb, ct);  in xt_ct_target_v1()
     73  xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,  in xt_ct_set_helper()
    [all …]

nf_conntrack_proto_tcp.c
    266  static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)  in tcp_print_conntrack()
    268  if (test_bit(IPS_OFFLOAD_BIT, &ct->status))  in tcp_print_conntrack()
    271  seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);  in tcp_print_conntrack()
    485  const struct nf_conn *ct,  in nf_tcp_log_invalid()
    491  const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));  in nf_tcp_log_invalid()
    503  nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);  in nf_tcp_log_invalid()
    510  tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,  in tcp_in_window()
    515  struct ip_ct_tcp *state = &ct->proto.tcp;  in tcp_in_window()
    536  receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);  in tcp_in_window()
    643  return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,  in tcp_in_window()
    [all …]

nf_conntrack_proto_sctp.c
    134  static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
    136  seq_printf(s, "%s ", sctp_conntrack_names[ct->proto.sctp.state]);
    148  static int do_basic_checks(struct nf_conn *ct,
    175  nf_ct_l4proto_log_invalid(skb, ct, state,  in do_basic_checks()
    240  sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
    248  memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));  in sctp_new()
    276  ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;  in sctp_new()
    280  ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;  in sctp_new()
    286  ct  in sctp_new()
    139  sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)  in sctp_print_conntrack()
    153  do_basic_checks(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned long *map, const struct nf_hook_state *state)  in do_basic_checks()
    245  sctp_new(struct nf_conn *ct, const struct sk_buff *skb, const struct sctphdr *sh, unsigned int dataoff)  in sctp_new()
    332  nf_conntrack_sctp_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state)  in nf_conntrack_sctp_packet()
    537  sctp_can_early_drop(const struct nf_conn *ct)  in sctp_can_early_drop()
    557  sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct, bool destroy)  in sctp_to_nlattr()
    600  nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)  in nlattr_to_sctp()
    [all …]

nf_nat_core.c
     59  const struct nf_conn *ct,  in nf_nat_ipv4_decode_session()
     64  const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;  in nf_nat_ipv4_decode_session()
     67  if (ct->status & statusbit) {  in nf_nat_ipv4_decode_session()
     79  if (ct->status & statusbit) {  in nf_nat_ipv4_decode_session()
     91  const struct nf_conn *ct,  in nf_nat_ipv6_decode_session()
     97  const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;  in nf_nat_ipv6_decode_session()
    100  if (ct->status & statusbit) {  in nf_nat_ipv6_decode_session()
    112  if (ct->status & statusbit) {  in nf_nat_ipv6_decode_session()
    126  const struct nf_conn *ct;  in __nf_nat_decode_session()
    132  ct = nf_ct_get(skb, &ctinfo);  in __nf_nat_decode_session()
    [all …]

nft_ct.c
     57  const struct nf_conn *ct;  in nft_ct_get_eval()
     63  ct = nf_ct_get(pkt->skb, &ctinfo);  in nft_ct_get_eval()
     67  if (ct)  in nft_ct_get_eval()
     79  if (ct == NULL)  in nft_ct_get_eval()
     87  *dest = ct->status;  in nft_ct_get_eval()
     91  *dest = READ_ONCE(ct->mark);  in nft_ct_get_eval()
     96  *dest = ct->secmark;  in nft_ct_get_eval()
    100  *dest = jiffies_to_msecs(nf_ct_expires(ct));  in nft_ct_get_eval()
    103  if (ct->master == NULL)  in nft_ct_get_eval()
    105  help = nfct_help(ct->master);  in nft_ct_get_eval()
    [all …]

nf_conntrack_pptp.c
     89  static void pptp_expectfn(struct nf_conn *ct,  in pptp_expectfn()
     93  struct net *net = nf_ct_net(ct);  in pptp_expectfn()
     97  ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;  in pptp_expectfn()
     98  ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;  in pptp_expectfn()
    104  if (hook && ct->master->status & IPS_NAT_MASK)  in pptp_expectfn()
    105  hook->expectfn(ct, exp);  in pptp_expectfn()
    115  exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t);  in pptp_expectfn()
    127  static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,  in destroy_sibling_or_exp()
    135  pr_debug("trying to timeout ct or exp for tuple ");  in destroy_sibling_or_exp()
    138  zone = nf_ct_zone(ct);  in destroy_sibling_or_exp()
    [all …]

xt_conntrack.c
     40  conntrack_mt_origsrc(const struct nf_conn *ct,  in conntrack_mt_origsrc()
     44  return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,  in conntrack_mt_origsrc()
     49  conntrack_mt_origdst(const struct nf_conn *ct,  in conntrack_mt_origdst()
     53  return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,  in conntrack_mt_origdst()
     58  conntrack_mt_replsrc(const struct nf_conn *ct,  in conntrack_mt_replsrc()
     62  return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,  in conntrack_mt_replsrc()
     67  conntrack_mt_repldst(const struct nf_conn *ct,  in conntrack_mt_repldst()
     71  return conntrack_addrcmp(&ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,  in conntrack_mt_repldst()
     77  const struct nf_conn *ct)  in ct_proto_port_check()
     81  tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;  in ct_proto_port_check()
    [all …]

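The xt_conntrack.c matchers above all index ct->tuplehash[] by direction (IP_CT_DIR_ORIGINAL / IP_CT_DIR_REPLY) and then read src/dst out of the tuple. A small kernel-context sketch of that lookup for the IPv4 original-direction source address:

#include <net/netfilter/nf_conntrack.h>

/* Original-direction IPv4 source address of a tracked connection. */
static __be32 ct_orig_ipv4_saddr(const struct nf_conn *ct)
{
	const struct nf_conntrack_tuple *tuple =
		&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

	return tuple->src.u3.ip;	/* union nf_inet_addr, .ip for IPv4 */
}
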
nf_nat_ovs.c
     10  static int nf_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,  in nf_ct_nat_execute()
     29  if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,  in nf_ct_nat_execute()
     41  if (!nf_nat_icmpv6_reply_translation(skb, ct,  in nf_ct_nat_execute()
     55  if (!nf_nat_initialized(ct, maniptype)) {  in nf_ct_nat_execute()
     61  ? nf_nat_setup_info(ct, range, maniptype)  in nf_ct_nat_execute()
     62  : nf_nat_alloc_null_binding(ct, hooknum);  in nf_ct_nat_execute()
     77  err = nf_nat_packet(ct, ctinfo, hooknum, skb);  in nf_ct_nat_execute()
     85  int nf_ct_nat(struct sk_buff *skb, struct nf_conn *ct,  in nf_ct_nat()
     95  if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))  in nf_ct_nat()
     98  if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&  in nf_ct_nat()
    [all …]

xt_nat.c
     57  struct nf_conn *ct;  in xt_snat_target_v0()
     59  ct = nf_ct_get(skb, &ctinfo);  in xt_snat_target_v0()
     60  WARN_ON(!(ct != NULL &&  in xt_snat_target_v0()
     65  return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);  in xt_snat_target_v0()
     74  struct nf_conn *ct;  in xt_dnat_target_v0()
     76  ct = nf_ct_get(skb, &ctinfo);  in xt_dnat_target_v0()
     77  WARN_ON(!(ct != NULL &&  in xt_dnat_target_v0()
     81  return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);  in xt_dnat_target_v0()
     90  struct nf_conn *ct;  in xt_snat_target_v1()
     92  ct = nf_ct_get(skb, &ctinfo);  in xt_snat_target_v1()
    [all …]

/linux/net/netfilter/ipvs/
ip_vs_nfct.c
     74  struct nf_conn *ct = nf_ct_get(skb, &ctinfo);  in ip_vs_update_conntrack()
     77  if (ct == NULL || nf_ct_is_confirmed(ct) ||  in ip_vs_update_conntrack()
     78  nf_ct_is_dying(ct))  in ip_vs_update_conntrack()
     94  if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&  in ip_vs_update_conntrack()
     95  !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))  in ip_vs_update_conntrack()
    104  new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;  in ip_vs_update_conntrack()
    119  IP_VS_DBG_BUF(7, "%s: Updating conntrack ct=%p, status=0x%lX, "  in ip_vs_update_conntrack()
    121  __func__, ct, ct->status, ctinfo,  in ip_vs_update_conntrack()
    122  ARG_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple));  in ip_vs_update_conntrack()
    123  IP_VS_DBG_BUF(7, "%s: Updating conntrack ct=%p, status=0x%lX, "  in ip_vs_update_conntrack()
    [all …]

/linux/drivers/macintosh/
windfarm.h
     28  int (*set_value)(struct wf_control *ct, s32 val);
     29  int (*get_value)(struct wf_control *ct, s32 *val);
     30  s32 (*get_min)(struct wf_control *ct);
     31  s32 (*get_max)(struct wf_control *ct);
     32  void (*release)(struct wf_control *ct);
     56  extern int wf_register_control(struct wf_control *ct);
     57  extern void wf_unregister_control(struct wf_control *ct);
     58  extern int wf_get_control(struct wf_control *ct);
     59  extern void wf_put_control(struct wf_control *ct);
     61  static inline int wf_control_set_max(struct wf_control *ct)  in wf_control_set_max()
    [all …]

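windfarm.h above exposes a small ops vtable (set_value/get_value/get_min/get_max/release) plus wf_register_control()/wf_unregister_control(). A hedged sketch of what a driver-side control might look like, assuming the ops structure is named wf_control_ops and that struct wf_control has name/ops members as in the mainline header; the demo_fan_* functions and the 0..100 range are invented:

#include "windfarm.h"	/* drivers/macintosh/windfarm.h */

static int demo_fan_set(struct wf_control *ct, s32 val)
{
	/* program the fan controller with the requested speed here */
	return 0;
}

static int demo_fan_get(struct wf_control *ct, s32 *val)
{
	*val = 0;		/* read back the current speed here */
	return 0;
}

static s32 demo_fan_min(struct wf_control *ct) { return 0; }
static s32 demo_fan_max(struct wf_control *ct) { return 100; }

static const struct wf_control_ops demo_fan_ops = {
	.set_value = demo_fan_set,
	.get_value = demo_fan_get,
	.get_min   = demo_fan_min,
	.get_max   = demo_fan_max,
};

static struct wf_control demo_fan = {
	.name = "demo-fan",
	.ops  = &demo_fan_ops,
};

/* From the driver's probe path:  err = wf_register_control(&demo_fan);  */
/* and on removal:                wf_unregister_control(&demo_fan);      */
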
/linux/net/openvswitch/
conntrack.c
     64  struct nf_conn *ct;
    154  static u32 ovs_ct_get_mark(const struct nf_conn *ct)  in ovs_ct_get_mark()
    157  return ct ? READ_ONCE(ct->mark) : 0;  in ovs_ct_get_mark()
    168  static void ovs_ct_get_labels(const struct nf_conn *ct,  in ovs_ct_get_labels()
    173  if (ct) {  in ovs_ct_get_labels()
    174  if (ct->master && !nf_ct_is_confirmed(ct))  in ovs_ct_get_labels()
    175  ct = ct->master;  in ovs_ct_get_labels()
    176  cl = nf_ct_labels_find(ct);  in ovs_ct_get_labels()
    190  key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);  in __ovs_ct_update_key_orig_tp()
    191  key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);  in __ovs_ct_update_key_orig_tp()
    [all …]

/linux/include/net/netfilter/
nf_conntrack_l4proto.h
     31  bool (*can_early_drop)(const struct nf_conn *ct);
     35  struct nf_conn *ct, bool destroy);
     38  int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
     95  int nf_conntrack_icmp_packet(struct nf_conn *ct,
    100  int nf_conntrack_icmpv6_packet(struct nf_conn *ct,
    105  int nf_conntrack_udp_packet(struct nf_conn *ct,
    110  int nf_conntrack_udplite_packet(struct nf_conn *ct,
    115  int nf_conntrack_tcp_packet(struct nf_conn *ct,
    120  int nf_conntrack_dccp_packet(struct nf_conn *ct,
    125  int nf_conntrack_sctp_packet(struct nf_conn *ct,
    [all …]

/linux/kernel/irq/
generic-chip.c
     40  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_mask_disable_reg()
     44  irq_reg_writel(gc, mask, ct->regs.disable);  in irq_gc_mask_disable_reg()
     45  *ct->mask_cache &= ~mask;  in irq_gc_mask_disable_reg()
     60  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_mask_set_bit()
     64  *ct->mask_cache |= mask;  in irq_gc_mask_set_bit()
     65  irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);  in irq_gc_mask_set_bit()
     80  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_mask_clr_bit()
     84  *ct->mask_cache &= ~mask;  in irq_gc_mask_clr_bit()
     85  irq_reg_writel(gc, *ct …  in irq_gc_mask_clr_bit()
    100  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_unmask_enable_reg()
    117  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_ack_set_bit()
    133  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_ack_clr_bit()
    156  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_mask_disable_and_ack_set()
    173  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_eoi()
    222  struct irq_chip_type *ct = gc->chip_types;  in irq_init_generic_chip()
    263  struct irq_chip_type *ct = gc->chip_types;  in irq_gc_init_mask_cache()
    459  struct irq_chip_type *ct;  in irq_map_generic_chip()
    546  struct irq_chip_type *ct = gc->chip_types;  in irq_setup_generic_chip()
    590  struct irq_chip_type *ct = gc->chip_types;  in irq_setup_alt_chip()
    672  struct irq_chip_type *ct = gc->chip_types;  in irq_gc_suspend()
    692  struct irq_chip_type *ct = gc->chip_types;  in irq_gc_resume()
    715  struct irq_chip_type *ct = gc->chip_types;  in irq_gc_shutdown()
    [all …]

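The irq_gc_* helpers above all operate on an irq_chip_type's register offsets and shared mask_cache. A sketch of how a driver typically wires them up with irq_alloc_generic_chip()/irq_setup_generic_chip(); the register offsets, chip name, and interrupt count here are made up for illustration:

#include <linux/errno.h>
#include <linux/irq.h>

#define DEMO_REG_ENABLE		0x00	/* hypothetical register offsets */
#define DEMO_REG_DISABLE	0x04

static int demo_irq_chip_init(void __iomem *reg_base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("demo", 1, irq_base, reg_base,
				    handle_level_irq);
	if (!gc)
		return -ENOMEM;

	ct = gc->chip_types;
	ct->chip.irq_mask   = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->regs.enable     = DEMO_REG_ENABLE;
	ct->regs.disable    = DEMO_REG_DISABLE;

	/* Populate mask_cache and install the chip for 32 interrupts. */
	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
	return 0;
}
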
/linux/net/sched/
act_ct.c
    176  static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,  in tcf_ct_flow_table_add_action_meta()
    188  entry->ct_metadata.mark = READ_ONCE(ct->mark);  in tcf_ct_flow_table_add_action_meta()
    190  /* aligns with the CT reference on the SKB nf_ct_set */  in tcf_ct_flow_table_add_action_meta()
    191  entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;  in tcf_ct_flow_table_add_action_meta()
    195  ct_labels = nf_ct_labels_find(ct);  in tcf_ct_flow_table_add_action_meta()
    203  struct nf_conn *ct,  in tcf_ct_flow_table_add_action_nat()
    207  const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;  in tcf_ct_flow_table_add_action_nat()
    210  if (!(ct->status & IPS_NAT_MASK))  in tcf_ct_flow_table_add_action_nat()
    213  nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);  in tcf_ct_flow_table_add_action_nat()
    228  switch (nf_ct_protonum(ct)) {  in tcf_ct_flow_table_add_action_nat()
    [all …]

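Line 191 of act_ct.c above ("cookie = (unsigned long)ct | ctinfo") reuses the same trick as skb->_nfct: the ctinfo value lives in the low bits of the conntrack pointer, which works because nf_conn allocations are sufficiently aligned. A kernel-context sketch of packing and unpacking that cookie, assuming the NFCT_INFOMASK/NFCT_PTRMASK masks from linux/skbuff.h:

#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>

static unsigned long ct_cookie_pack(struct nf_conn *ct,
				    enum ip_conntrack_info ctinfo)
{
	/* low bits carry ctinfo, the rest is the (aligned) pointer */
	return (unsigned long)ct | ctinfo;
}

static struct nf_conn *ct_cookie_unpack(unsigned long cookie,
					enum ip_conntrack_info *ctinfo)
{
	*ctinfo = cookie & NFCT_INFOMASK;
	return (struct nf_conn *)(cookie & NFCT_PTRMASK);
}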