/linux/drivers/media/usb/pvrusb2/
pvrusb2-ioread.c:
    39  static int pvr2_ioread_init(struct pvr2_ioread *cp)
    43          cp->stream = NULL;
    44          mutex_init(&cp->mutex);
    47          cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE,GFP_KERNEL);
    48          if (!(cp->buffer_storage[idx])) break;
    54          if (!(cp->buffer_storage[idx])) continue;
    55          kfree(cp->buffer_storage[idx]);
    62  static void pvr2_ioread_done(struct pvr2_ioread *cp)
    66          pvr2_ioread_setup(cp,NULL);
    68          if (!(cp->buffer_storage[idx])) continue;
    [more matches omitted]
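
The pvr2_ioread_init()/pvr2_ioread_done() matches above show a common init pattern: allocate a fixed set of buffers, stop at the first failure, and release whatever was already allocated. A minimal user-space sketch of that pattern; BUF_COUNT and BUF_SIZE are illustrative names, not the driver's own constants:

    #include <stdlib.h>

    #define BUF_COUNT 32              /* illustrative buffer count */
    #define BUF_SIZE  (32 * 1024)     /* illustrative buffer size  */

    struct reader {
        char *buf[BUF_COUNT];
    };

    /* Allocate every buffer; on failure, free the ones already allocated. */
    static int reader_init(struct reader *r)
    {
        int i;

        for (i = 0; i < BUF_COUNT; i++) {
            r->buf[i] = malloc(BUF_SIZE);
            if (!r->buf[i])
                break;                /* partial failure: stop allocating */
        }
        if (i == BUF_COUNT)
            return 0;                 /* all buffers allocated */

        while (i-- > 0) {             /* unwind only what succeeded */
            free(r->buf[i]);
            r->buf[i] = NULL;
        }
        return -1;
    }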
|
pvrusb2-context.c:
    in pvr2_context_reset_input_limits():
    237         struct pvr2_channel *cp;
    241         for (cp = mp->mc_first; cp; cp = cp->mc_next) {
    242                 if (!cp->input_mask) continue;
    243                 tmsk &= cp->input_mask;
    276 void pvr2_channel_init(struct pvr2_channel *cp,struct pvr2_context *mp)
    279         cp->hdw = mp->hdw;
    280         cp->mc_head = mp;
    281         cp->mc_next = NULL;
    282         cp->mc_prev = mp->mc_last;
    284         mp->mc_last->mc_next = cp;
    [more matches omitted]
|
/linux/drivers/net/ethernet/sun/
cassini.c:
    111  * also, we need to make cp->lock finer-grained.
    161 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
    230 static void cas_set_link_modes(struct cas *cp);
    232 static inline void cas_lock_tx(struct cas *cp)
    237         spin_lock_nested(&cp->tx_lock[i], i);
    248 #define cas_lock_all_save(cp, flags) \
    250         struct cas *xxxcp = (cp); \
    255 static inline void cas_unlock_tx(struct cas *cp)
    260         spin_unlock(&cp->tx_lock[i - 1]);
    263 #define cas_unlock_all_restore(cp, flags) \
    [more matches omitted]
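
The cas_lock_tx()/cas_unlock_tx() hits take the per-queue TX spinlocks in a fixed index order and drop them in reverse, the standard way to avoid deadlock when several locks must be held at once. A rough user-space analogue using POSIX mutexes; the N_QUEUES count and names are made up for illustration:

    #include <pthread.h>

    #define N_QUEUES 4   /* illustrative queue count */

    static pthread_mutex_t tx_lock[N_QUEUES] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    /* Always acquire in ascending index order ... */
    static void lock_all_tx(void)
    {
        for (int i = 0; i < N_QUEUES; i++)
            pthread_mutex_lock(&tx_lock[i]);
    }

    /* ... and release in the opposite order. */
    static void unlock_all_tx(void)
    {
        for (int i = N_QUEUES; i > 0; i--)
            pthread_mutex_unlock(&tx_lock[i - 1]);
    }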
|
/linux/kernel/sched/
cpudeadline.c:
    26 static void cpudl_heapify_down(struct cpudl *cp, int idx)
    30         int orig_cpu = cp->elements[idx].cpu;
    31         u64 orig_dl = cp->elements[idx].dl;
    33         if (left_child(idx) >= cp->size)
    45                 if ((l < cp->size) && dl_time_before(orig_dl,
    46                                                      cp->elements[l].dl)) {
    48                         largest_dl = cp->elements[l].dl;
    50                 if ((r < cp->size) && dl_time_before(largest_dl,
    51                                                      cp->elements[r].dl))
    58                 cp->elements[idx].cpu = cp->elements[largest].cpu;
    [more matches omitted]
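
cpudl_heapify_down() is the sift-down step of an array-backed max-heap keyed by deadline: compare the element with its children and move it down until neither child has a larger key. A generic sketch over a plain u64 array (the real code keeps a cpu/dl pair per element and avoids full swaps; this is only the shape of the algorithm):

    #include <stdint.h>
    #include <stddef.h>

    static inline size_t left_child(size_t i)  { return 2 * i + 1; }
    static inline size_t right_child(size_t i) { return 2 * i + 2; }

    /* Sift heap[idx] down until both children are <= it (max-heap). */
    static void heapify_down(uint64_t *heap, size_t size, size_t idx)
    {
        for (;;) {
            size_t l = left_child(idx), r = right_child(idx), largest = idx;

            if (l < size && heap[l] > heap[largest])
                largest = l;
            if (r < size && heap[r] > heap[largest])
                largest = r;
            if (largest == idx)
                break;                      /* heap property restored */

            uint64_t tmp = heap[idx];       /* swap with the larger child */
            heap[idx] = heap[largest];
            heap[largest] = tmp;
            idx = largest;                  /* continue one level down */
        }
    }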
|
/linux/drivers/net/ethernet/realtek/
8139cp.c:
    1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
    51 #define DRV_NAME "8139cp"
    92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
    98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
    115 #define TX_BUFFS_AVAIL(CP) \
    116         (((CP)->tx_tail <= (CP)->tx_head) ? \
    117           (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
    118           (CP)->tx_tail - (CP)->tx_head - 1)
    355 #define cpr8(reg)       readb(cp->regs + (reg))
    356 #define cpr16(reg)      readw(cp->regs + (reg))
    [more matches omitted]
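
TX_BUFFS_AVAIL() computes how many descriptors remain free in a circular TX ring, always leaving one slot unused so that head == tail unambiguously means "empty". A stand-alone version of the same arithmetic; RING_SIZE here is an arbitrary example value, not the driver's CP_TX_RING_SIZE:

    #include <assert.h>

    #define RING_SIZE 64   /* illustrative ring size */

    /* Free slots in a ring where 'head' is the producer index and 'tail'
     * the consumer index; one slot stays empty to distinguish full/empty. */
    static unsigned int tx_buffs_avail(unsigned int head, unsigned int tail)
    {
        return (tail <= head) ? tail + (RING_SIZE - 1) - head
                              : tail - head - 1;
    }

    int main(void)
    {
        assert(tx_buffs_avail(0, 0) == RING_SIZE - 1);  /* empty ring */
        assert(tx_buffs_avail(5, 5) == RING_SIZE - 1);
        assert(tx_buffs_avail(5, 6) == 0);              /* full ring */
        return 0;
    }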
|
/linux/drivers/net/ethernet/broadcom/
cnic.c:
    in cnic_ctx_wr():
    191         struct cnic_local *cp = dev->cnic_priv;
    192         struct cnic_eth_dev *ethdev = cp->ethdev;
    in cnic_ctx_tbl_wr():
    206         struct cnic_local *cp = dev->cnic_priv;
    207         struct cnic_eth_dev *ethdev = cp->ethdev;
    in cnic_ring_ctl():
    220         struct cnic_local *cp = dev->cnic_priv;
    221         struct cnic_eth_dev *ethdev = cp->ethdev;
    in cnic_reg_wr_ind():
    238         struct cnic_local *cp = dev->cnic_priv;
    239         struct cnic_eth_dev *ethdev = cp->ethdev;
    in cnic_reg_rd_ind():
    252         struct cnic_local *cp = dev->cnic_priv;
    253         struct cnic_eth_dev *ethdev = cp->ethdev;
    [more matches omitted]
|
/linux/net/rds/
threads.c:
    74 void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
    76         if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
    80                           atomic_read(&cp->cp_state));
    81                 rds_conn_path_drop(cp, false);
    86                   cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);
    88         cp->cp_reconnect_jiffies = 0;
    89         set_bit(0, &cp->cp_conn->c_map_queued);
    91         if (!rds_destroy_pending(cp->cp_conn)) {
    92                 queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
    93                 queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
    [more matches omitted]
|
/linux/net/netfilter/ipvs/
ip_vs_proto_udp.c:
    in udp_snat_handler():
    136                     struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
    144         if (cp->af == AF_INET6 && iph->fragoffs)
    153         if (unlikely(cp->app != NULL)) {
    157                 if (!udp_csum_check(cp->af, skb, pp))
    163                 if (!(ret = ip_vs_app_pkt_out(cp, skb, iph)))
    173         udph->source = cp->vport;
    179                 udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
    184                 udp_fast_csum_update(cp …
    cp also appears as an argument of:
    137 udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
    220 udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
    395 udp_app_conn_bind(struct ip_vs_conn *cp)
    452 udp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd)
    [more matches omitted]
ip_vs_proto_tcp.c:
    in tcp_snat_handler():
    147                     struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
    155         if (cp->af == AF_INET6 && iph->fragoffs)
    164         if (unlikely(cp->app != NULL)) {
    168                 if (!tcp_csum_check(cp->af, skb, pp))
    172                 if (!(ret = ip_vs_app_pkt_out(cp, skb, iph)))
    182         tcph->source = cp->vport;
    186                 tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
    191                 tcp_fast_csum_update(cp …
    cp also appears as an argument of:
    148 tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
    226 tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
    512 set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, int direction, struct tcphdr *th)
    588 tcp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd)
    651 tcp_app_conn_bind(struct ip_vs_conn *cp)
    693 ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
    [more matches omitted]
ip_vs_nfct.c:
    70 ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
    81         if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
    85         if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
    93         if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
    108                 new_tuple.src.u3 = cp->daddr;
    111                         new_tuple.src.u.tcp.port = cp->dport;
    113                 new_tuple.dst.u3 = cp->vaddr;
    116                         new_tuple.dst.u.tcp.port = cp->vport;
    127         IP_VS_DBG_BUF(7, "%s: Updated conntrack ct=%p for cp=" FMT_CONN "\n",
    128                       __func__, ct, ARG_CONN(cp));
    cp also appears in:
    71 ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)   (argument)
    144 struct ip_vs_conn *cp;   (local in ip_vs_nfct_expect_callback())
    212 ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, struct ip_vs_conn *cp, u_int8_t proto, const __be16 port, int from_rs)   (argument)
    242 ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)   (argument)
    [more matches omitted]
ip_vs_xmit.c:
    in ip_vs_tunnel_xmit_prepare():
    593                                    struct ip_vs_conn *cp)
    598         if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
    in ip_vs_nat_send_or_cont():
    626                                          struct ip_vs_conn *cp, int local)
    631         if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
    634                 ip_vs_update_conntrack(skb, cp, 1);
    639         if (!local || cp->vport != cp->dport ||
    640             !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp …
    cp is also an argument of (definition lines, parameter lists abbreviated):
    591 ip_vs_tunnel_xmit_prepare(skb, cp)
    624 ip_vs_nat_send_or_cont(pf, skb, cp, local)
    655 ip_vs_send_or_cont(pf, skb, cp, local)
    679 ip_vs_null_xmit(skb, cp, pp, ipvsh)
    693 ip_vs_bypass_xmit(skb, cp, pp, ipvsh)
    718 ip_vs_bypass_xmit_v6(skb, cp, pp, ipvsh)
    746 ip_vs_nat_xmit(skb, cp, pp, ipvsh)
    830 ip_vs_nat_xmit_v6(skb, cp, pp, ipvsh)
    996 ipvs_gue_encap(net, skb, cp, next_protocol)
    1070 ipvs_gre_encap(net, skb, cp, next_protocol)
    1106 ip_vs_tunnel_xmit(skb, cp, pp, ipvsh)
    1251 ip_vs_tunnel_xmit_v6(skb, cp, pp, ipvsh)
    1398 ip_vs_dr_xmit(skb, cp, pp, ipvsh)
    1428 ip_vs_dr_xmit_v6(skb, cp, pp, ipvsh)
    1463 ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, iph)
    1548 ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, ipvsh)
    [more matches omitted]
/linux/tools/perf/util/
call-path.c:
    14 static void call_path__init(struct call_path *cp, struct call_path *parent,
    17         cp->parent = parent;
    18         cp->sym = sym;
    19         cp->ip = sym ? 0 : ip;
    20         cp->db_id = 0;
    21         cp->in_kernel = in_kernel;
    22         RB_CLEAR_NODE(&cp->rb_node);
    23         cp->children = RB_ROOT;
    in call_path__new():
    55         struct call_path *cp;
    70         cp = &cpb->cp[n];
    [more matches omitted]
|
/linux/scripts/
unifdef.c:
    in parseline():
    642         const char *cp;
    659         cp = skipcomment(tline);
    661         if (*cp == '#') {
    664                 cp = skipcomment(cp + 1);
    665         } else if (*cp != '\0')
    669                 keyword = tline + (cp - tline);
    670                 cp = skipsym(cp);
    671                 kwlen = cp - keyword;
    673                 if (strncmp(cp, "\\\r\n", 3) == 0 ||
    674                     strncmp(cp, "\\\n", 2) == 0)
    [more matches omitted]
|
/linux/arch/riscv/kvm/
vcpu_sbi_replace.c:
    in kvm_sbi_ext_time_handler():
    20         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
    23         if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
    30         next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
    32         next_cycle = (u64)cp->a0;
    in kvm_sbi_ext_ipi_handler():
    51         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
    52         unsigned long hmask = cp->a0;
    53         unsigned long hbase = cp->a1;
    56         if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
    in kvm_sbi_ext_rfence_handler():
    95         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
    96         unsigned long hmask = cp->a0;
    [more matches omitted]
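
On 32-bit RISC-V the 64-bit timer value arrives split across two registers, and the handler above reassembles it as ((u64)a1 << 32) | a0. A tiny stand-alone illustration of that split/reassemble step (the function name is made up here):

    #include <stdint.h>
    #include <assert.h>

    /* Rebuild a 64-bit value from its low/high 32-bit halves (a0/a1). */
    static uint64_t from_halves(uint32_t lo, uint32_t hi)
    {
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        uint64_t cycle = 0x0123456789abcdefULL;
        uint32_t lo = (uint32_t)cycle;           /* what would go in a0 */
        uint32_t hi = (uint32_t)(cycle >> 32);   /* what would go in a1 */

        assert(from_halves(lo, hi) == cycle);
        return 0;
    }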
|
vcpu_sbi_v01.c:
    in kvm_sbi_ext_v01_handler():
    24         struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
    28         switch (cp->a7) {
    39                 next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
    41                 next_cycle = (u64)cp->a0;
    49                 if (cp->a0)
    50                         hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap);
    71                 if (cp->a0)
    72                         hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap);
    78                 if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
    80                 else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
    [more matches omitted]
|
/linux/drivers/tty/vt/
ucs.c:
    in interval16_cmp():
    25         u16 cp = *(u16 *)key;
    28         if (cp < entry->first)
    30         if (cp > entry->last)
    in interval32_cmp():
    37         u32 cp = *(u32 *)key;
    40         if (cp < entry->first)
    42         if (cp > entry->last)
    47 static bool cp_in_range16(u16 cp, const struct ucs_interval16 *ranges, size_t size)
    49         if (cp < ranges[0].first || cp > ranges[size - 1].last)
    52         return __inline_bsearch(&cp, ranges, size, sizeof(*ranges),
    56 static bool cp_in_range32(u32 cp, const struct ucs_interval32 *ranges, size_t size)
    [more matches omitted]
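
These ucs.c matches test whether a code point falls inside a sorted, non-overlapping set of ranges: a cheap bounds check first, then a binary search whose comparator reports which side of an interval the key lies on. A user-space sketch using the standard bsearch(); the struct name and sample ranges are illustrative only:

    #include <stdlib.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    struct range16 { uint16_t first, last; };

    /* Negative if key is below the interval, positive if above, zero if inside.
     * Requires the ranges to be sorted and non-overlapping. */
    static int range16_cmp(const void *key, const void *elt)
    {
        uint16_t cp = *(const uint16_t *)key;
        const struct range16 *r = elt;

        if (cp < r->first)
            return -1;
        if (cp > r->last)
            return 1;
        return 0;
    }

    static bool cp_in_ranges(uint16_t cp, const struct range16 *ranges, size_t n)
    {
        if (cp < ranges[0].first || cp > ranges[n - 1].last)
            return false;   /* quick reject before searching */
        return bsearch(&cp, ranges, n, sizeof(*ranges), range16_cmp) != NULL;
    }

    int main(void)
    {
        static const struct range16 sample[] = {   /* illustrative data only */
            { 0x0300, 0x036f }, { 0x200b, 0x200f }, { 0xfe00, 0xfe0f },
        };
        assert(cp_in_ranges(0x0301, sample, 3));
        assert(!cp_in_ranges(0x0041, sample, 3));
        return 0;
    }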
|
/linux/fs/nilfs2/
cpfile.c:
    in nilfs_cpfile_block_add_valid_checkpoints():
    73         struct nilfs_checkpoint *cp;
    76         cp = kmap_local_folio(bh->b_folio,
    78         count = le32_to_cpu(cp->cp_checkpoints_count) + n;
    79         cp->cp_checkpoints_count = cpu_to_le32(count);
    80         kunmap_local(cp);
    in nilfs_cpfile_block_sub_valid_checkpoints():
    89         struct nilfs_checkpoint *cp;
    92         cp = kmap_local_folio(bh->b_folio,
    94         WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
    95         count = le32_to_cpu(cp->cp_checkpoints_count) - n;
    96         cp->cp_checkpoints_count = cpu_to_le32(count);
    [more matches omitted]
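
The cpfile.c hits adjust an on-disk little-endian counter: convert to CPU byte order, add or subtract, convert back. A minimal sketch of that round-trip under the assumption of user space, using the <endian.h> helpers rather than the kernel's le32_to_cpu()/cpu_to_le32(); the struct and field names are illustrative:

    #include <stdint.h>
    #include <endian.h>   /* htole32()/le32toh(); available on glibc and the BSDs */
    #include <assert.h>

    /* A field stored little-endian on disk regardless of host byte order. */
    struct checkpoint_block {
        uint32_t checkpoints_count_le;
    };

    static void add_valid_checkpoints(struct checkpoint_block *blk, uint32_t n)
    {
        uint32_t count = le32toh(blk->checkpoints_count_le);  /* disk -> CPU */
        blk->checkpoints_count_le = htole32(count + n);       /* CPU -> disk */
    }

    int main(void)
    {
        struct checkpoint_block blk = { .checkpoints_count_le = htole32(5) };

        add_valid_checkpoints(&blk, 3);
        assert(le32toh(blk.checkpoints_count_le) == 8);
        return 0;
    }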
|
/linux/net/bluetooth/
hci_sync.c:
    in hci_update_eir_sync():
    907         struct hci_cp_write_eir cp;
    923         memset(&cp, 0, sizeof(cp));
    925         eir_create(hdev, cp.data);
    927         if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
    930         memcpy(hdev->eir, cp.data, sizeof(cp.data));
    932         return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
    in hci_disable_ext_adv_instance_sync():
    1154        struct hci_cp_le_set_ext_adv_enable *cp;
    1156        u8 data[sizeof(*cp) + sizeof(*set) * 1];
    1173        cp = (void *)data;
    1174        set = (void *)cp->data;
    [more matches omitted]
|
mgmt.c:
    in mgmt_get_adv_discov_flags():
    969         struct mgmt_mode *cp = cmd->param;
    970                 if (cp->val == 0x01)
    972                 else if (cp->val == 0x02)
    in mgmt_get_connectable():
    993         struct mgmt_mode *cp = cmd->param;
    995                 return cp->val;
    in mgmt_set_powered_complete():
    1335        struct mgmt_mode *cp;
    1341        cp = cmd->param;
    1346        if (cp->val) {
    1358        if (cp->val)
    in set_powered_sync():
    1371        struct mgmt_mode cp;
    [more matches omitted]
|
/linux/drivers/accessibility/speakup/
kobjects.c:
    in chars_chartab_show():
    34         char *cp;
    49                 cp = "B_CTL";
    51                 cp = "WDLM";
    53                 cp = "A_PUNC";
    55                 cp = "PUNC";
    57                 cp = "NUM";
    59                 cp = "A_CAP";
    61                 cp = "ALPHA";
    63                 cp = "B_CAPSYM";
    65                 cp = "B_SYM";
    [more matches omitted]
|
/linux/include/sound/
seq_midi_emul.h:
    134 #define SNDRV_GM_BANK_SELECT(cp)        (((cp)->control[0]<<7)|((cp)->control[32]))
    135 #define SNDRV_GM_MODULATION_WHEEL(cp)   (((cp)->control[1]<<7)|((cp)->control[33]))
    136 #define SNDRV_GM_BREATH(cp)             (((cp)->control[2]<<7)|((cp)->control[34]))
    137 #define SNDRV_GM_FOOT_PEDAL(cp)         (((cp)->control[4]<<7)|((cp)->control[36]))
    138 #define SNDRV_GM_PORTAMENTO_TIME(cp)    (((cp)->control[5]<<7)|((cp)->control[37]))
    139 #define SNDRV_GM_DATA_ENTRY(cp)         (((cp)->control[6]<<7)|((cp)->control[38]))
    140 #define SNDRV_GM_VOLUME(cp)             (((cp)->control[7]<<7)|((cp)->control[39]))
    141 #define SNDRV_GM_BALANCE(cp)            (((cp)->control[8]<<7)|((cp)->control[40]))
    142 #define SNDRV_GM_PAN(cp)                (((cp)->control[10]<<7)|((cp)->control[42]))
    143 #define SNDRV_GM_EXPRESSION(cp)         (((cp)->control[11]<<7)|((cp)->control[43]))
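
These macros combine a MIDI controller's coarse value (MSB, controllers 0-31) with its fine value (LSB, controllers 32-63) into one 14-bit quantity: shift the 7-bit MSB left by 7 and OR in the 7-bit LSB. A small stand-alone equivalent of that arithmetic (the helper name is made up):

    #include <stdint.h>
    #include <assert.h>

    /* Combine the 7-bit MSB/LSB halves of a 14-bit MIDI controller value. */
    static uint16_t midi14(uint8_t msb, uint8_t lsb)
    {
        return ((uint16_t)(msb & 0x7f) << 7) | (lsb & 0x7f);
    }

    int main(void)
    {
        assert(midi14(0x00, 0x00) == 0);
        assert(midi14(0x7f, 0x7f) == 0x3fff);   /* full-scale 14-bit value */
        assert(midi14(0x40, 0x00) == 0x2000);   /* e.g. a centred control */
        return 0;
    }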
|
/linux/drivers/mailbox/
mailbox-th1520.c:
    112 static void th1520_mbox_chan_write(struct th1520_mbox_con_priv *cp, u32 val,
    116                 iowrite32(val, cp->comm_remote_base + offs);
    118                 iowrite32(val, cp->comm_local_base + offs);
    121 static u32 th1520_mbox_chan_read(struct th1520_mbox_con_priv *cp, u32 offs,
    125                 return ioread32(cp->comm_remote_base + offs);
    127                 return ioread32(cp->comm_local_base + offs);
    130 static void th1520_mbox_chan_rmw(struct th1520_mbox_con_priv *cp, u32 off,
    133         struct th1520_mbox_priv *priv = to_th1520_mbox_priv(cp->chan->mbox);
    138         val = th1520_mbox_chan_read(cp, off, is_remote);
    141         th1520_mbox_chan_write(cp, val, off, is_remote);
    [more matches omitted]
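
th1520_mbox_chan_rmw() is the usual read-modify-write helper for device registers: read the current value, clear some bits, set others, write it back. A memory-backed sketch of the same idea, with plain loads/stores standing in for ioread32()/iowrite32() and a simplified parameter list:

    #include <stdint.h>
    #include <assert.h>

    /* Read-modify-write a 32-bit "register": clear 'clr' bits, set 'set' bits. */
    static void reg_rmw(volatile uint32_t *reg, uint32_t set, uint32_t clr)
    {
        uint32_t val = *reg;   /* would be ioread32() against real hardware */

        val &= ~clr;
        val |= set;
        *reg = val;            /* would be iowrite32() */
    }

    int main(void)
    {
        uint32_t fake_reg = 0xff00ff00;

        reg_rmw(&fake_reg, 0x0000000f, 0xff000000);
        assert(fake_reg == 0x0000ff0f);
        return 0;
    }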
|
/linux/drivers/s390/cio/
vfio_ccw_cp.c:
    318 static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
    334         list_add_tail(&chain->next, &cp->ccwchain_list);
    366  * @cp: channel_program on which to perform the operation
    376 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
    378         struct ccw1 *ccw = cp->guest_cp;
    404 static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
    409         list_for_each_entry(chain, &cp->ccwchain_list, next) {
    419                              struct channel_program *cp);
    421 static int ccwchain_handle_ccw(dma32_t cda, struct channel_program *cp)
    424                 &container_of(cp, struct vfio_ccw_private, cp)->vdev;
    [more matches omitted]
|
/linux/kernel/debug/kdb/
kdb_io.c:
    191  * @cp: Cursor position, pointer the character in buffer where the cursor
    207 static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
    210         if (cp > buffer)
    211                 kdb_printf("%.*s", (int)(cp - buffer), buffer);
    in kdb_read():
    237         char *cp = buffer;
    257                 cp += len;
    259                         cp--;
    262         lastchar = cp;
    263         *cp = '\0';
    271                 if (cp > buffer) {
    [more matches omitted]
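
kdb_position_cursor() repositions the cursor on a simple console by re-emitting the prompt and then only the part of the buffer that lies before the cursor, so the terminal cursor ends up at 'cp' without any cursor-addressing escapes. A terminal-agnostic sketch of the same trick (a carriage return is assumed here to return to column 0, which the kdb code handles in its caller):

    #include <stdio.h>

    /* Put the cursor at position 'cp' within 'buffer' by reprinting the prompt
     * and the prefix of the line; needs no cursor-addressing support. */
    static void position_cursor(const char *prompt, const char *buffer,
                                const char *cp)
    {
        printf("\r%s", prompt);
        if (cp > buffer)
            printf("%.*s", (int)(cp - buffer), buffer);
        fflush(stdout);
    }

    int main(void)
    {
        char line[] = "help history";

        /* Pretend the user moved the cursor four characters into the line. */
        position_cursor("kdb> ", line, line + 4);
        return 0;
    }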
|
/linux/drivers/scsi/sym53c8xx_2/
sym_hipd.c:
    46 static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp);
    47 static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp);
    48 static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp);
    60 static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
    62         sym_print_addr(cp->cmd, "%s: ", label);
    1406 static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
    1408         struct sym_tcb *tp = &np->target[cp->target];
    1449         cp->nego_status = nego;
    1452         tp->nego_cp = cp;       /* Keep track a nego will be performed */
    1454         sym_print_nego_msg(np, cp->target, …
    [more matches omitted]
|