| /linux/arch/x86/kvm/ |
| emulate.c |
    195  int (*execute)(struct x86_emulate_ctxt *ctxt);
    203  int (*check_perm)(struct x86_emulate_ctxt *ctxt);
    249  static void writeback_registers(struct x86_emulate_ctxt *ctxt)  in writeback_registers() argument
    251  unsigned long dirty = ctxt->regs_dirty;  in writeback_registers()
    255  ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);  in writeback_registers()
    258  static void invalidate_registers(struct x86_emulate_ctxt *ctxt)  in invalidate_registers() argument
    260  ctxt->regs_dirty = 0;  in invalidate_registers()
    261  ctxt->regs_valid = 0;  in invalidate_registers()
    278  static int em_##op(struct x86_emulate_ctxt *ctxt) \
    280  unsigned long flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; \
    [all …]
|
| kvm_emulate.h |
    106  void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
    112  ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
    119  void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
    128  int (*read_std)(struct x86_emulate_ctxt *ctxt,
    141  int (*write_std)(struct x86_emulate_ctxt *ctxt,
    151  int (*fetch)(struct x86_emulate_ctxt *ctxt,
    161  int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
    172  int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
    185  int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
    191  void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
    [all …]
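Taken together, the emulate.c and kvm_emulate.h matches show the emulator's register-caching contract: guest GPRs are accessed through the x86_emulate_ops callbacks into a local _regs[] cache, and writeback_registers() flushes back only the entries flagged in the regs_dirty bitmap. A minimal sketch of that pattern, assuming NR_EMULATOR_GPRS and the field layout implied by the matches above:

```c
#include <linux/bitops.h>
#include <linux/types.h>

#define NR_EMULATOR_GPRS 16	/* assumption: x86-64 GPR count */

struct x86_emulate_ctxt;

/* Callback table, reduced to the two accessors this sketch needs. */
struct x86_emulate_ops {
	ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
	void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
};

struct x86_emulate_ctxt {
	const struct x86_emulate_ops *ops;
	unsigned long regs_valid;	/* bitmap: _regs[] entries read so far */
	unsigned long regs_dirty;	/* bitmap: _regs[] entries modified */
	ulong _regs[NR_EMULATOR_GPRS];
};

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dirty = ctxt->regs_dirty;
	unsigned reg;

	/* Flush only the GPRs the emulated instruction actually touched. */
	for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	/* Drop the cache; the next access re-reads through ->read_gpr(). */
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}
```

The bitmap keeps exits cheap: an instruction that touches two registers causes two write_gpr() calls on writeback, not sixteen.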
|
| /linux/arch/arm64/kvm/hyp/include/hyp/ |
| sysreg-sr.h |
    19   static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
    21   static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)  in ctxt_to_vcpu() argument
    23   struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;  in ctxt_to_vcpu()
    26   vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);  in ctxt_to_vcpu()
    31   static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)  in ctxt_is_guest() argument
    33   return host_data_ptr(host_ctxt) != ctxt;  in ctxt_is_guest()
    36   static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)  in ctxt_mdscr_el1() argument
    38   struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);  in ctxt_mdscr_el1()
    40   if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu))  in ctxt_mdscr_el1()
    43   return &ctxt_sys_reg(ctxt, MDSCR_EL1);  in ctxt_mdscr_el1()
    [all …]
|
| switch.h |
    394  hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);  in ___deactivate_traps()
    442  &vcpu->arch.ctxt.fp_regs.fpsr,  in __hyp_sve_restore_guest()
    596  __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);  in kvm_hyp_handle_fpsimd()
    673  static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)  in hyp_timer_get_offset() argument
    677  if (ctxt->offset.vm_offset)  in hyp_timer_get_offset()
    678  offset += *kern_hyp_va(ctxt->offset.vm_offset);  in hyp_timer_get_offset()
    679  if (ctxt->offset.vcpu_offset)  in hyp_timer_get_offset()
    680  offset += *kern_hyp_va(ctxt->offset.vcpu_offset);  in hyp_timer_get_offset()
    685  static inline u64 compute_counter_value(struct arch_timer_context *ctxt)  in compute_counter_value() argument
    687  return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);  in compute_counter_value()
    [all …]
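hyp_timer_get_offset() and compute_counter_value() in the matches above show how the guest-visible counter is derived: optional per-VM and per-vCPU offsets are summed, then subtracted from the raw physical counter. A hedged sketch of that arithmetic, with the hypervisor address translation (kern_hyp_va()) elided and the struct layout assumed from the matches:

```c
#include <linux/types.h>

/* Sketch: both offset parts are optional; either pointer may be NULL. */
struct arch_timer_offset_sketch {
	u64 *vm_offset;		/* shared by every vCPU of the VM */
	u64 *vcpu_offset;	/* extra offset private to this vCPU */
};

struct arch_timer_context_sketch {
	struct arch_timer_offset_sketch offset;
};

static u64 timer_get_offset_sketch(struct arch_timer_context_sketch *ctxt)
{
	u64 offset = 0;

	if (ctxt->offset.vm_offset)
		offset += *ctxt->offset.vm_offset;
	if (ctxt->offset.vcpu_offset)
		offset += *ctxt->offset.vcpu_offset;

	return offset;
}

/* Guest-visible counter = raw physical counter minus the composed offset. */
static u64 compute_counter_value_sketch(struct arch_timer_context_sketch *ctxt,
					u64 cntpct_el0)
{
	return cntpct_el0 - timer_get_offset_sketch(ctxt);
}
```

Splitting the offset into VM and vCPU parts lets a whole VM be time-shifted once while still allowing per-vCPU adjustment on top.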
|
| debug-sr.h |
    107  struct kvm_cpu_context *ctxt)  in __debug_save_state() argument
    117  ctxt_sys_reg(ctxt, MDCCINT_EL1) = read_sysreg(mdccint_el1);  in __debug_save_state()
    121  struct kvm_cpu_context *ctxt)  in __debug_restore_state() argument
    131  write_sysreg(ctxt_sys_reg(ctxt, MDCCINT_EL1), mdccint_el1);  in __debug_restore_state()
    145  guest_ctxt = &vcpu->arch.ctxt;  in __debug_switch_to_guest_common()
    164  guest_ctxt = &vcpu->arch.ctxt;  in __debug_switch_to_host_common()
|
| /linux/drivers/net/ethernet/intel/ice/ |
| ice_vsi_vlan_lib.c |
    94   struct ice_vsi_ctx *ctxt;  in ice_vsi_manage_vlan_insertion() local
    97   ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_manage_vlan_insertion()
    98   if (!ctxt)  in ice_vsi_manage_vlan_insertion()
    105  ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;  in ice_vsi_manage_vlan_insertion()
    108  ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &  in ice_vsi_manage_vlan_insertion()
    111  ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);  in ice_vsi_manage_vlan_insertion()
    113  err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);  in ice_vsi_manage_vlan_insertion()
    120  vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;  in ice_vsi_manage_vlan_insertion()
    122  kfree(ctxt);  in ice_vsi_manage_vlan_insertion()
    134  struct ice_vsi_ctx *ctxt;  in ice_vsi_manage_vlan_stripping() local
    [all …]
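ice_vsi_manage_vlan_insertion() above follows the driver's standard VSI-update shape: allocate a scratch ice_vsi_ctx, fill in only the section being changed, flag that section valid, push the update to firmware, and mirror the accepted result into the cached VSI info. A condensed, hedged sketch of that flow; the helper name is hypothetical, error paths are reduced to the essentials, and the ice driver's internal headers are assumed:

```c
#include <linux/slab.h>

/* Hypothetical helper condensing the flow seen in the matches above. */
static int ice_vsi_set_inner_vlan_flags_sketch(struct ice_hw *hw,
					       struct ice_vsi *vsi, u8 flags)
{
	struct ice_vsi_ctx *ctxt;
	int err;

	/* Scratch context: firmware applies only sections flagged valid. */
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info.inner_vlan_flags = flags;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (!err)
		/* Keep the software copy in sync with what firmware accepted. */
		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;

	kfree(ctxt);
	return err;
}
```

The valid_sections mask is what makes the scratch context safe: every other field in it is zero and is ignored by firmware.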
|
| /linux/kernel/printk/ |
| nbcon.c |
    207  static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)  in nbcon_seq_try_update() argument
    209  unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);  in nbcon_seq_try_update()
    210  struct console *con = ctxt->console;  in nbcon_seq_try_update()
    214  ctxt->seq = new_seq;  in nbcon_seq_try_update()
    216  ctxt->seq = nbcon_seq_read(con);  in nbcon_seq_try_update()
    243  static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,  in nbcon_context_try_acquire_direct() argument
    247  struct console *con = ctxt->console;  in nbcon_context_try_acquire_direct()
    270  if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)  in nbcon_context_try_acquire_direct()
    283  new.prio = ctxt->prio;  in nbcon_context_try_acquire_direct()
    351  static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,  in nbcon_context_try_acquire_requested() argument
    [all …]
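The match at line 270 is the core of nbcon's direct-acquisition rule: an acquiring context must strictly outrank both the current owner and any pending handover request; ties lose. A minimal sketch of just that gate, with the state fields assumed from the matches above and the surrounding cmpxchg loop elided:

```c
#include <linux/types.h>

/* Sketch: per-console ownership state, reduced to the priority fields. */
struct nbcon_state_sketch {
	unsigned int prio;	/* priority of the current owner */
	unsigned int req_prio;	/* priority of a pending handover request */
};

struct nbcon_context_sketch {
	unsigned int prio;	/* priority of the would-be owner */
};

static bool nbcon_may_acquire_direct(const struct nbcon_context_sketch *ctxt,
				     const struct nbcon_state_sketch *cur)
{
	/* Equal priority is not enough: the taker must outrank both. */
	if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
		return false;
	return true;
}
```

Requiring a strict inequality prevents two same-priority printers from stealing the console back and forth from each other.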
|
| /linux/net/sunrpc/xprtrdma/ |
| svc_rdma_sendto.c |
    80   * ctxt, the Send WR is posted, and sendto returns.
    120  struct svc_rdma_send_ctxt *ctxt;  in svc_rdma_send_ctxt_alloc() local
    126  ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),  in svc_rdma_send_ctxt_alloc()
    128  if (!ctxt)  in svc_rdma_send_ctxt_alloc()
    131  ctxt->sc_pages = kcalloc_node(pages, sizeof(struct page *),  in svc_rdma_send_ctxt_alloc()
    133  if (!ctxt->sc_pages)  in svc_rdma_send_ctxt_alloc()
    135  ctxt->sc_maxpages = pages;  in svc_rdma_send_ctxt_alloc()
    144  svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);  in svc_rdma_send_ctxt_alloc()
    146  ctxt  in svc_rdma_send_ctxt_alloc()
    169  struct svc_rdma_send_ctxt *ctxt;  svc_rdma_send_ctxts_destroy() local
    192  struct svc_rdma_send_ctxt *ctxt;  svc_rdma_send_ctxt_get() local
    225  svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)  svc_rdma_send_ctxt_release() argument
    253  struct svc_rdma_send_ctxt *ctxt;  svc_rdma_send_ctxt_put_async() local
    267  svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)  svc_rdma_send_ctxt_put() argument
    299  struct svc_rdma_send_ctxt *ctxt =  svc_rdma_wc_send() local
    339  svc_rdma_post_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)  svc_rdma_post_send() argument
    567  struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;  svc_rdma_page_dma_map() local
    854  svc_rdma_save_io_pages(struct svc_rqst *rqstp, struct svc_rdma_send_ctxt *ctxt)  svc_rdma_save_io_pages() argument
    [all …]
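svc_rdma_send_ctxt_alloc() above sizes its allocation with struct_size(), the kernel's overflow-checked idiom for a struct that ends in a flexible array member. A hedged sketch of the idiom with a simplified element type (the real context embeds struct ib_sge entries and many more fields):

```c
#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch: a send context ending in a flexible array of SGE-like entries. */
struct sge_sketch {
	u64 addr;
	u32 length;
};

struct send_ctxt_sketch {
	unsigned int sc_max_sges;
	struct sge_sketch sc_sges[];	/* flexible array member */
};

static struct send_ctxt_sketch *send_ctxt_alloc_sketch(unsigned int max_sges,
						       int node)
{
	struct send_ctxt_sketch *ctxt;

	/*
	 * struct_size(ctxt, sc_sges, max_sges) computes
	 * sizeof(*ctxt) + max_sges * sizeof(ctxt->sc_sges[0]) and saturates
	 * to SIZE_MAX on overflow, so a huge max_sges fails the allocation
	 * instead of silently undersizing it.
	 */
	ctxt = kzalloc_node(struct_size(ctxt, sc_sges, max_sges),
			    GFP_KERNEL, node);
	if (!ctxt)
		return NULL;

	ctxt->sc_max_sges = max_sges;
	return ctxt;
}
```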
| svc_rdma_rw.c |
    59   struct svc_rdma_rw_ctxt *ctxt;  in svc_rdma_get_rw_ctxt() local
    66   ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);  in svc_rdma_get_rw_ctxt()
    68   ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, first_sgl_nents),  in svc_rdma_get_rw_ctxt()
    70   if (!ctxt)  in svc_rdma_get_rw_ctxt()
    73   INIT_LIST_HEAD(&ctxt->rw_list);  in svc_rdma_get_rw_ctxt()
    74   ctxt->rw_first_sgl_nents = first_sgl_nents;  in svc_rdma_get_rw_ctxt()
    77   ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;  in svc_rdma_get_rw_ctxt()
    78   if (sg_alloc_table_chained(&ctxt  in svc_rdma_get_rw_ctxt()
    91   __svc_rdma_put_rw_ctxt(struct svc_rdma_rw_ctxt *ctxt, struct llist_head *list)  __svc_rdma_put_rw_ctxt() argument
    99   svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, struct svc_rdma_rw_ctxt *ctxt)  svc_rdma_put_rw_ctxt() argument
    111  struct svc_rdma_rw_ctxt *ctxt;  svc_rdma_destroy_rw_ctxts() local
    132  svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma, struct svc_rdma_rw_ctxt *ctxt, u64 offset, u32 handle, enum dma_data_direction direction)  svc_rdma_rw_ctx_init() argument
    177  struct svc_rdma_rw_ctxt *ctxt;  svc_rdma_cc_release() local
    239  svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)  svc_rdma_reply_chunk_release() argument
    323  struct svc_rdma_recv_ctxt *ctxt;  svc_rdma_wc_read_done() local
    379  struct svc_rdma_rw_ctxt *ctxt;  svc_rdma_post_chunk_ctxt() local
    421  svc_rdma_vec_to_sg(struct svc_rdma_write_info *info, unsigned int len, struct svc_rdma_rw_ctxt *ctxt)  svc_rdma_vec_to_sg() argument
    435  svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info, unsigned int remaining, struct svc_rdma_rw_ctxt *ctxt)  svc_rdma_pagelist_to_sg() argument
    471  svc_rdma_build_writes(struct svc_rdma_write_info *info, void (*constructor)(struct svc_rdma_write_info *info, unsigned int len, struct svc_rdma_rw_ctxt *ctxt), unsigned int remaining)  svc_rdma_build_writes() argument
    477  struct svc_rdma_rw_ctxt *ctxt;  svc_rdma_build_writes() local
    738  struct svc_rdma_rw_ctxt *ctxt;  svc_rdma_build_read_segment() local
    [all …]
| /linux/arch/x86/hyperv/ |
| hv_crash.c |
    138  struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;  in hv_crash_c_entry() local
    141  native_load_gdt(&ctxt->gdtr);  in hv_crash_c_entry()
    143  asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss));  in hv_crash_c_entry()
    144  asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp));  in hv_crash_c_entry()
    146  asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds));  in hv_crash_c_entry()
    147  asm volatile("movw %%ax, %%es" : : "a"(ctxt->es));  in hv_crash_c_entry()
    148  asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs));  in hv_crash_c_entry()
    149  asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs));  in hv_crash_c_entry()
    151  native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat);  in hv_crash_c_entry()
    152  asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0));  in hv_crash_c_entry()
    [all …]
|
| /linux/arch/arm64/include/asm/ |
| kvm_asm.h |
    313  .macro get_vcpu_ptr vcpu, ctxt
    314  get_host_ctxt \ctxt, \vcpu
    315  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    318  .macro get_loaded_vcpu vcpu, ctxt
    319  adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
    320  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    323  .macro set_loaded_vcpu vcpu, ctxt, tmp
    324  adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
    325  str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
    352  .macro save_callee_saved_regs ctxt
    [all …]
|
| kvm_hyp.h |
    96   void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
    97   void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
    101  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
    102  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
    103  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
    104  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
|
| /linux/drivers/infiniband/hw/hfi1/ |
| trace_ctxts.h |
    25   __field(unsigned int, ctxt)
    37   __entry->ctxt = uctxt->ctxt;
    50   __entry->ctxt,
    66   TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
    69   TP_ARGS(dd, ctxt, subctxt, cinfo),
    71   __field(unsigned int, ctxt)
    80   __entry->ctxt = ctxt;
    90   __entry->ctxt,
    100  const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
    102  TP_PROTO(unsigned int ctxt),
    [all …]
|
| init.c |
    111  if (rcd->ctxt == HFI1_CTRL_CTXT)  in hfi1_create_kctxt()
    179  rcd->dd->rcd[rcd->ctxt] = NULL;  in hfi1_rcd_free()
    229  u16 ctxt;  in allocate_rcd_index() local
    232  for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)  in allocate_rcd_index()
    233  if (!dd->rcd[ctxt])  in allocate_rcd_index()
    236  if (ctxt < dd->num_rcv_contexts) {  in allocate_rcd_index()
    237  rcd->ctxt = ctxt;  in allocate_rcd_index()
    238  dd->rcd[ctxt] = rcd;  in allocate_rcd_index()
    243  if (ctxt >= dd->num_rcv_contexts)  in allocate_rcd_index()
    246  *index = ctxt;  in allocate_rcd_index()
    [all …]
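allocate_rcd_index() above is a plain first-free-slot scan over the device's receive-context table: find the first NULL entry, publish the new context there, and hand back the index. A hedged sketch of the pattern, with locking elided and a simplified stand-in for struct hfi1_ctxtdata:

```c
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: stand-in for struct hfi1_ctxtdata, reduced to the slot index. */
struct rcd_sketch {
	u16 ctxt;
};

static int rcd_index_alloc_sketch(struct rcd_sketch **table, u16 num_contexts,
				  struct rcd_sketch *rcd, u16 *index)
{
	u16 ctxt;

	/* First-fit scan for an unused slot. */
	for (ctxt = 0; ctxt < num_contexts; ctxt++)
		if (!table[ctxt])
			break;

	if (ctxt >= num_contexts)
		return -EBUSY;	/* every receive context is in use */

	rcd->ctxt = ctxt;	/* the context remembers its own slot */
	table[ctxt] = rcd;
	*index = ctxt;
	return 0;
}
```

In the driver the scan runs under a lock, so the test-then-publish pair cannot race with another allocator.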
|
| netdev_rx.c |
    59   struct hfi1_ctxtdata **ctxt)  in hfi1_netdev_allocate_ctxt() argument
    85   dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);  in hfi1_netdev_allocate_ctxt()
    86   *ctxt = uctxt;  in hfi1_netdev_allocate_ctxt()
    122  struct hfi1_ctxtdata **ctxt)  in hfi1_netdev_allot_ctxt() argument
    127  rc = hfi1_netdev_allocate_ctxt(dd, ctxt);  in hfi1_netdev_allot_ctxt()
    133  rc = hfi1_netdev_setup_ctxt(rx, *ctxt);  in hfi1_netdev_allot_ctxt()
    136  hfi1_netdev_deallocate_ctxt(dd, *ctxt);  in hfi1_netdev_allot_ctxt()
    137  *ctxt = NULL;  in hfi1_netdev_allot_ctxt()
    213  i, rxq->rcd->ctxt);  in hfi1_netdev_rxq_init()
    271  rxq->rcd->ctxt);  in enable_queues()
    [all …]
|
| file_ops.c |
    131  #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \  argument
    134  HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
    279  trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);  in hfi1_write_iter()
    307  static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf,  in mmap_cdbg() argument
    313  ctxt, subctxt, type, mapio, vmf, !!memdma,  in mmap_cdbg()
    331  u16 ctxt;  in hfi1_file_mmap() local
    339  ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);  in hfi1_file_mmap()
    342  if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {  in hfi1_file_mmap()
    441  mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr,  in hfi1_file_mmap()
    464  + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);  in hfi1_file_mmap()
    [all …]
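These matches show the driver's mmap cookie scheme: HFI1_MMAP_TOKEN() packs the mapping type, context, and subcontext into the offset value handed to mmap(), and hfi1_file_mmap() unpacks the token and rejects it if it names a context the caller does not own. A hedged pack/validate sketch; the field widths below are illustrative assumptions, not the driver's real layout:

```c
#include <linux/types.h>

/* Illustrative layout: type in bits 31..24, ctxt in 23..8, subctxt in 7..0. */
#define TOK_TYPE_SHIFT		24
#define TOK_CTXT_SHIFT		8
#define TOK_SUBCTXT_SHIFT	0

static u32 mmap_token_pack_sketch(u8 type, u16 ctxt, u8 subctxt)
{
	return ((u32)type << TOK_TYPE_SHIFT) |
	       ((u32)ctxt << TOK_CTXT_SHIFT) |
	       ((u32)subctxt << TOK_SUBCTXT_SHIFT);
}

static bool mmap_token_valid_sketch(u32 token, u16 my_ctxt, u8 my_subctxt)
{
	u16 ctxt = (token >> TOK_CTXT_SHIFT) & 0xffff;
	u8 subctxt = (token >> TOK_SUBCTXT_SHIFT) & 0xff;

	/* Refuse mappings that name someone else's context. */
	return ctxt == my_ctxt && subctxt == my_subctxt;
}
```

Encoding the target in the offset lets one mmap() entry point serve many distinct per-context regions while still validating ownership on every call.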
|
| /linux/arch/arm64/kvm/hyp/nvhe/ |
| ffa.c |
    97   static void ffa_set_retval(struct kvm_cpu_context *ctxt,  in ffa_set_retval() argument
    100  cpu_reg(ctxt, 0) = res->a0;  in ffa_set_retval()
    101  cpu_reg(ctxt, 1) = res->a1;  in ffa_set_retval()
    102  cpu_reg(ctxt, 2) = res->a2;  in ffa_set_retval()
    103  cpu_reg(ctxt, 3) = res->a3;  in ffa_set_retval()
    104  cpu_reg(ctxt, 4) = res->a4;  in ffa_set_retval()
    105  cpu_reg(ctxt, 5) = res->a5;  in ffa_set_retval()
    106  cpu_reg(ctxt, 6) = res->a6;  in ffa_set_retval()
    107  cpu_reg(ctxt, 7) = res->a7;  in ffa_set_retval()
    126  cpu_reg(ctxt, 8) = res->a8;  in ffa_set_retval()
    [all …]
|
| /linux/arch/x86/xen/ |
| smp_pv.c |
    227  struct vcpu_guest_context *ctxt;  in cpu_initialize_context() local
    234  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in cpu_initialize_context()
    235  if (ctxt == NULL) {  in cpu_initialize_context()
    247  ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;  in cpu_initialize_context()
    248  ctxt->flags = VGCF_IN_KERNEL;  in cpu_initialize_context()
    249  ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */  in cpu_initialize_context()
    250  ctxt->user_regs.ds = __USER_DS;  in cpu_initialize_context()
    251  ctxt->user_regs.es = __USER_DS;  in cpu_initialize_context()
    252  ctxt->user_regs.ss = __KERNEL_DS;  in cpu_initialize_context()
    253  ctxt->user_regs.cs = __KERNEL_CS;  in cpu_initialize_context()
    [all …]
|
| /linux/arch/arm64/kvm/hyp/vhe/ |
| sysreg-sr.c |
    54   if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {  in __sysreg_save_vel2_state()
    57   if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {  in __sysreg_save_vel2_state()
    62   if (ctxt_has_s1poe(&vcpu->arch.ctxt))  in __sysreg_save_vel2_state()
    81   if (ctxt_has_sctlr2(&vcpu->arch.ctxt))  in __sysreg_save_vel2_state()
    93   write_sysreg(ctxt_midr_el1(&vcpu->arch.ctxt), vpidr_el2);  in __sysreg_restore_vel2_state()
    126  if (ctxt_has_tcrx(&vcpu->arch.ctxt)) {  in __sysreg_restore_vel2_state()
    129  if (ctxt_has_s1pie(&vcpu->arch.ctxt)) {  in __sysreg_restore_vel2_state()
    134  if (ctxt_has_s1poe(&vcpu->arch.ctxt))  in __sysreg_restore_vel2_state()
    146  if (ctxt_has_sctlr2(&vcpu->arch.ctxt))  in __sysreg_restore_vel2_state()
    160  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_host_state_vhe() argument
    [all …]
|
| /linux/fs/nilfs2/ |
| btnode.c |
    210  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_prepare_change_key() argument
    214  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_prepare_change_key()
    220  obh = ctxt->bh;  in nilfs_btnode_prepare_change_key()
    221  ctxt->newbh = NULL;  in nilfs_btnode_prepare_change_key()
    260  ctxt->newbh = nbh;  in nilfs_btnode_prepare_change_key()
    286  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_commit_change_key() argument
    288  struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;  in nilfs_btnode_commit_change_key()
    289  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_commit_change_key()
    316  ctxt->bh = nbh;  in nilfs_btnode_commit_change_key()
    337  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_abort_change_key() argument
    [all …]
|
| /linux/include/kvm/ |
| arm_arch_timer.h |
    142  u32 timer_get_ctl(struct arch_timer_context *ctxt);
    143  u64 timer_get_cval(struct arch_timer_context *ctxt);
    164  static inline u64 timer_get_offset(struct arch_timer_context *ctxt)  in timer_get_offset() argument
    168  if (!ctxt)  in timer_get_offset()
    171  if (ctxt->offset.vm_offset)  in timer_get_offset()
    172  offset += *ctxt->offset.vm_offset;  in timer_get_offset()
    173  if (ctxt->offset.vcpu_offset)  in timer_get_offset()
    174  offset += *ctxt->offset.vcpu_offset;  in timer_get_offset()
    179  static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)  in timer_set_offset() argument
    181  if (!ctxt->offset.vm_offset) {  in timer_set_offset()
    [all …]
|
| /linux/fs/ocfs2/ |
| xattr.c |
    264  struct ocfs2_xattr_set_ctxt *ctxt);
    269  struct ocfs2_xattr_set_ctxt *ctxt);
    702  struct ocfs2_xattr_set_ctxt *ctxt)  in ocfs2_xattr_extend_allocation() argument
    705  handle_t *handle = ctxt->handle;  in ocfs2_xattr_extend_allocation()
    728  ctxt->data_ac,  in ocfs2_xattr_extend_allocation()
    729  ctxt->meta_ac,  in ocfs2_xattr_extend_allocation()
    767  struct ocfs2_xattr_set_ctxt *ctxt)  in __ocfs2_remove_xattr_range() argument
    771  handle_t *handle = ctxt->handle;  in __ocfs2_remove_xattr_range()
    783  ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,  in __ocfs2_remove_xattr_range()
    784  &ctxt->dealloc);  in __ocfs2_remove_xattr_range()
    [all …]
|
| /linux/drivers/hwtracing/coresight/ |
| coresight-etm-perf.c |
    466  struct etm_ctxt *ctxt)  in etm_event_resume() argument
    468  if (!ctxt->event_data)  in etm_event_resume()
    478  struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);  in etm_event_start() local
    479  struct perf_output_handle *handle = &ctxt->handle;  in etm_event_start()
    488  if (etm_event_resume(csdev, ctxt) < 0) {  in etm_event_start()
    496  if (WARN_ON(ctxt->event_data))  in etm_event_start()
    558  ctxt->event_data = event_data;  in etm_event_start()
    580  struct etm_ctxt *ctxt)  in etm_event_pause() argument
    584  struct perf_output_handle *handle = &ctxt->handle;  in etm_event_pause()
    588  if (!ctxt->event_data)  in etm_event_pause()
    [all …]
|
| /linux/drivers/scsi/be2iscsi/ |
| be_cmds.c |
    784  void *ctxt = &req->context;  in beiscsi_cmd_cq_create() local
    798  ctxt, coalesce_wm);  in beiscsi_cmd_cq_create()
    799  AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);  in beiscsi_cmd_cq_create()
    800  AMAP_SET_BITS(struct amap_cq_context, count, ctxt,  in beiscsi_cmd_cq_create()
    802  AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);  in beiscsi_cmd_cq_create()
    803  AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);  in beiscsi_cmd_cq_create()
    804  AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);  in beiscsi_cmd_cq_create()
    805  AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);  in beiscsi_cmd_cq_create()
    806  AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);  in beiscsi_cmd_cq_create()
    807  AMAP_SET_BITS(struct amap_cq_context, func, ctxt,  in beiscsi_cmd_cq_create()
    [all …]
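The beiscsi_cmd_cq_create() matches are all AMAP_SET_BITS() calls, the benet/be2iscsi idiom for writing named bit-fields into a little-endian hardware context blob before it is sent to the adapter. The real macro derives each field's bit offset and width from an amap_* template struct; the hedged sketch below takes offset and width explicitly to show the underlying operation:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Sketch: set a named bit-field inside a context made of LE 32-bit words.
 * For simplicity this assumes the field does not straddle a word boundary.
 */
static void ctxt_set_bits_sketch(__le32 *ctxt, unsigned int bit_offset,
				 unsigned int width, u32 value)
{
	unsigned int word = bit_offset / 32;
	unsigned int shift = bit_offset % 32;
	u32 mask = (width == 32) ? ~0u : ((1u << width) - 1);
	u32 cur = le32_to_cpu(ctxt[word]);

	cur &= ~(mask << shift);		/* clear the field */
	cur |= (value & mask) << shift;		/* write the new value */
	ctxt[word] = cpu_to_le32(cur);
}
```

Keeping the context in little-endian words means the blob can be DMA'd to the adapter as-is, regardless of host byte order.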
|
| /linux/arch/arm64/kvm/ |
| reset.c |
    161  free_page((unsigned long)vcpu->arch.ctxt.vncr_array);  in kvm_arm_vcpu_destroy()
    222  memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));  in kvm_reset_vcpu()
    223  vcpu->arch.ctxt.spsr_abt = 0;  in kvm_reset_vcpu()
    224  vcpu->arch.ctxt.spsr_und = 0;  in kvm_reset_vcpu()
    225  vcpu->arch.ctxt.spsr_irq = 0;  in kvm_reset_vcpu()
    226  vcpu->arch.ctxt.spsr_fiq = 0;  in kvm_reset_vcpu()
|