| /linux/arch/x86/kvm/ |
| emulate.c |
  195  int (*execute)(struct x86_emulate_ctxt *ctxt);
  203  int (*check_perm)(struct x86_emulate_ctxt *ctxt);
  249  static void writeback_registers(struct x86_emulate_ctxt *ctxt)
  251  unsigned long dirty = ctxt->regs_dirty;
  255  ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
  258  static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
  260  ctxt->regs_dirty = 0;
  261  ctxt->regs_valid = 0;
  278  static int em_##op(struct x86_emulate_ctxt *ctxt) \
  280  unsigned long flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF; \
  [all …]
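
The emulate.c hits show the emulator's lazy register writeback: guest GPRs are shadowed in ctxt->_regs during emulation, modified ones are flagged in ctxt->regs_dirty, and writeback_registers() pushes only the flagged registers back through ctxt->ops->write_gpr(). Below is a minimal user-space sketch of that dirty-bitmap pattern; the register count and the hw_regs[] stand-in are invented for illustration and are not the kernel's layout.

    /* Dirty-bitmap writeback sketch: only registers whose bit is set in
     * regs_dirty are copied back to the "hardware" array. */
    #include <stdio.h>

    #define NR_REGS 16

    struct emu_ctxt {
        unsigned long regs_dirty;    /* bit n set => shadow reg n was modified */
        unsigned long regs[NR_REGS]; /* shadow copy used during emulation */
    };

    static unsigned long hw_regs[NR_REGS]; /* stands in for ctxt->ops->write_gpr() */

    static void writeback_registers(struct emu_ctxt *ctxt)
    {
        for (int reg = 0; reg < NR_REGS; reg++)
            if (ctxt->regs_dirty & (1UL << reg))
                hw_regs[reg] = ctxt->regs[reg];
    }

    static void invalidate_registers(struct emu_ctxt *ctxt)
    {
        ctxt->regs_dirty = 0;
    }

    int main(void)
    {
        struct emu_ctxt ctxt = { .regs_dirty = (1UL << 3) | (1UL << 7) };

        ctxt.regs[3] = 0xdead;
        ctxt.regs[7] = 0xbeef;
        writeback_registers(&ctxt);
        invalidate_registers(&ctxt);
        printf("r3=%lx r7=%lx dirty=%lx\n", hw_regs[3], hw_regs[7], ctxt.regs_dirty);
        return 0;
    }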
|
| kvm_emulate.h |
  106  void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
  112  ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
  119  void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
  128  int (*read_std)(struct x86_emulate_ctxt *ctxt,
  141  int (*write_std)(struct x86_emulate_ctxt *ctxt,
  151  int (*fetch)(struct x86_emulate_ctxt *ctxt,
  161  int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
  172  int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
  185  int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
  191  void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
  [all …]
|
| /linux/arch/arm64/kvm/hyp/include/hyp/ |
| sysreg-sr.h |
  19  static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
  21  static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
  23  struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
  26  vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
  31  static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)
  33  return host_data_ptr(host_ctxt) != ctxt;
  36  static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
  38  struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
  40  if (ctxt_is_guest(ctxt) && kvm_host_owns_debug_regs(vcpu))
  43  return &ctxt_sys_reg(ctxt, MDSCR_EL1);
  [all …]
|
| switch.h |
  392  hcr = __ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2);   (in ___deactivate_traps)
  440  &vcpu->arch.ctxt.fp_regs.fpsr,   (in __hyp_sve_restore_guest)
  594  __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);   (in kvm_hyp_handle_fpsimd)
  671  static inline u64 hyp_timer_get_offset(struct arch_timer_context *ctxt)
  675  if (ctxt->offset.vm_offset)
  676  offset += *kern_hyp_va(ctxt->offset.vm_offset);
  677  if (ctxt->offset.vcpu_offset)
  678  offset += *kern_hyp_va(ctxt->offset.vcpu_offset);
  683  static inline u64 compute_counter_value(struct arch_timer_context *ctxt)
  685  return arch_timer_read_cntpct_el0() - hyp_timer_get_offset(ctxt);
  [all …]
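
The switch.h hits show hyp_timer_get_offset() summing whichever of the optional per-VM and per-vCPU offsets exist, and compute_counter_value() subtracting that sum from the raw physical counter. The sketch below reproduces only that arithmetic; the struct layout and the raw_counter() stub are illustrative stand-ins, not the kernel's definitions.

    /* Guest-counter computation sketch: guest view = raw counter minus the
     * optional per-VM and per-vCPU offsets. */
    #include <stdint.h>
    #include <stdio.h>

    struct timer_offset {
        uint64_t *vm_offset;   /* NULL when the VM has no offset */
        uint64_t *vcpu_offset; /* NULL when the vCPU has no offset */
    };

    struct arch_timer_context {
        struct timer_offset offset;
    };

    static uint64_t raw_counter(void)
    {
        return 1000000;        /* stands in for arch_timer_read_cntpct_el0() */
    }

    static uint64_t timer_get_offset(struct arch_timer_context *ctxt)
    {
        uint64_t offset = 0;

        if (ctxt->offset.vm_offset)
            offset += *ctxt->offset.vm_offset;
        if (ctxt->offset.vcpu_offset)
            offset += *ctxt->offset.vcpu_offset;
        return offset;
    }

    static uint64_t compute_counter_value(struct arch_timer_context *ctxt)
    {
        return raw_counter() - timer_get_offset(ctxt);
    }

    int main(void)
    {
        uint64_t vm_off = 250, vcpu_off = 50;
        struct arch_timer_context ctxt = {
            .offset = { .vm_offset = &vm_off, .vcpu_offset = &vcpu_off },
        };

        printf("guest counter = %llu\n",
               (unsigned long long)compute_counter_value(&ctxt));
        return 0;
    }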
|
| debug-sr.h |
  107  struct kvm_cpu_context *ctxt)   (in __debug_save_state)
  117  ctxt_sys_reg(ctxt, MDCCINT_EL1) = read_sysreg(mdccint_el1);   (in __debug_save_state)
  121  struct kvm_cpu_context *ctxt)   (in __debug_restore_state)
  131  write_sysreg(ctxt_sys_reg(ctxt, MDCCINT_EL1), mdccint_el1);   (in __debug_restore_state)
  145  guest_ctxt = &vcpu->arch.ctxt;   (in __debug_switch_to_guest_common)
  164  guest_ctxt = &vcpu->arch.ctxt;   (in __debug_switch_to_host_common)
|
| /linux/drivers/net/ethernet/intel/ice/ |
| ice_vsi_vlan_lib.c |
  94   struct ice_vsi_ctx *ctxt;   (local in ice_vsi_manage_vlan_insertion)
  97   ctxt = kzalloc_obj(*ctxt);
  98   if (!ctxt)
  105  ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
  108  ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &
  111  ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
  113  err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
  120  vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
  122  kfree(ctxt);
  134  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_manage_vlan_stripping)
  239  struct ice_vsi_ctx *ctxt;   (local in __ice_vsi_set_inner_port_vlan)
  292  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_clear_inner_port_vlan)
  324  struct ice_vsi_ctx *ctxt;   (local in ice_cfg_vlan_pruning)
  468  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_ena_outer_stripping)
  526  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_dis_outer_stripping)
  577  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_ena_outer_insertion)
  633  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_dis_outer_insertion)
  690  struct ice_vsi_ctx *ctxt;   (local in __ice_vsi_set_outer_port_vlan)
  767  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_clear_outer_port_vlan)
  794  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_clear_port_vlan)
  [all …]
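
Each VLAN helper in this file follows the shape visible in the ice_vsi_manage_vlan_insertion() excerpt: allocate a scratch ice_vsi_ctx, fill in only the section being changed plus its valid_sections marker, push it to firmware with ice_update_vsi(), and copy the new values back into the cached vsi->info only on success. A hedged user-space sketch of that allocate-modify-commit pattern follows; the device_commit() stub and all field names stand in for the real admin-queue call and structures.

    /* Allocate-modify-commit sketch: a scratch context is built, committed to
     * the "device", and only on success copied back into the cached copy. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vsi_info {
        unsigned short valid_sections;
        unsigned char inner_vlan_flags;
    };

    struct vsi {
        struct vsi_info info; /* driver's cached copy of device state */
    };

    /* Stand-in for ice_update_vsi(): pretend the firmware accepted the update. */
    static int device_commit(const struct vsi_info *info)
    {
        (void)info;
        return 0;
    }

    static int vsi_set_vlan_flags(struct vsi *vsi, unsigned char flags)
    {
        struct vsi_info *ctxt;
        int err;

        ctxt = calloc(1, sizeof(*ctxt));
        if (!ctxt)
            return -ENOMEM;

        ctxt->inner_vlan_flags = flags;
        ctxt->valid_sections = 0x1;  /* "VLAN section valid" marker */

        err = device_commit(ctxt);
        if (!err)
            vsi->info.inner_vlan_flags = ctxt->inner_vlan_flags;

        free(ctxt);
        return err;
    }

    int main(void)
    {
        struct vsi vsi = { 0 };

        if (!vsi_set_vlan_flags(&vsi, 0x2))
            printf("cached inner_vlan_flags = %#x\n", vsi.info.inner_vlan_flags);
        return 0;
    }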
| ice_lib.c |
  289   struct ice_vsi_ctx *ctxt;   (local in ice_vsi_delete_from_hw)
  293   ctxt = kzalloc_obj(*ctxt);
  294   if (!ctxt)
  298   ctxt->vf_num = vsi->vf->vf_id;
  299   ctxt->vsi_num = vsi->vsi_num;
  301   memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
  303   status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
  308   kfree(ctxt);
  951   ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
  1005  ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
  1116  ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
  1158  ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
  1193  ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
  1237  struct ice_vsi_ctx *ctxt;   (local in ice_vsi_init)
  3266  ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, u8 ena_tc)
  [all …]
| /linux/kernel/printk/ |
| nbcon.c |
  198   * @ctxt: Pointer to an acquire context that contains
  202   * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to
  207   static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
  209   unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
  210   struct console *con = ctxt->console;
  214   ctxt->seq = new_seq;
  216   ctxt->seq = nbcon_seq_read(con);
  222   * @ctxt: The context of the caller
  243   static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
  247   struct console *con = ctxt   (in nbcon_context_try_acquire_direct)
  351   nbcon_context_try_acquire_requested(struct nbcon_context *ctxt, struct nbcon_state *cur)
  434   nbcon_context_try_acquire_handover(struct nbcon_context *ctxt, struct nbcon_state *cur)
  550   nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt, struct nbcon_state *cur)
  598   nbcon_context_try_acquire(struct nbcon_context *ctxt, bool is_reacquire)
  677   nbcon_context_release(struct nbcon_context *ctxt)
  730   nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur)
  800   struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_can_proceed)
  832   __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe)
  863   struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_write_context_set_buf)
  887   struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_enter_unsafe)
  911   struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_exit_unsafe)
  940   struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_reacquire_nobuf)
  982   struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_emit_next_record)
  1131  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_emit_one)
  1180  nbcon_kthread_should_wakeup(struct console *con, struct nbcon_context *ctxt)
  1227  struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);   (local in nbcon_kthread_func)
  1510  struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);   (local in nbcon_legacy_emit_next_record)
  1567  struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);   (local in __nbcon_atomic_flush_pending_con)
  1882  struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);   (local in nbcon_device_try_acquire)
  1906  struct nbcon_context *ctxt = &ACCESS_PRIVATE(con, nbcon_device_ctxt);   (local in nbcon_device_release)
  1965  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_kdb_try_acquire)
  1988  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);   (local in nbcon_kdb_release)
  [all …]
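
The nbcon_seq_try_update() excerpt shows a compare-and-exchange style sequence update: if the console's sequence still matches what this acquire context started from, it is advanced to new_seq; otherwise another context already made progress and ctxt->seq is re-read from the console instead. A small C11 sketch of that pattern is below; the struct layout and field names are simplified guesses, not the real nbcon structures or their 32/64-bit sequence handling.

    /* Compare-exchange sequence update: advance the shared sequence only if it
     * still matches what this context read earlier; on failure, adopt whatever
     * value the other writer installed. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct console {
        _Atomic unsigned long nbcon_seq;
    };

    struct nbcon_context {
        struct console *console;
        unsigned long seq;  /* sequence this context believes is next */
    };

    static void nbcon_seq_try_update(struct nbcon_context *ctxt, unsigned long new_seq)
    {
        unsigned long expected = ctxt->seq;

        if (atomic_compare_exchange_strong(&ctxt->console->nbcon_seq,
                                           &expected, new_seq))
            ctxt->seq = new_seq;   /* we advanced the console */
        else
            ctxt->seq = expected;  /* someone else did; resync */
    }

    int main(void)
    {
        struct console con = { .nbcon_seq = 10 };
        struct nbcon_context ctxt = { .console = &con, .seq = 10 };

        nbcon_seq_try_update(&ctxt, 11);
        printf("console seq %lu, context seq %lu\n",
               atomic_load(&con.nbcon_seq), ctxt.seq);
        return 0;
    }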
| /linux/arch/x86/power/ |
| cpu.c |
  41   static void msr_save_context(struct saved_context *ctxt)
  43   struct saved_msr *msr = ctxt->saved_msrs.array;
  44   struct saved_msr *end = msr + ctxt->saved_msrs.num;
  53   static void msr_restore_context(struct saved_context *ctxt)
  55   struct saved_msr *msr = ctxt->saved_msrs.array;
  56   struct saved_msr *end = msr + ctxt->saved_msrs.num;
  69   * @ctxt: Structure to store the registers contents in.
  81   static void __save_processor_state(struct saved_context *ctxt)
  91   store_idt(&ctxt->idt);
  99   ctxt   (in __save_processor_state)
  197  __restore_processor_state(struct saved_context *ctxt)
  [all …]
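
msr_save_context() and msr_restore_context() simply walk the saved_msrs array hanging off the saved_context, capturing each MSR into its slot before suspend and replaying it on resume. A stand-alone sketch of that walk follows; the fake_msr[] table and the valid flag handling are illustrative stand-ins for the rdmsr/wrmsr instructions and the real struct saved_msr.

    /* Save/restore loop sketch: iterate over a fixed array of MSR slots,
     * capturing values on save and replaying them on restore. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_msr[2] = { 0x11, 0x22 }; /* pretend hardware MSRs */

    struct saved_msr {
        unsigned int msr;   /* index into fake_msr[] */
        uint64_t value;
        bool valid;
    };

    struct saved_context {
        struct saved_msr array[2];
        unsigned int num;
    };

    static void msr_save_context(struct saved_context *ctxt)
    {
        for (struct saved_msr *m = ctxt->array; m < ctxt->array + ctxt->num; m++) {
            m->value = fake_msr[m->msr];
            m->valid = true;
        }
    }

    static void msr_restore_context(struct saved_context *ctxt)
    {
        for (struct saved_msr *m = ctxt->array; m < ctxt->array + ctxt->num; m++)
            if (m->valid)
                fake_msr[m->msr] = m->value;
    }

    int main(void)
    {
        struct saved_context ctxt = {
            .array = { { .msr = 0 }, { .msr = 1 } },
            .num = 2,
        };

        msr_save_context(&ctxt);
        fake_msr[0] = 0;
        fake_msr[1] = 0;              /* "lost" across suspend */
        msr_restore_context(&ctxt);
        printf("restored: %#llx %#llx\n",
               (unsigned long long)fake_msr[0], (unsigned long long)fake_msr[1]);
        return 0;
    }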
| /linux/net/sunrpc/xprtrdma/ |
| svc_rdma_sendto.c |
  80   * ctxt, the Send WR is posted, and sendto returns.
  120  struct svc_rdma_send_ctxt *ctxt;   (local in svc_rdma_send_ctxt_alloc)
  126  ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
  128  if (!ctxt)
  131  ctxt->sc_pages = kcalloc_node(pages, sizeof(struct page *),
  133  if (!ctxt->sc_pages)
  135  ctxt->sc_maxpages = pages;
  144  svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
  146  ctxt   (in svc_rdma_send_ctxt_alloc)
  169  struct svc_rdma_send_ctxt *ctxt;   (local in svc_rdma_send_ctxts_destroy)
  192  struct svc_rdma_send_ctxt *ctxt;   (local in svc_rdma_send_ctxt_get)
  225  svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
  253  struct svc_rdma_send_ctxt *ctxt;   (local in svc_rdma_send_ctxt_put_async)
  267  svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
  299  struct svc_rdma_send_ctxt *ctxt =   (local in svc_rdma_wc_send)
  339  svc_rdma_post_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
  567  struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;   (local in svc_rdma_page_dma_map)
  854  svc_rdma_save_io_pages(struct svc_rqst *rqstp, struct svc_rdma_send_ctxt *ctxt)
  [all …]
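
svc_rdma_send_ctxt_alloc() sizes each send context with struct_size(ctxt, sc_sges, sc_max_send_sges), that is the fixed header plus a flexible array of SGEs sized for this connection, and then allocates the page-pointer array separately. The sketch below shows only the flexible-array-member sizing trick with plain calloc; the field names and the SGE type are invented for illustration, not the kernel's definitions.

    /* Flexible-array allocation sketch: the context header and its
     * variable-length sge[] tail come from a single allocation, sized the way
     * the kernel's struct_size() helper would compute it. */
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct sge {
        void *addr;
        unsigned int length;
    };

    struct send_ctxt {
        unsigned int sc_cur_sge_no;
        unsigned int sc_max_sges;
        struct sge sc_sges[];   /* flexible array member */
    };

    static struct send_ctxt *send_ctxt_alloc(unsigned int max_sges)
    {
        struct send_ctxt *ctxt;

        /* equivalent of struct_size(ctxt, sc_sges, max_sges) */
        ctxt = calloc(1, sizeof(*ctxt) + max_sges * sizeof(ctxt->sc_sges[0]));
        if (!ctxt)
            return NULL;
        ctxt->sc_max_sges = max_sges;
        return ctxt;
    }

    int main(void)
    {
        struct send_ctxt *ctxt = send_ctxt_alloc(8);

        if (!ctxt)
            return 1;
        printf("allocated context with room for %u sges\n", ctxt->sc_max_sges);
        free(ctxt);
        return 0;
    }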
| svc_rdma_rw.c |
  50  struct svc_rdma_rw_ctxt *ctxt);
  64  struct svc_rdma_rw_ctxt *ctxt;   (local in svc_rdma_get_rw_ctxt)
  71  ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
  73  ctxt = kmalloc_node(struct_size(ctxt, rw_first_bvec,
  76  if (!ctxt)
  79  INIT_LIST_HEAD(&ctxt->rw_list);
  80  ctxt->rw_first_bvec_nents = first_bvec_nents;
  83  if (nr_bvec <= ctxt->rw_first_bvec_nents) {
  84  ctxt->rw_bvec = ctxt->rw_first_bvec;
  86  ctxt->rw_bvec = kmalloc_array_node(nr_bvec,
  [all …]
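
svc_rdma_get_rw_ctxt() reuses a context from a lock-free llist cache when one is available, and points rw_bvec at the small rw_first_bvec array embedded in the context unless the request needs more entries, in which case a separate array is allocated. Below is a hedged sketch of that "embedded buffer unless too big" choice; the cache handling is omitted and all names are illustrative.

    /* "Use the embedded array unless the request is larger" sketch: small
     * transfers reuse rw_first_bvec[] inside the context, larger ones get a
     * separately allocated array that is freed on release. */
    #include <stdio.h>
    #include <stdlib.h>

    #define FIRST_BVEC_NENTS 4

    struct bvec {
        void *page;
        unsigned int len;
    };

    struct rw_ctxt {
        struct bvec *rw_bvec;   /* points at one of the two below */
        unsigned int rw_nents;
        struct bvec rw_first_bvec[FIRST_BVEC_NENTS];
    };

    static struct rw_ctxt *get_rw_ctxt(unsigned int nr_bvec)
    {
        struct rw_ctxt *ctxt = calloc(1, sizeof(*ctxt));

        if (!ctxt)
            return NULL;

        if (nr_bvec <= FIRST_BVEC_NENTS) {
            ctxt->rw_bvec = ctxt->rw_first_bvec;  /* embedded, no extra alloc */
        } else {
            ctxt->rw_bvec = calloc(nr_bvec, sizeof(*ctxt->rw_bvec));
            if (!ctxt->rw_bvec) {
                free(ctxt);
                return NULL;
            }
        }
        ctxt->rw_nents = nr_bvec;
        return ctxt;
    }

    static void put_rw_ctxt(struct rw_ctxt *ctxt)
    {
        if (ctxt->rw_bvec != ctxt->rw_first_bvec)
            free(ctxt->rw_bvec);
        free(ctxt);
    }

    int main(void)
    {
        struct rw_ctxt *small = get_rw_ctxt(2);
        struct rw_ctxt *large = get_rw_ctxt(16);

        printf("small inline: %d, large inline: %d\n",
               small && small->rw_bvec == small->rw_first_bvec,
               large && large->rw_bvec == large->rw_first_bvec);
        if (small)
            put_rw_ctxt(small);
        if (large)
            put_rw_ctxt(large);
        return 0;
    }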
|
| /linux/arch/x86/hyperv/ |
| hv_crash.c |
  138  struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;   (local in hv_crash_c_entry)
  141  native_load_gdt(&ctxt->gdtr);
  143  asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss));
  144  asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp));
  146  asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds));
  147  asm volatile("movw %%ax, %%es" : : "a"(ctxt->es));
  148  asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs));
  149  asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs));
  151  native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat);
  152  asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0));
  [all …]
|
| /linux/arch/arm64/include/asm/ |
| kvm_asm.h |
  313  .macro get_vcpu_ptr vcpu, ctxt
  314  get_host_ctxt \ctxt, \vcpu
  315  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
  318  .macro get_loaded_vcpu vcpu, ctxt
  319  adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
  320  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
  323  .macro set_loaded_vcpu vcpu, ctxt, tmp
  324  adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
  325  str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
  352  .macro save_callee_saved_regs ctxt
  [all …]
|
| kvm_hyp.h |
  96   void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
  97   void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
  101  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
  102  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
  103  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
  104  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
|
| /linux/drivers/infiniband/hw/hfi1/ |
| trace_ctxts.h |
  25   __field(unsigned int, ctxt)
  37   __entry->ctxt = uctxt->ctxt;
  50   __entry->ctxt,
  66   TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
  69   TP_ARGS(dd, ctxt, subctxt, cinfo),
  71   __field(unsigned int, ctxt)
  80   __entry->ctxt = ctxt;
  90   __entry->ctxt,
  100  const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
  102  TP_PROTO(unsigned int ctxt),
  [all …]
|
| init.c |
  48    * pio buffers per ctxt, etc.) Zero means use one user context per CPU.
  111   if (rcd->ctxt == HFI1_CTRL_CTXT)   (in hfi1_create_kctxt)
  179   rcd->dd->rcd[rcd->ctxt] = NULL;   (in hfi1_rcd_free)
  229   u16 ctxt;   (local in allocate_rcd_index)
  232   for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
  233   if (!dd->rcd[ctxt])
  236   if (ctxt < dd->num_rcv_contexts) {
  237   rcd->ctxt   (in allocate_rcd_index)
  264   hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd, u16 ctxt)
  284   hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
  319   u16 ctxt;   (local in hfi1_create_ctxtdata)
  1481  int ctxt;   (local in cleanup_device_data)
  [all …]
| netdev_rx.c |
  59   struct hfi1_ctxtdata **ctxt)   (in hfi1_netdev_allocate_ctxt)
  85   dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
  86   *ctxt = uctxt;
  122  struct hfi1_ctxtdata **ctxt)   (in hfi1_netdev_allot_ctxt)
  127  rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
  133  rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
  136  hfi1_netdev_deallocate_ctxt(dd, *ctxt);
  137  *ctxt = NULL;
  213  i, rxq->rcd->ctxt);   (in hfi1_netdev_rxq_init)
  271  rxq->rcd->ctxt);   (in enable_queues)
  [all …]
|
| file_ops.c |
  131   #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \
  134   HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
  279   trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);   (in hfi1_write_iter)
  307   static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf,
  313   ctxt, subctxt, type, mapio, vmf, !!memdma,
  331   u16 ctxt;   (local in hfi1_file_mmap)
  339   ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
  342   if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
  360   /* 64K PIO space / ctxt */
  1492  u16 ctxt;   (local in hfi1_set_uevent_bits)
  [all …]
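
HFI1_MMAP_TOKEN packs the mapping type, receive context, and subcontext into the mmap offset handed to user space, and hfi1_file_mmap() unpacks the same fields with HFI1_MMAP_TOKEN_GET and checks them against the file's own context before mapping anything. The sketch below shows only the pack/unpack idea; the bit positions and widths are made up and the real driver's token layout differs.

    /* Token pack/unpack sketch: several small identifiers are packed into one
     * value handed to user space and validated again when it comes back.
     * Bit widths here are invented for illustration. */
    #include <stdint.h>
    #include <stdio.h>

    #define TOKEN_TYPE_SHIFT    24  /*  8-bit type    (assumed width) */
    #define TOKEN_CTXT_SHIFT     8  /* 16-bit ctxt    (assumed width) */
    #define TOKEN_SUBCTXT_SHIFT  0  /*  8-bit subctxt (assumed width) */

    #define MMAP_TOKEN(type, ctxt, subctxt)              \
        (((uint32_t)(type) << TOKEN_TYPE_SHIFT) |        \
         ((uint32_t)(ctxt) << TOKEN_CTXT_SHIFT) |        \
         ((uint32_t)(subctxt) << TOKEN_SUBCTXT_SHIFT))

    #define MMAP_TOKEN_GET_TYPE(tok)    (((tok) >> TOKEN_TYPE_SHIFT) & 0xff)
    #define MMAP_TOKEN_GET_CTXT(tok)    (((tok) >> TOKEN_CTXT_SHIFT) & 0xffff)
    #define MMAP_TOKEN_GET_SUBCTXT(tok) (((tok) >> TOKEN_SUBCTXT_SHIFT) & 0xff)

    int main(void)
    {
        uint32_t token = MMAP_TOKEN(3, 42, 1);

        /* the receiving side validates the unpacked fields, much as
         * hfi1_file_mmap() checks them against uctxt->ctxt and fd->subctxt */
        printf("type=%u ctxt=%u subctxt=%u\n",
               MMAP_TOKEN_GET_TYPE(token),
               MMAP_TOKEN_GET_CTXT(token),
               MMAP_TOKEN_GET_SUBCTXT(token));
        return 0;
    }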
| /linux/arch/x86/xen/ |
| smp_pv.c |
  226  struct vcpu_guest_context *ctxt;   (local in cpu_initialize_context)
  233  ctxt = kzalloc_obj(*ctxt);
  234  if (ctxt == NULL) {
  246  ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
  247  ctxt->flags = VGCF_IN_KERNEL;
  248  ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
  249  ctxt->user_regs.ds = __USER_DS;
  250  ctxt->user_regs.es = __USER_DS;
  251  ctxt   (in cpu_initialize_context)
  [all …]
| /linux/fs/nilfs2/ |
| btnode.c |
  210  struct nilfs_btnode_chkey_ctxt *ctxt)   (in nilfs_btnode_prepare_change_key)
  214  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
  220  obh = ctxt->bh;
  221  ctxt->newbh = NULL;
  260  ctxt->newbh = nbh;
  286  struct nilfs_btnode_chkey_ctxt *ctxt)   (in nilfs_btnode_commit_change_key)
  288  struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
  289  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
  316  ctxt->bh = nbh;
  337  struct nilfs_btnode_chkey_ctxt *ctxt)   (in nilfs_btnode_abort_change_key)
  [all …]
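
The btnode.c hits outline a three-phase key change: nilfs_btnode_prepare_change_key() acquires a replacement buffer and stashes it in ctxt->newbh, nilfs_btnode_commit_change_key() installs it and updates ctxt->bh, and nilfs_btnode_abort_change_key() releases whatever prepare set up if the operation is backed out. Below is a generic prepare/commit/abort sketch of that structure; real buffer-head management is reduced to malloc/free and the names are illustrative.

    /* Prepare/commit/abort sketch: prepare stages a replacement resource in the
     * context, commit installs it, abort releases it untouched. */
    #include <stdio.h>
    #include <stdlib.h>

    struct chkey_ctxt {
        unsigned long oldkey;
        unsigned long newkey;
        void *bh;     /* currently installed "buffer" */
        void *newbh;  /* staged replacement, owned until commit/abort */
    };

    static int prepare_change_key(struct chkey_ctxt *ctxt)
    {
        ctxt->newbh = malloc(64);  /* stage the replacement */
        return ctxt->newbh ? 0 : -1;
    }

    static void commit_change_key(struct chkey_ctxt *ctxt)
    {
        free(ctxt->bh);            /* drop the old buffer */
        ctxt->bh = ctxt->newbh;    /* install the staged one */
        ctxt->newbh = NULL;
        ctxt->oldkey = ctxt->newkey;
    }

    static void abort_change_key(struct chkey_ctxt *ctxt)
    {
        free(ctxt->newbh);         /* staged buffer was never installed */
        ctxt->newbh = NULL;
    }

    int main(void)
    {
        struct chkey_ctxt ctxt = { .oldkey = 7, .newkey = 9, .bh = malloc(64) };

        if (prepare_change_key(&ctxt) == 0)
            commit_change_key(&ctxt);
        else
            abort_change_key(&ctxt);

        printf("key is now %lu\n", ctxt.oldkey);
        free(ctxt.bh);
        return 0;
    }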
|
| /linux/include/kvm/ |
| arm_arch_timer.h |
  142  u32 timer_get_ctl(struct arch_timer_context *ctxt);
  143  u64 timer_get_cval(struct arch_timer_context *ctxt);
  164  static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
  168  if (!ctxt)
  171  if (ctxt->offset.vm_offset)
  172  offset += *ctxt->offset.vm_offset;
  173  if (ctxt->offset.vcpu_offset)
  174  offset += *ctxt->offset.vcpu_offset;
  179  static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
  181  if (!ctxt->offset.vm_offset) {
  [all …]
|
| /linux/fs/ocfs2/ |
| xattr.c |
  264  struct ocfs2_xattr_set_ctxt *ctxt);
  269  struct ocfs2_xattr_set_ctxt *ctxt);
  702  struct ocfs2_xattr_set_ctxt *ctxt)   (in ocfs2_xattr_extend_allocation)
  705  handle_t *handle = ctxt->handle;
  728  ctxt->data_ac,
  729  ctxt->meta_ac,
  767  struct ocfs2_xattr_set_ctxt *ctxt)   (in __ocfs2_remove_xattr_range)
  771  handle_t *handle = ctxt->handle;
  783  ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
  784  &ctxt->dealloc);
  [all …]
|
| /linux/drivers/hwtracing/coresight/ |
| coresight-etm-perf.c |
  468  struct etm_ctxt *ctxt)   (in etm_event_resume)
  470  if (!ctxt->event_data)
  480  struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);   (local in etm_event_start)
  481  struct perf_output_handle *handle = &ctxt->handle;
  490  if (etm_event_resume(csdev, ctxt) < 0) {
  498  if (WARN_ON(ctxt->event_data))
  560  ctxt->event_data = event_data;
  582  struct etm_ctxt *ctxt)   (in etm_event_pause)
  586  struct perf_output_handle *handle = &ctxt->handle;
  590  if (!ctxt   (in etm_event_pause)
  634  struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);   (local in etm_event_stop)
  [all …]
| /linux/drivers/scsi/be2iscsi/ |
| be_cmds.c |
  784  void *ctxt = &req->context;   (local in beiscsi_cmd_cq_create)
  798  ctxt, coalesce_wm);
  799  AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
  800  AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
  802  AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
  803  AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
  804  AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
  805  AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
  806  AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
  807  AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
  [all …]
|
| /linux/arch/arm64/kvm/ |
| reset.c |
  161  free_page((unsigned long)vcpu->arch.ctxt.vncr_array);   (in kvm_arm_vcpu_destroy)
  222  memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));   (in kvm_reset_vcpu)
  223  vcpu->arch.ctxt.spsr_abt = 0;
  224  vcpu->arch.ctxt.spsr_und = 0;
  225  vcpu->arch.ctxt.spsr_irq = 0;
  226  vcpu->arch.ctxt.spsr_fiq = 0;
|