Searched refs:need_flush (Results 1 – 14 of 14) sorted by relevance
52  get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush) in get_new_mmu_context() argument
57  *need_flush = true; /* start new asid cycle */ in get_new_mmu_context()
91  bool need_flush = false; in switch_mm_irqs_off() local
96  get_new_mmu_context(next, cpu, &need_flush); in switch_mm_irqs_off()
103 if (need_flush) in switch_mm_irqs_off()
151 bool need_flush = false; in drop_mmu_context() local
154 get_new_mmu_context(mm, cpu, &need_flush); in drop_mmu_context()
157 if (need_flush) in drop_mmu_context()
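The hits above are an ASID-cycle allocator in the MIPS/LoongArch style: get_new_mmu_context() reports a generation rollover through a bool out-parameter, and both callers (switch_mm_irqs_off() and drop_mmu_context()) perform the actual TLB flush themselves. Below is a minimal userspace sketch of that out-parameter shape, assuming a toy 8-bit ASID space; the names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define ASID_BITS 8
#define ASID_MASK ((1u << ASID_BITS) - 1)

static unsigned int asid_cache = ASID_MASK;	/* last ASID handed out */

static unsigned int get_new_context(bool *need_flush)
{
	unsigned int asid = (asid_cache + 1) & ASID_MASK;

	if (asid == 0) {		/* wrapped: every live ASID may be stale */
		asid = 1;		/* skip the reserved ASID 0 */
		*need_flush = true;	/* start new asid cycle */
	}
	return asid_cache = asid;
}

int main(void)
{
	for (int i = 0; i < 600; i++) {
		bool need_flush = false;
		unsigned int asid = get_new_context(&need_flush);

		if (need_flush)	/* caller flushes, once per rollover */
			printf("rollover at asid %u: flush whole TLB\n", asid);
	}
	return 0;
}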
368 unsigned int subregions, bool need_flush) in mpu_setup_region() argument
387 if (need_flush) in mpu_setup_region()
441 bool need_flush = region == PMSAv7_RAM_REGION; in pmsav7_setup() local
448 xip[i].subreg, need_flush); in pmsav7_setup()
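These hits are the Arm PMSAv7 MPU setup: the flush decision is computed once per region (only the RAM region needs its caches cleaned before the region is programmed) and handed down to mpu_setup_region() as a flag. A small sketch of that precomputed-flag shape, with made-up region names:

#include <stdbool.h>
#include <stdio.h>

enum { REGION_ROM, REGION_RAM, REGION_IO, NR_REGIONS };

static void setup_region(int region, bool need_flush)
{
	if (need_flush)
		puts("clean dcache before programming this region");
	printf("program MPU region %d\n", region);
}

int main(void)
{
	for (int region = 0; region < NR_REGIONS; region++)
		setup_region(region, region == REGION_RAM);
	return 0;
}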
222 unsigned int need_flush : 1; member
232 ns.need_flush = 1; in choose_new_asid()
245 ns.need_flush = 0; in choose_new_asid()
259 ns.need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < next_tlb_gen); in choose_new_asid()
272 ns.need_flush = true; in choose_new_asid()
566 bool need_flush) in load_new_mm_cr3() argument
570 if (need_flush) { in load_new_mm_cr3()
901 ns.need_flush = true; in switch_mm_irqs_off()
947 if (ns.need_flush) { in switch_mm_irqs_off()
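These are recognizably the x86 TLB hits (arch/x86/mm/tlb.c): need_flush is a one-bit field in the small struct the ASID chooser returns, and hit 259 is the interesting case, reusing an ASID without a flush only when the TLB generation recorded for it is current. A compilable sketch of that generation comparison, assuming a hypothetical, tiny ASID table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_ASIDS 8

struct new_asid {
	unsigned int asid       : 15;
	unsigned int need_flush : 1;
};

/* per-ASID: the TLB generation this ASID was last flushed up to */
static uint64_t cached_tlb_gen[NR_ASIDS];

static struct new_asid choose_asid(unsigned int asid, uint64_t next_tlb_gen)
{
	struct new_asid ns = { .asid = asid };

	/* reuse without flushing only if the ASID saw every invalidation */
	ns.need_flush = cached_tlb_gen[asid] < next_tlb_gen;
	return ns;
}

int main(void)
{
	cached_tlb_gen[3] = 7;

	struct new_asid ns = choose_asid(3, 9);	/* stale: gen 7 < 9 */
	printf("asid=%u need_flush=%u\n", (unsigned)ns.asid, (unsigned)ns.need_flush);
	return 0;
}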
87  static bool need_flush; /* global flush state. set for each gart wrap */ variable
104 need_flush = true; in alloc_iommu()
113 need_flush = true; in alloc_iommu()
117 need_flush = true; in alloc_iommu()
142 if (need_flush) { in flush_gart()
144 need_flush = false; in flush_gart()
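The AMD GART IOMMU keeps need_flush as a file-scope global: alloc_iommu() sets it at several points (including on aperture wrap), and flush_gart() does one hardware flush for the whole batch and clears it. A userspace sketch of that batch-and-clear shape (the real code does this under a lock, omitted here):

#include <stdbool.h>
#include <stdio.h>

static bool need_flush;	/* global flush state, set by each allocation */

static void alloc_iommu_slot(void)
{
	/* ...allocate a GART aperture slot... */
	need_flush = true;
}

static void flush_gart(void)
{
	if (need_flush) {
		puts("flush GART TLB once for the whole batch");
		need_flush = false;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		alloc_iommu_slot();
	flush_gart();	/* one hardware flush for four allocations */
	return 0;
}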
352 bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq; in etnaviv_buffer_queue() local
370 if (need_flush || switch_context) { in etnaviv_buffer_queue()
377 if (need_flush) { in etnaviv_buffer_queue()
406 if (need_flush) { in etnaviv_buffer_queue()
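The etnaviv GPU driver derives need_flush from a flush sequence number: a TLB flush is emitted into the command stream only when the GPU's last-flushed sequence differs from the one the new buffer was built against, or when the MMU context switches. A sketch of that sequence-compare idea, with stand-in names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t gpu_flush_seq;	/* sequence the GPU last flushed at */

static void queue_buffer(uint32_t new_flush_seq, bool switch_context)
{
	bool need_flush = switch_context || gpu_flush_seq != new_flush_seq;

	if (need_flush) {
		puts("emit TLB-flush commands into the ring");
		gpu_flush_seq = new_flush_seq;
	}
	puts("link in the new command buffer");
}

int main(void)
{
	queue_buffer(1, false);	/* sequence changed: flush */
	queue_buffer(1, false);	/* same sequence: no flush */
	return 0;
}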
19  static inline bool need_flush(struct iommu_map_table *iommu) in need_flush() function
206 (n < pool->hint || need_flush(iommu))) { in iommu_tbl_range_alloc()
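Here the flag lives in the map table itself and a tiny inline helper tests it, so the allocator's wrap/retry condition at hit 206 reads as a single expression. A sketch assuming a hypothetical flags-based layout (the real iommu_map_table is organized differently):

#include <stdbool.h>
#include <stdio.h>

#define NEED_FLUSH_FLAG 0x1	/* hypothetical flag bit */

struct map_table {
	unsigned long flags;
};

static inline bool need_flush(struct map_table *t)
{
	return (t->flags & NEED_FLUSH_FLAG) != 0;
}

int main(void)
{
	struct map_table t = { .flags = NEED_FLUSH_FLAG };

	/* e.g. the search wrapped below the hint: lazy-flush, then retry */
	if (need_flush(&t)) {
		puts("lazy flush before retrying the allocation");
		t.flags &= ~NEED_FLUSH_FLAG;
	}
	return 0;
}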
199 int need_flush = 0; in flush_all_zero_pkmaps() local
232 need_flush = 1; in flush_all_zero_pkmaps()
234 if (need_flush) in flush_all_zero_pkmaps()
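flush_all_zero_pkmaps() (mm/highmem.c) sweeps the whole pkmap window, tears down entries whose use count dropped to the "mapped but unused" state, and issues one kernel-range TLB flush at the end only if anything was actually unmapped. A toy version of that sweep-then-flush-once loop, with illustrative data:

#include <stdio.h>

#define LAST_PKMAP 16
static int pkmap_count[LAST_PKMAP];

static void flush_all_zero_pkmaps(void)
{
	int need_flush = 0;

	for (int i = 0; i < LAST_PKMAP; i++) {
		if (pkmap_count[i] == 1) {	/* mapped but no users left */
			pkmap_count[i] = 0;	/* ...clear the PTE... */
			need_flush = 1;
		}
	}
	if (need_flush)
		puts("one flush_tlb_kernel_range() for the whole window");
}

int main(void)
{
	pkmap_count[3] = 1;
	flush_all_zero_pkmaps();
	return 0;
}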
529 int need_flush = 0; in mlx4_ib_cm_paravirt_clean() local
537 need_flush |= !cancel_delayed_work(&map->timeout); in mlx4_ib_cm_paravirt_clean()
543 if (need_flush) in mlx4_ib_cm_paravirt_clean()
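In mlx4's CM cleanup the flag accumulates cancellation failures: cancel_delayed_work() returns false when the work could not be dequeued (typically because it is already running), and a single flush afterwards waits for all of those in-flight handlers at once. A userspace stand-in for that accumulate-then-flush pattern:

#include <stdbool.h>
#include <stdio.h>

/* stand-in: true if the delayed work was still queued and got
 * cancelled, false if it could not be dequeued (e.g. already running) */
static bool cancel_delayed(int id)
{
	return id != 2;	/* pretend entry 2 was already executing */
}

int main(void)
{
	int need_flush = 0;

	for (int id = 0; id < 4; id++)
		need_flush |= !cancel_delayed(id);

	if (need_flush)	/* someone was in flight: wait for them all once */
		puts("flush workqueue before tearing down state");
	return 0;
}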
1135 bool need_flush = false; in stage2_unmap_walker() local
1151 need_flush = !stage2_has_fwb(pgt); in stage2_unmap_walker()
1161 if (need_flush && mm_ops->dcache_clean_inval_poc) in stage2_unmap_walker()
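KVM's arm64 stage-2 unmap walker needs cache maintenance only when the hardware lacks FWB (forced write-back), and it only invokes the op if the method table actually provides one. A sketch of that per-entry decision, with a hypothetical ops struct:

#include <stdbool.h>
#include <stdio.h>

struct mm_ops {
	void (*dcache_clean_inval)(void *addr, unsigned long size);
};

static void clean_inval(void *addr, unsigned long size)
{
	printf("cache maintenance on %p, %lu bytes\n", addr, size);
}

static void unmap_walker(bool has_fwb, struct mm_ops *ops)
{
	bool need_flush = !has_fwb;	/* FWB makes the CMO unnecessary */

	/* ...clear the stage-2 PTE... */
	if (need_flush && ops->dcache_clean_inval)
		ops->dcache_clean_inval((void *)0x1000, 4096);
}

int main(void)
{
	struct mm_ops ops = { .dcache_clean_inval = clean_inval };

	unmap_walker(false, &ops);	/* no FWB: maintenance issued */
	unmap_walker(true, &ops);	/* FWB: skipped */
	return 0;
}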
583 capsnap->need_flush = true; in ceph_queue_cap_snap()
595 ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush"); in ceph_queue_cap_snap()
262 bool need_flush; member
1476 if (capsnap->need_flush) { in __prep_cap()
1641 BUG_ON(!capsnap->need_flush); in __ceph_flush_snaps()
3204 if (!capsnap->need_flush && in ceph_try_drop_cap_snap()
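Unlike the transient locals elsewhere in these results, ceph's need_flush is persistent state on the cap-snapshot object (the lone member hit above): it is set when a snapshot is queued with dirty caps, asserted in __ceph_flush_snaps(), and consulted before a cap snap may be dropped. A simplified stand-in, not ceph's real struct:

#include <stdbool.h>
#include <stdio.h>

struct cap_snap {
	unsigned int dirty;
	bool need_flush;	/* dirty state must still reach the server */
};

static void queue_cap_snap(struct cap_snap *s, unsigned int dirty)
{
	s->dirty = dirty;
	s->need_flush = dirty != 0;
}

static bool try_drop_cap_snap(struct cap_snap *s)
{
	return !s->need_flush;	/* droppable only if nothing to flush */
}

int main(void)
{
	struct cap_snap s;

	queue_cap_snap(&s, 0x4);
	printf("droppable: %d\n", try_drop_cap_snap(&s));	/* 0 */
	return 0;
}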
999  bool need_flush = false; in writecache_resume() local
1070 need_flush = true; in writecache_resume()
1091 need_flush = true; in writecache_resume()
1098 if (need_flush) { in writecache_resume()
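dm-writecache sets the flag while rebuilding its state at resume time (two independent repair paths set it, per the hits above) and commits the repaired metadata once at the end instead of once per entry. A toy version of that repair loop:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool need_flush = false;

	for (int e = 0; e < 8; e++) {
		bool bad = (e == 5);	/* pretend one entry is inconsistent */

		if (bad) {
			/* ...mark the entry empty again... */
			need_flush = true;
		}
	}
	if (need_flush)
		puts("commit repaired metadata once");
	return 0;
}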
1617 bool need_flush = false; in vmxnet3_rq_rx_complete() local
1678 need_flush |= act == XDP_REDIRECT; in vmxnet3_rq_rx_complete()
1735 need_flush |= act == XDP_REDIRECT; in vmxnet3_rq_rx_complete()
2010 if (need_flush) in vmxnet3_rq_rx_complete()
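vmxnet3's RX completion path ORs the flag in for every XDP_REDIRECT verdict and flushes once after the loop, the usual xdp_do_flush() batching for a NAPI poll. A userspace sketch with stand-ins for the XDP pieces:

#include <stdbool.h>
#include <stdio.h>

enum xdp_action { XDP_PASS, XDP_REDIRECT };

/* stand-in for running the XDP program on one packet */
static enum xdp_action run_xdp(int pkt)
{
	return pkt % 2 ? XDP_REDIRECT : XDP_PASS;
}

int main(void)
{
	bool need_flush = false;

	for (int pkt = 0; pkt < 6; pkt++)
		need_flush |= run_xdp(pkt) == XDP_REDIRECT;

	if (need_flush)	/* once per poll, not once per packet */
		puts("xdp_do_flush(): drain the redirect queues");
	return 0;
}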