| /linux/mm/ |
| H A D | mmu_gather.c |
   22  struct mmu_gather_batch *batch;  in tlb_next_batch() local
   28  batch = tlb->active;  in tlb_next_batch()
   29  if (batch->next) {  in tlb_next_batch()
   30  tlb->active = batch->next;  in tlb_next_batch()
   37  batch = (void *)__get_free_page(GFP_NOWAIT);  in tlb_next_batch()
   38  if (!batch)  in tlb_next_batch()
   42  batch->next = NULL;  in tlb_next_batch()
   43  batch->nr = 0;  in tlb_next_batch()
   44  batch->max = MAX_GATHER_BATCH;  in tlb_next_batch()
   46  tlb->active->next = batch;  in tlb_next_batch()
   [all …]
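These hits are the heart of the mmu_gather machinery: pages pending a TLB flush are collected into page-sized batches chained through `next`, and tlb_next_batch() either advances to an already-chained spare or allocates a fresh batch with GFP_NOWAIT. A minimal user-space sketch of that chained-batch idea, with hypothetical names (gather, gather_batch, MAX_BATCH) standing in for the kernel types:

    #include <stdlib.h>

    #define MAX_BATCH 64              /* entries per batch; the kernel sizes this from PAGE_SIZE */

    struct gather_batch {
        struct gather_batch *next;    /* singly linked chain of batches */
        unsigned int nr;              /* entries used in this batch */
        unsigned int max;             /* capacity of this batch */
        void *items[MAX_BATCH];
    };

    struct gather {
        struct gather_batch *active;  /* batch currently being filled */
        struct gather_batch first;    /* embedded first batch: the common case allocates nothing */
    };

    static void gather_init(struct gather *g)
    {
        g->first.next = NULL;
        g->first.nr = 0;
        g->first.max = MAX_BATCH;
        g->active = &g->first;
    }

    /* Mirrors tlb_next_batch(): reuse a chained spare batch if one exists,
     * otherwise allocate; returns 0 when memory is exhausted. */
    static int gather_next_batch(struct gather *g)
    {
        struct gather_batch *batch = g->active;

        if (batch->next) {
            g->active = batch->next;
            return 1;
        }

        batch = calloc(1, sizeof(*batch)); /* kernel: __get_free_page(GFP_NOWAIT) */
        if (!batch)
            return 0;

        batch->nr = 0;
        batch->max = MAX_BATCH;
        g->active->next = batch;           /* chain it and make it current */
        g->active = batch;
        return 1;
    }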
|
| /linux/drivers/iommu/iommufd/ |
| H A D | pages.c |
   286  static void batch_clear(struct pfn_batch *batch)  in batch_clear() argument
   288  batch->total_pfns = 0;  in batch_clear()
   289  batch->end = 0;  in batch_clear()
   290  batch->pfns[0] = 0;  in batch_clear()
   291  batch->npfns[0] = 0;  in batch_clear()
   292  batch->kind = 0;  in batch_clear()
   299  static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)  in batch_clear_carry() argument
   302  return batch_clear(batch);  in batch_clear_carry()
   305  WARN_ON(!batch->end ||  in batch_clear_carry()
   306  batch->npfns[batch->end - 1] < keep_pfns);  in batch_clear_carry()
   [all …]
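The iommufd pfn_batch stores pinned pages run-length encoded: pfns[i] is the first page-frame number of run i and npfns[i] its length. batch_clear() rewinds the whole thing, while batch_clear_carry() starts a new batch but keeps the tail of the last run that still needs processing. A hedged sketch of those semantics, with fixed-size arrays and an assert standing in for the kernel's allocation and WARN_ON:

    #include <assert.h>

    struct pfn_batch {
        unsigned long pfns[32];   /* start pfn of each contiguous run */
        unsigned int npfns[32];   /* length of each run */
        unsigned int end;         /* number of runs in use */
        unsigned int total_pfns;  /* sum of npfns[0..end-1] */
    };

    static void batch_clear(struct pfn_batch *batch)
    {
        batch->total_pfns = 0;
        batch->end = 0;
        batch->pfns[0] = 0;
        batch->npfns[0] = 0;
    }

    /* Begin a fresh batch but carry over the trailing keep_pfns of the last run. */
    static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
    {
        unsigned long carry_start;

        if (!keep_pfns) {
            batch_clear(batch);
            return;
        }

        /* the kernel WARN_ONs if the last run is shorter than keep_pfns */
        assert(batch->end && batch->npfns[batch->end - 1] >= keep_pfns);

        carry_start = batch->pfns[batch->end - 1] +
                      (batch->npfns[batch->end - 1] - keep_pfns);

        batch_clear(batch);
        batch->pfns[0] = carry_start;
        batch->npfns[0] = keep_pfns;
        batch->end = 1;
        batch->total_pfns = keep_pfns;
    }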
|
| /linux/include/trace/events/ |
| H A D | intel_ifs.h |
   13  TP_PROTO(int batch, int start, int stop, u64 status),
   15  TP_ARGS(batch, start, stop, status),
   18  __field( int, batch )
   25  __entry->batch = batch;
   32  __entry->batch,
   40  TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status),
   42  TP_ARGS(batch, activate, status),
   46  __field( int, batch )
   53  __entry->batch = batch;
   59  __entry->batch,
|
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | test_bpf_ma.c |
   56  static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)  in batch_alloc() argument
   62  for (i = 0; i < batch; i++) {  in batch_alloc()
   83  static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)  in batch_free() argument
   89  for (i = 0; i < batch; i++) {  in batch_free()
   105  static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,  in batch_percpu_alloc() argument
   112  for (i = 0; i < batch; i++) {  in batch_percpu_alloc()
   133  static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,  in batch_percpu_free() argument
   140  for (i = 0; i < batch; i++) {  in batch_percpu_free()
   154  #define CALL_BATCH_ALLOC(size, batch, idx) \  argument
   155  batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
   [all …]
|
| /linux/drivers/xen/ |
| H A D | gntdev.c |
   617  struct gntdev_copy_batch *batch;  in gntdev_release() local
   631  while (priv->batch) {  in gntdev_release()
   632  batch = priv->batch;  in gntdev_release()
   633  priv->batch = batch->next;  in gntdev_release()
   634  kfree(batch);  in gntdev_release()
   803  static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,  in gntdev_get_page() argument
   811  ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);  in gntdev_get_page()
   815  batch->pages[batch->nr_pages++] = page;  in gntdev_get_page()
   823  static void gntdev_put_pages(struct gntdev_copy_batch *batch)  in gntdev_put_pages() argument
   825  unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);  in gntdev_put_pages()
   [all …]
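Two idioms show up here: gntdev_release() pops and frees a singly linked list of cached copy batches, and gntdev_get_page()/gntdev_put_pages() pin user pages into a batch and later unpin them all at once. A sketch of the list-teardown half, with free() standing in for kfree() and hypothetical struct names:

    #include <stdlib.h>

    struct copy_batch {
        struct copy_batch *next;
        /* ... pinned pages, status slots ... */
    };

    struct priv {
        struct copy_batch *batch;       /* head of the cached-batch list */
    };

    /* Mirrors the gntdev_release() loop: pop and free every cached batch. */
    static void release_batches(struct priv *priv)
    {
        struct copy_batch *batch;

        while (priv->batch) {
            batch = priv->batch;
            priv->batch = batch->next;  /* unlink before freeing */
            free(batch);                /* kernel uses kfree() */
        }
    }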
|
| /linux/drivers/iommu/intel/ |
| H A D | cache.c |
   293  static void qi_batch_flush_descs(struct intel_iommu *iommu, struct qi_batch *batch)  in qi_batch_flush_descs() argument
   295  if (!iommu || !batch->index)  in qi_batch_flush_descs()
   298  qi_submit_sync(iommu, batch->descs, batch->index, 0);  in qi_batch_flush_descs()
   301  memset(batch, 0, sizeof(*batch));  in qi_batch_flush_descs()
   304  static void qi_batch_increment_index(struct intel_iommu *iommu, struct qi_batch *batch)  in qi_batch_increment_index() argument
   306  if (++batch->index == QI_MAX_BATCHED_DESC_COUNT)  in qi_batch_increment_index()
   307  qi_batch_flush_descs(iommu, batch);  in qi_batch_increment_index()
   312  struct qi_batch *batch)  in qi_batch_add_iotlb() argument
   314  qi_desc_iotlb(iommu, did, addr, size_order, type, &batch->descs[batch->index]);  in qi_batch_add_iotlb()
   315  qi_batch_increment_index(iommu, batch);  in qi_batch_add_iotlb()
   [all …]
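This is the classic accumulate-then-flush batch: invalidation descriptors are appended to a fixed array, and hitting QI_MAX_BATCHED_DESC_COUNT triggers a synchronous submission and a reset. A self-contained sketch with a printf stub in place of qi_submit_sync() and an assumed capacity:

    #include <stdio.h>
    #include <string.h>

    #define QI_MAX_BATCHED_DESC_COUNT 16    /* capacity assumed for the sketch */

    struct qi_desc { unsigned long long qw[4]; };

    struct qi_batch {
        struct qi_desc descs[QI_MAX_BATCHED_DESC_COUNT];
        unsigned int index;                 /* descriptors accumulated so far */
    };

    /* Stand-in for qi_submit_sync(): here it just reports the submission. */
    static void submit_sync(struct qi_desc *descs, unsigned int count)
    {
        (void)descs;
        printf("submitting %u descriptors\n", count);
    }

    static void qi_batch_flush(struct qi_batch *batch)
    {
        if (!batch->index)
            return;                          /* nothing accumulated yet */
        submit_sync(batch->descs, batch->index);
        memset(batch, 0, sizeof(*batch));    /* reset index and descriptors */
    }

    static void qi_batch_add(struct qi_batch *batch, struct qi_desc desc)
    {
        batch->descs[batch->index] = desc;   /* qi_batch_add_iotlb() builds this in place */
        if (++batch->index == QI_MAX_BATCHED_DESC_COUNT)
            qi_batch_flush(batch);           /* flush automatically when full */
    }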
|
| /linux/drivers/gpu/drm/i915/gt/ |
| H A D | gen8_engine_cs.h |
   53  __gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,  in __gen8_emit_pipe_control() argument
   56  memset(batch, 0, 6 * sizeof(u32));  in __gen8_emit_pipe_control()
   58  batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;  in __gen8_emit_pipe_control()
   59  batch[1] = bit_group_1;  in __gen8_emit_pipe_control()
   60  batch[2] = offset;  in __gen8_emit_pipe_control()
   62  return batch + 6;  in __gen8_emit_pipe_control()
   65  static inline u32 *gen8_emit_pipe_control(u32 *batch,  in gen8_emit_pipe_control() argument
   68  return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);  in gen8_emit_pipe_control()
   71  static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,  in gen12_emit_pipe_control() argument
   74  return __gen8_emit_pipe_control(batch, bit_group_0,  in gen12_emit_pipe_control()
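These helpers emit a fixed six-dword PIPE_CONTROL packet into a command batch and return the pointer to the next free dword, so callers can chain emits. A sketch in plain C; the opcode macro follows the (0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2) encoding i915 uses for GFX_OP_PIPE_CONTROL, but treat that value as an assumption and check the headers before relying on it:

    #include <stdint.h>
    #include <string.h>

    /* Header dword for a PIPE_CONTROL of 'len' dwords (encoding assumed from i915). */
    #define OP_PIPE_CONTROL(len) \
        ((0x3u << 29) | (0x3u << 27) | (0x2u << 24) | ((len) - 2))

    /* Mirrors __gen8_emit_pipe_control(): fill six dwords, return the next slot. */
    static inline uint32_t *emit_pipe_control(uint32_t *batch, uint32_t bit_group_0,
                                              uint32_t bit_group_1, uint32_t offset)
    {
        memset(batch, 0, 6 * sizeof(uint32_t)); /* zero the trailing dwords */

        batch[0] = OP_PIPE_CONTROL(6) | bit_group_0;
        batch[1] = bit_group_1;
        batch[2] = offset;

        return batch + 6; /* callers chain: cs = emit_pipe_control(cs, ...) */
    }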
|
| H A D | selftest_hangcheck.c |
   38  u32 *batch;  member
   81  h->batch = vaddr;  in hang_init()
   113  u32 *batch;  in hang_create_request() local
   133  h->batch = vaddr;  in hang_create_request()
   171  batch = h->batch;  in hang_create_request()
   173  *batch++ = MI_STORE_DWORD_IMM_GEN4;  in hang_create_request()
   174  *batch++ = lower_32_bits(hws_address(hws, rq));  in hang_create_request()
   175  *batch++ = upper_32_bits(hws_address(hws, rq));  in hang_create_request()
   176  *batch++ = rq->fence.seqno;  in hang_create_request()
   177  *batch++ = MI_NOOP;  in hang_create_request()
   [all …]
|
| H A D | selftest_workarounds.c |
   504  struct i915_vma *batch;  in check_dirty_whitelist() local
   513  batch = create_batch(ce->vm);  in check_dirty_whitelist()
   514  if (IS_ERR(batch)) {  in check_dirty_whitelist()
   515  err = PTR_ERR(batch);  in check_dirty_whitelist()
   542  err = i915_gem_object_lock(batch->obj, &ww);  in check_dirty_whitelist()
   548  cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);  in check_dirty_whitelist()
   614  i915_gem_object_flush_map(batch->obj);  in check_dirty_whitelist()
   615  i915_gem_object_unpin_map(batch->obj);  in check_dirty_whitelist()
   631  err = i915_vma_move_to_active(batch, rq, 0);  in check_dirty_whitelist()
   641  i915_vma_offset(batch), PAGE_SIZE,  in check_dirty_whitelist()
   [all …]
|
| H A D | selftest_engine_cs.c |
   147  struct i915_vma *batch;  in perf_mi_bb_start() local
   156  batch = create_empty_batch(ce);  in perf_mi_bb_start()
   157  if (IS_ERR(batch)) {  in perf_mi_bb_start()
   158  err = PTR_ERR(batch);  in perf_mi_bb_start()
   163  err = i915_vma_sync(batch);  in perf_mi_bb_start()
   166  i915_vma_put(batch);  in perf_mi_bb_start()
   184  i915_vma_offset(batch), 8,  in perf_mi_bb_start()
   205  i915_vma_put(batch);  in perf_mi_bb_start()
|
| /linux/drivers/gpu/drm/vmwgfx/ |
| H A D | vmwgfx_mob.c |
   238  struct vmw_otable_batch *batch)  in vmw_otable_batch_setup() argument
   242  struct vmw_otable *otables = batch->otables;  in vmw_otable_batch_setup()
   247  for (i = 0; i < batch->num_otables; ++i) {  in vmw_otable_batch_setup()
   257  &batch->otable_bo);  in vmw_otable_batch_setup()
   262  for (i = 0; i < batch->num_otables; ++i) {  in vmw_otable_batch_setup()
   263  if (!batch->otables[i].enabled)  in vmw_otable_batch_setup()
   267  &batch->otable_bo->tbo,  in vmw_otable_batch_setup()
   278  for (i = 0; i < batch->num_otables; ++i) {  in vmw_otable_batch_setup()
   279  if (batch->otables[i].enabled)  in vmw_otable_batch_setup()
   281  &batch->otables[i]);  in vmw_otable_batch_setup()
   [all …]
|
| /linux/drivers/gpu/drm/i915/gem/selftests/ |
| H A D | igt_gem_utils.c |
   116  struct i915_vma *batch;  in igt_gpu_fill_dw() local
   123  batch = igt_emit_store_dw(vma, offset, count, val);  in igt_gpu_fill_dw()
   124  if (IS_ERR(batch))  in igt_gpu_fill_dw()
   125  return PTR_ERR(batch);  in igt_gpu_fill_dw()
   133  err = igt_vma_move_to_active_unlocked(batch, rq, 0);  in igt_gpu_fill_dw()
   146  i915_vma_offset(batch),  in igt_gpu_fill_dw()
   147  i915_vma_size(batch),  in igt_gpu_fill_dw()
   155  i915_vma_unpin_and_release(&batch, 0);  in igt_gpu_fill_dw()
|
| H A D | i915_gem_client_blt.c |
   106  struct i915_vma *batch;  member
   146  struct drm_i915_gem_object *batch)  in prepare_blit() argument
   148  const int ver = GRAPHICS_VER(to_i915(batch->base.dev));  in prepare_blit()
   153  cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);  in prepare_blit()
   173  if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))  in prepare_blit()
   184  if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))  in prepare_blit()
   255  i915_gem_object_flush_map(batch);  in prepare_blit()
   256  i915_gem_object_unpin_map(batch);  in prepare_blit()
   269  i915_vma_put(t->batch);  in tiled_blits_destroy_buffers()
   308  t->batch = __create_vma(t, PAGE_SIZE, false);  in tiled_blits_create_buffers()
   [all …]
|
| /linux/drivers/vfio/ |
| H A D | vfio_iommu_type1.c |
   494  static void __vfio_batch_init(struct vfio_batch *batch, bool single)  in __vfio_batch_init() argument
   496  batch->size = 0;  in __vfio_batch_init()
   497  batch->offset = 0;  in __vfio_batch_init()
   502  batch->pages = (struct page **) __get_free_page(GFP_KERNEL);  in __vfio_batch_init()
   503  if (!batch->pages)  in __vfio_batch_init()
   506  batch->capacity = VFIO_BATCH_MAX_CAPACITY;  in __vfio_batch_init()
   510  batch->pages = &batch->fallback_page;  in __vfio_batch_init()
   511  batch->capacity = 1;  in __vfio_batch_init()
   514  static void vfio_batch_init(struct vfio_batch *batch)  in vfio_batch_init() argument
   516  __vfio_batch_init(batch, false);  in vfio_batch_init()
   [all …]
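__vfio_batch_init() sizes the pin batch opportunistically: a whole page of struct page pointers when it can get one, else a single embedded fallback slot so pinning still makes forward progress one page at a time. A sketch with malloc() in place of __get_free_page() and an assumed capacity:

    #include <stdbool.h>
    #include <stdlib.h>

    #define VFIO_BATCH_MAX_CAPACITY 512   /* assumed: PAGE_SIZE / sizeof(struct page *) */

    struct page;                          /* opaque for the sketch */

    struct vfio_batch {
        struct page **pages;              /* array of 'capacity' page pointers */
        struct page *fallback_page;
        unsigned int capacity;
        unsigned int size;                /* pages filled so far */
        unsigned int offset;              /* pages already consumed */
    };

    static void vfio_batch_init(struct vfio_batch *batch, bool single)
    {
        batch->size = 0;
        batch->offset = 0;

        if (!single) {
            batch->pages = malloc(VFIO_BATCH_MAX_CAPACITY * sizeof(*batch->pages));
            if (batch->pages) {
                batch->capacity = VFIO_BATCH_MAX_CAPACITY;
                return;
            }
        }
        /* Fall back to a one-entry batch so pinning can still make progress. */
        batch->pages = &batch->fallback_page;
        batch->capacity = 1;
    }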
|
| /linux/tools/testing/selftests/bpf/map_tests/ |
| H A D | htab_map_batch_ops.c |
   79  __u32 batch, count, total, total_success;  in __test_map_lookup_and_delete_batch() local
   109  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,  in __test_map_lookup_and_delete_batch()
   119  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,  in __test_map_lookup_and_delete_batch()
   127  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,  in __test_map_lookup_and_delete_batch()
   153  total ? &batch : NULL,  in __test_map_lookup_and_delete_batch()
   154  &batch, keys + total,  in __test_map_lookup_and_delete_batch()
   216  total ? &batch : NULL,  in __test_map_lookup_and_delete_batch()
   217  &batch, keys + total,  in __test_map_lookup_and_delete_batch()
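The selftest drives the batched map API with an opaque cursor: the first call passes NULL as in_batch, and each later call feeds back the value the previous call wrote through out_batch, until ENOENT signals the map is exhausted. A hedged sketch of that loop against the libbpf API; the helper name and equal-stride buffer layout are this sketch's choices, not the selftest's:

    #include <errno.h>
    #include <bpf/bpf.h>    /* bpf_map_lookup_and_delete_batch() */

    static int drain_map(int map_fd, void *keys, void *values,
                         __u32 max_entries, __u32 key_size, __u32 value_size)
    {
        __u32 batch, total = 0, count;
        int err;

        do {
            count = max_entries - total;
            err = bpf_map_lookup_and_delete_batch(map_fd,
                                                  total ? &batch : NULL, /* in_batch */
                                                  &batch,                /* out_batch */
                                                  (char *)keys + total * key_size,
                                                  (char *)values + total * value_size,
                                                  &count, NULL);
            if (err && errno != ENOENT)
                return -errno;
            total += count;
        } while (!err && total < max_entries);  /* ENOENT marks the end of the map */

        return total;
    }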
|
| /linux/lib/ |
| H A D | percpu_counter.c |
   93  void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)  in percpu_counter_add_batch() argument
   100  if (unlikely(abs(count + amount) >= batch)) {  in percpu_counter_add_batch()
   120  void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)  in percpu_counter_add_batch() argument
   127  if (abs(count) >= batch) {  in percpu_counter_add_batch()
   292  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
   298  if (abs(count - rhs) > (batch * num_online_cpus())) {  in __percpu_counter_compare()
   328  s64 limit, s64 amount, s32 batch)  in __percpu_counter_limited_add() argument
   339  unknown = batch * num_online_cpus();  in __percpu_counter_limited_add()
   343  if (abs(count + amount) <= batch &&  in __percpu_counter_limited_add()
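percpu_counter_add_batch() is the canonical batched counter: updates stay in per-CPU storage until their magnitude reaches `batch`, and only then are they folded into the shared total under the lock, which is why reads are approximate within batch * num_online_cpus(). A user-space analogue using per-thread slots and a mutex; the names and the 64-thread cap are this sketch's assumptions:

    #include <pthread.h>
    #include <stdlib.h>

    struct pc_counter {
        long long count;              /* shared, approximate total */
        pthread_mutex_t lock;
        long long local[64];          /* one slot per thread; kernel uses per-CPU storage */
    };

    static struct pc_counter counter = { .lock = PTHREAD_MUTEX_INITIALIZER };

    static void pc_add_batch(struct pc_counter *fbc, int tid,
                             long long amount, int batch)
    {
        long long count = fbc->local[tid] + amount;

        if (llabs(count) >= batch) {       /* threshold reached: fold into global */
            pthread_mutex_lock(&fbc->lock);
            fbc->count += count;
            pthread_mutex_unlock(&fbc->lock);
            fbc->local[tid] = 0;
        } else {
            fbc->local[tid] = count;       /* stay local: no lock, no contention */
        }
    }

A larger `batch` means fewer lock acquisitions but a looser bound on the shared total, which is exactly the tradeoff __percpu_counter_compare() accounts for when it widens the comparison by batch * num_online_cpus().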
|
| /linux/tools/virtio/ |
| H A D | virtio_test.c |
   170  bool delayed, int batch, int reset_n, int bufs)  in run_test() argument
   178  const bool random_batch = batch == RANDOM_BATCH;  in run_test()
   193  batch = (random() % vq->vring.num) + 1;  in run_test()
   196  (started - completed) < batch) {  in run_test()
   349  long batch = 1, reset = 0;  in main() local
   376  batch = RANDOM_BATCH;  in main()
   378  batch = strtol(optarg, NULL, 10);  in main()
   379  assert(batch > 0);  in main()
   380  assert(batch < (long)INT_MAX + 1);  in main()
   401  run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);  in main()
|
| /linux/drivers/net/ethernet/netronome/nfp/flower/ |
| H A D | lag_conf.c |
   234  unsigned int member_cnt, enum nfp_fl_lag_batch *batch)  in nfp_fl_lag_config_group() argument
   254  if (*batch == NFP_FL_LAG_BATCH_FIRST) {  in nfp_fl_lag_config_group()
   257  *batch = NFP_FL_LAG_BATCH_MEMBER;  in nfp_fl_lag_config_group()
   263  *batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_config_group()
   269  if (*batch == NFP_FL_LAG_BATCH_FINISHED) {  in nfp_fl_lag_config_group()
   296  enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;  in nfp_fl_lag_do_work() local
   318  &batch);  in nfp_fl_lag_do_work()
   390  active_count, &batch);  in nfp_fl_lag_do_work()
   404  if (batch == NFP_FL_LAG_BATCH_MEMBER) {  in nfp_fl_lag_do_work()
   405  batch = NFP_FL_LAG_BATCH_FINISHED;  in nfp_fl_lag_do_work()
   [all …]
|
| /linux/arch/riscv/mm/ |
| H A D | tlbflush.c |
   230  void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,  in arch_tlbbatch_add_pending() argument
   233  cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));  in arch_tlbbatch_add_pending()
   237  void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)  in arch_tlbbatch_flush() argument
   239  __flush_tlb_range(NULL, &batch->cpumask,  in arch_tlbbatch_flush()
   241  cpumask_clear(&batch->cpumask);  in arch_tlbbatch_flush()
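These arch hooks implement deferred TLB flushing: each unmap ORs the mm's CPU mask into the batch, and arch_tlbbatch_flush() performs one ranged flush covering every accumulated CPU before clearing the mask. A sketch with a 64-bit word standing in for struct cpumask and a stub flush:

    #include <stdint.h>
    #include <stdio.h>

    struct tlbflush_unmap_batch {
        uint64_t cpumask;             /* one bit per CPU; kernel uses struct cpumask */
    };

    /* Stub for the real ranged flush (__flush_tlb_range() over the mask). */
    static void flush_tlb_cpus(uint64_t cpumask)
    {
        printf("flushing TLBs on cpumask %#llx\n", (unsigned long long)cpumask);
    }

    /* Mirrors arch_tlbbatch_add_pending(): accumulate, don't flush yet. */
    static void tlbbatch_add_pending(struct tlbflush_unmap_batch *batch,
                                     uint64_t mm_cpumask)
    {
        batch->cpumask |= mm_cpumask;
    }

    /* Mirrors arch_tlbbatch_flush(): one flush covers the whole batch. */
    static void tlbbatch_flush(struct tlbflush_unmap_batch *batch)
    {
        flush_tlb_cpus(batch->cpumask);
        batch->cpumask = 0;           /* cpumask_clear() */
    }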
|
| /linux/fs/smb/client/ |
| H A D | smbdirect.c |
   39  struct smbdirect_send_batch *batch,
   1226  struct smbdirect_send_batch *batch,  in smbd_post_send() argument
   1248  if (batch) {  in smbd_post_send()
   1251  if (!list_empty(&batch->msg_list)) {  in smbd_post_send()
   1254  last = list_last_entry(&batch->msg_list,  in smbd_post_send()
   1259  list_add_tail(&request->sibling_list, &batch->msg_list);  in smbd_post_send()
   1260  batch->wr_cnt++;  in smbd_post_send()
   1269  static void smbd_send_batch_init(struct smbdirect_send_batch *batch,  in smbd_send_batch_init() argument
   1273  INIT_LIST_HEAD(&batch->msg_list);  in smbd_send_batch_init()
   1274  batch->wr_cnt = 0;  in smbd_send_batch_init()
   [all …]
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| H A D | i915_request.c |
   1009  static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)  in emit_bb_start() argument
   1012  i915_vma_offset(batch),  in emit_bb_start()
   1013  i915_vma_size(batch),  in emit_bb_start()
   1019  struct i915_vma *batch)  in empty_request() argument
   1028  err = emit_bb_start(request, batch);  in empty_request()
   1054  struct i915_vma *batch;  in live_empty_request() local
   1058  batch = empty_batch(engine->gt);  in live_empty_request()
   1059  if (IS_ERR(batch))  in live_empty_request()
   1060  return PTR_ERR(batch);  in live_empty_request()
   1069  request = empty_request(engine, batch);  in live_empty_request()
   [all …]
|
| /linux/tools/virtio/ringtest/ |
| H A D | main.c |
   22  int batch = 1;  variable
   116  int tokick = batch;  in run_guest()
   129  tokick = batch;  in run_guest()
   348  batch = c;  in main()
   372  if (batch > max_outstanding)  in main()
   373  batch = max_outstanding;  in main()
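The ringtest batching knob amortizes notification cost: the guest posts up to `batch` buffers before kicking the host, so one kick covers many buffers. A sketch of the run_guest() shape with hypothetical ring helpers standing in for the real ones:

    /* Hypothetical ring helpers for the sketch. */
    static int add_inbuf(void)  { return 0; }  /* post one buffer to the ring */
    static void kick(void)      { }            /* notify the other side */
    static int poll_used(void)  { return 1; }  /* reap one completion */

    static void run_guest(int batch, int nbufs)
    {
        int started = 0, completed = 0;
        int tokick = batch;

        while (completed < nbufs) {
            while (started - completed < batch && started < nbufs) {
                add_inbuf();
                started++;
                if (--tokick == 0) {   /* kick once per 'batch' buffers */
                    kick();
                    tokick = batch;
                }
            }
            completed += poll_used();  /* drain completions, then refill */
        }
    }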
|
| /linux/net/core/ |
| H A D | netclassid_cgroup.c |
   66  unsigned int batch;  member
   78  if (--ctx->batch == 0) {  in update_classid_sock()
   79  ctx->batch = UPDATE_CLASSID_BATCH;  in update_classid_sock()
   89  .batch = UPDATE_CLASSID_BATCH  in update_classid_task()
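Here the batch is a yield counter: update_classid_sock() walks a cgroup's sockets and, every UPDATE_CLASSID_BATCH sockets, resets the countdown and breaks out so the walk can reschedule instead of hogging the CPU. A sketch of that cooperative-batching shape, with the actual socket tagging elided:

    #define UPDATE_CLASSID_BATCH 1024   /* sockets processed per slice; value assumed */

    struct update_ctx {
        unsigned int classid;
        unsigned int batch;             /* countdown until we yield */
    };

    /* Returns nonzero when the walk should pause, mirroring how the kernel
     * breaks the iteration to allow rescheduling before resuming. */
    static int update_classid_sock(struct update_ctx *ctx)
    {
        /* ... tag the current socket with ctx->classid ... */

        if (--ctx->batch == 0) {
            ctx->batch = UPDATE_CLASSID_BATCH;
            return 1;                   /* yield; the caller resumes from here */
        }
        return 0;
    }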
|
| /linux/drivers/net/ethernet/freescale/dpaa2/ |
| H A D | dpaa2-xsk.c |
   401  int batch, i, err;  in dpaa2_xsk_tx() local
   410  batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);  in dpaa2_xsk_tx()
   411  if (!batch)  in dpaa2_xsk_tx()
   415  for (i = 0; i < batch; i++) {  in dpaa2_xsk_tx()
   418  batch = i;  in dpaa2_xsk_tx()
   426  max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;  in dpaa2_xsk_tx()
   430  while (total_enqueued < batch && retries < max_retries) {  in dpaa2_xsk_tx()
   432  batch - total_enqueued, &enqueued);  in dpaa2_xsk_tx()
   446  for (i = total_enqueued; i < batch; i++) {  in dpaa2_xsk_tx()
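dpaa2_xsk_tx() peeks a batch of descriptors from the XSK pool, then enqueues them to hardware in a retry loop whose budget scales with the batch size; whatever cannot be enqueued is completed back to the pool. A sketch of the enqueue loop with a hypothetical hw_enqueue() in place of the driver's frame-queue call:

    #define ENQUEUE_RETRIES 10    /* stands in for DPAA2_ETH_ENQUEUE_RETRIES */

    /* Hypothetical hardware enqueue: pushes up to 'count' frames, reports how
     * many were accepted, returns nonzero on a transient busy condition. */
    static int hw_enqueue(int count, int *enqueued)
    {
        *enqueued = count;        /* a real queue may accept fewer */
        return 0;
    }

    static int xsk_tx_batch(int batch)
    {
        int total_enqueued = 0, retries = 0;
        int max_retries = batch * ENQUEUE_RETRIES;  /* retry budget scales with batch */
        int enqueued, err;

        while (total_enqueued < batch && retries < max_retries) {
            err = hw_enqueue(batch - total_enqueued, &enqueued);
            if (err) {
                retries++;        /* transient failure: retry the remainder */
                continue;
            }
            total_enqueued += enqueued;
        }
        return total_enqueued;    /* frames left over are completed back to the pool */
    }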
|
| /linux/include/linux/ |
| H A D | percpu_counter.h |
   57  s32 batch);
   59  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
   61  s64 amount, s32 batch);
   182  __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
   226  percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)  in percpu_counter_add_batch() argument
|