
Searched full:scratch (Results 1 – 25 of 626) sorted by relevance


/linux/arch/sparc/include/asm/
winmacro.h
50 #define LOAD_PT_YREG(base_reg, scratch) \ argument
51 ld [%base_reg + STACKFRAME_SZ + PT_Y], %scratch; \
52 wr %scratch, 0x0, %y;
59 #define LOAD_PT_ALL(base_reg, pt_psr, pt_pc, pt_npc, scratch) \ argument
60 LOAD_PT_YREG(base_reg, scratch) \
77 #define STORE_PT_YREG(base_reg, scratch) \ argument
78 rd %y, %scratch; \
79 st %scratch, [%base_reg + STACKFRAME_SZ + PT_Y];
92 #define SAVE_BOLIXED_USER_STACK(cur_reg, scratch) \ argument
93 ld [%cur_reg + TI_W_SAVED], %scratch; \
[all …]
/linux/arch/arc/kernel/
signal.c
108 uregs.scratch.bta = regs->bta; in stash_usr_regs()
109 uregs.scratch.lp_start = regs->lp_start; in stash_usr_regs()
110 uregs.scratch.lp_end = regs->lp_end; in stash_usr_regs()
111 uregs.scratch.lp_count = regs->lp_count; in stash_usr_regs()
112 uregs.scratch.status32 = regs->status32; in stash_usr_regs()
113 uregs.scratch.ret = regs->ret; in stash_usr_regs()
114 uregs.scratch.blink = regs->blink; in stash_usr_regs()
115 uregs.scratch.fp = regs->fp; in stash_usr_regs()
116 uregs.scratch.gp = regs->r26; in stash_usr_regs()
117 uregs.scratch.r12 = regs->r12; in stash_usr_regs()
[all …]
ptrace.c
192 REG_IN_ONE(scratch.bta, &ptregs->bta); in genregs_set()
193 REG_IN_ONE(scratch.lp_start, &ptregs->lp_start); in genregs_set()
194 REG_IN_ONE(scratch.lp_end, &ptregs->lp_end); in genregs_set()
195 REG_IN_ONE(scratch.lp_count, &ptregs->lp_count); in genregs_set()
197 REG_IGNORE_ONE(scratch.status32); in genregs_set()
199 REG_IN_ONE(scratch.ret, &ptregs->ret); in genregs_set()
200 REG_IN_ONE(scratch.blink, &ptregs->blink); in genregs_set()
201 REG_IN_ONE(scratch.fp, &ptregs->fp); in genregs_set()
202 REG_IN_ONE(scratch.gp, &ptregs->r26); in genregs_set()
203 REG_IN_ONE(scratch.r12, &ptregs->r12); in genregs_set()
[all …]
/linux/crypto/
scompress.c
71 struct scomp_scratch *scratch; in crypto_scomp_free_scratches() local
75 scratch = per_cpu_ptr(&scomp_scratch, i); in crypto_scomp_free_scratches()
77 free_page(scratch->saddr); in crypto_scomp_free_scratches()
78 scratch->src = NULL; in crypto_scomp_free_scratches()
82 static int scomp_alloc_scratch(struct scomp_scratch *scratch, int cpu) in scomp_alloc_scratch() argument
90 spin_lock_bh(&scratch->lock); in scomp_alloc_scratch()
91 scratch->src = page_address(page); in scomp_alloc_scratch()
92 spin_unlock_bh(&scratch->lock); in scomp_alloc_scratch()
101 struct scomp_scratch *scratch; in scomp_scratch_workfn() local
103 scratch = per_cpu_ptr(&scomp_scratch, cpu); in scomp_scratch_workfn()
[all …]
/linux/drivers/infiniband/hw/irdma/
type.h
409 u64 scratch; member
810 u64 scratch; member
1320 int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
1322 int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
1328 int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
1331 int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
1350 bool first, u64 *scratch,
1363 struct irdma_create_qp_info *info, u64 scratch,
1365 int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
1368 struct irdma_qp_flush_info *info, u64 scratch,
[all …]
uda.h
40 u32 op, u64 scratch);
43 u64 scratch);
51 struct irdma_ah_info *info, u64 scratch) in irdma_sc_create_ah() argument
54 scratch); in irdma_sc_create_ah()
58 struct irdma_ah_info *info, u64 scratch) in irdma_sc_destroy_ah() argument
61 scratch); in irdma_sc_destroy_ah()
66 u64 scratch) in irdma_sc_create_mcast_grp() argument
69 scratch); in irdma_sc_create_mcast_grp()
74 u64 scratch) in irdma_sc_modify_mcast_grp() argument
77 scratch); in irdma_sc_modify_mcast_grp()
[all …]
ctrl.c
187 * @scratch: u64 saved to be used during cqp completion
192 u64 scratch, bool post_sq) in irdma_sc_add_arp_cache_entry() argument
197 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); in irdma_sc_add_arp_cache_entry()
223 * @scratch: u64 saved to be used during cqp completion
227 static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch, in irdma_sc_del_arp_cache_entry() argument
233 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); in irdma_sc_del_arp_cache_entry()
257 * @scratch: u64 saved to be used during cqp completion
262 u64 scratch, bool post_sq) in irdma_sc_manage_apbvt_entry() argument
267 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); in irdma_sc_manage_apbvt_entry()
292 * @scratch: u64 saved to be used during cqp completion
[all …]
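
The @scratch kerneldoc repeated through the ctrl.c hits above describes an opaque completion cookie: the caller stashes a u64 (typically a pointer) in the control-QP work request and receives the same value back when the completion arrives. A minimal sketch of that round trip, with hypothetical names (demo_req, demo_post, demo_complete) rather than the irdma driver's real API:

#include <linux/completion.h>
#include <linux/types.h>

/* Hypothetical request tracked across a control-QP round trip. */
struct demo_req {
        struct completion done;
        int status;
};

/* Post side: stash the request pointer as the opaque scratch cookie. */
static u64 demo_post(struct demo_req *req)
{
        init_completion(&req->done);
        return (uintptr_t)req;  /* handed to hardware, returned verbatim */
}

/* Completion side: recover the request from the cookie in the CQE. */
static void demo_complete(u64 scratch, int status)
{
        struct demo_req *req = (struct demo_req *)(uintptr_t)scratch;

        req->status = status;
        complete(&req->done);
}
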
/linux/drivers/mmc/host/
sdhci-pci-o2micro.c
112 u16 scratch; in sdhci_o2_enable_internal_clock() local
132 scratch = sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1); in sdhci_o2_enable_internal_clock()
133 if (scratch & O2_PLL_LOCK_STATUS) in sdhci_o2_enable_internal_clock()
321 u16 scratch = 0; in sdhci_o2_execute_tuning() local
338 scratch = sdhci_readw(host, O2_SD_MISC_CTRL); in sdhci_o2_execute_tuning()
339 scratch |= O2_SD_PWR_FORCE_L0; in sdhci_o2_execute_tuning()
340 sdhci_writew(host, scratch, O2_SD_MISC_CTRL); in sdhci_o2_execute_tuning()
420 scratch = sdhci_readw(host, O2_SD_MISC_CTRL); in sdhci_o2_execute_tuning()
421 scratch &= ~(O2_SD_PWR_FORCE_L0); in sdhci_o2_execute_tuning()
422 sdhci_writew(host, scratch, O2_SD_MISC_CTRL); in sdhci_o2_execute_tuning()
[all …]
/linux/drivers/scsi/aic94xx/
aic94xx_reg_def.h
1958 * Sequencers (Central and Link) Scratch RAM page definitions.
1962 * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024
1968 * dependent scratch memory, Mode 8, page 0-3 overlaps mode
1969 * independent scratch memory, pages 0-3.
1970 * - 896 bytes of mode dependent scratch, 96 bytes per Modes 0-7, and
1972 * - 259 bytes of mode independent scratch, common to modes 0-15.
1974 * Sequencer scratch RAM is 1024 bytes. This scratch memory is
1975 * divided into mode dependent and mode independent scratch with this
1977 * pages (160 bytes) of mode independent scratch and 3 pages of
1978 * dependent scratch memory for modes 0-7 (768 bytes). Mode 8 pages
[all …]
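
The figures quoted above fix the page geometry: 3 mode-dependent pages per mode and 96 bytes per mode for modes 0-7 imply 32-byte pages. A sketch of the resulting offset arithmetic under that assumption; the names are illustrative, not the aic94xx driver's own macros:

#define SEQ_PAGE_SZ             32 /* bytes per scratch page (96 / 3) */
#define SEQ_DEP_PAGES_PER_MODE  3  /* mode-dependent pages, modes 0-7 */

/* Byte offset of a page within the mode-dependent scratch of a mode. */
static inline unsigned int seq_dep_scratch_off(unsigned int mode,
                                               unsigned int page)
{
        return (mode * SEQ_DEP_PAGES_PER_MODE + page) * SEQ_PAGE_SZ;
}
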
/linux/arch/arc/include/asm/
irqflags-compact.h
185 .macro IRQ_DISABLE scratch
186 lr \scratch, [status32]
187 bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
188 flag \scratch
192 .macro IRQ_ENABLE scratch
194 lr \scratch, [status32]
195 or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
196 flag \scratch
/linux/arch/mips/include/asm/octeon/
cvmx-fau.h
318 * @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
353 * placed in the scratch memory at byte address scraddr.
355 * @scraddr: Scratch memory byte address to put response in.
361 * Returns Placed in the scratch pad register
373 * placed in the scratch memory at byte address scraddr.
375 * @scraddr: Scratch memory byte address to put response in.
381 * Returns Placed in the scratch pad register
393 * placed in the scratch memory at byte address scraddr.
395 * @scraddr: Scratch memory byte address to put response in.
400 * Returns Placed in the scratch pad register
[all …]
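
The recurring constraint in the cvmx-fau.h comments is that @scraddr, the scratch-pad byte address receiving the asynchronous result, must be 8-byte aligned, since the FAU deposits a 64-bit word there. A hedged caller-side check; demo_issue_fau() is a placeholder, not the Octeon API:

#include <stdint.h>

/* Sketch only: reject scratch-pad destinations that are not 8-byte
 * aligned before issuing an asynchronous FAU operation. */
static int fau_async_demo(uint64_t scraddr)
{
        if (scraddr & 7)
                return -1; /* result is a 64-bit word; must be aligned */
        /* demo_issue_fau(scraddr); poll scraddr for the result later */
        return 0;
}
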
/linux/arch/riscv/kernel/
module-sections.c
121 Elf_Rela *scratch = NULL; in module_frob_arch_sections() local
169 scratch_size_needed = (num_scratch_relas + num_relas) * sizeof(*scratch); in module_frob_arch_sections()
172 new_scratch = kvrealloc(scratch, scratch_size, GFP_KERNEL); in module_frob_arch_sections()
174 kvfree(scratch); in module_frob_arch_sections()
177 scratch = new_scratch; in module_frob_arch_sections()
182 scratch[num_scratch_relas++] = relas[j]; in module_frob_arch_sections()
185 if (scratch) { in module_frob_arch_sections()
187 sort(scratch, num_scratch_relas, sizeof(*scratch), cmp_rela, NULL); in module_frob_arch_sections()
188 count_max_entries(scratch, num_scratch_relas, &num_plts, &num_gots); in module_frob_arch_sections()
189 kvfree(scratch); in module_frob_arch_sections()
/linux/drivers/gpu/drm/i915/gt/
gen6_ppgtt.c
23 dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]); in gen6_write_pde()
79 const gen6_pte_t scratch_pte = vm->scratch[0]->encode; in gen6_ppgtt_clear_range()
100 * entries back to scratch. in gen6_ppgtt_clear_range()
193 fill32_px(pt, vm->scratch[0]->encode); in gen6_alloc_va_range()
228 vm->scratch[0]->encode = in gen6_ppgtt_init_scratch()
229 vm->pte_encode(px_dma(vm->scratch[0]), in gen6_ppgtt_init_scratch()
234 vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); in gen6_ppgtt_init_scratch()
235 if (IS_ERR(vm->scratch[1])) { in gen6_ppgtt_init_scratch()
236 ret = PTR_ERR(vm->scratch[1]); in gen6_ppgtt_init_scratch()
240 ret = map_pt_dma(vm, vm->scratch[1]); in gen6_ppgtt_init_scratch()
[all …]
selftest_workarounds.c
503 struct i915_vma *scratch; in check_dirty_whitelist() local
509 scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz); in check_dirty_whitelist()
510 if (IS_ERR(scratch)) in check_dirty_whitelist()
511 return PTR_ERR(scratch); in check_dirty_whitelist()
522 u64 addr = i915_vma_offset(scratch); in check_dirty_whitelist()
540 err = i915_gem_object_lock(scratch->obj, &ww); in check_dirty_whitelist()
554 results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); in check_dirty_whitelist()
604 GEM_BUG_ON(idx * sizeof(u32) > scratch->size); in check_dirty_whitelist()
635 err = i915_vma_move_to_active(scratch, rq, in check_dirty_whitelist()
730 i915_gem_object_unpin_map(scratch->obj); in check_dirty_whitelist()
[all …]
gen8_ppgtt.c
243 const struct drm_i915_gem_object * const scratch = vm->scratch[lvl]; in __gen8_ppgtt_clear() local
261 clear_pd_entry(pd, idx, scratch); in __gen8_ppgtt_clear()
293 vm->scratch[0]->encode, in __gen8_ppgtt_clear()
300 if (release_pd_entry(pd, idx, pt, scratch)) in __gen8_ppgtt_clear()
351 fill_px(pt, vm->scratch[lvl]->encode); in __gen8_ppgtt_alloc()
714 * the 64K PTE, it will read/write into the scratch page in gen8_ppgtt_insert_huge()
721 encode = vm->scratch[0]->encode; in gen8_ppgtt_insert_huge()
833 * If everybody agrees not to write into the scratch page, in gen8_init_scratch()
843 vm->scratch[i] = i915_gem_object_get(clone->scratch[i]); in gen8_init_scratch()
853 if (i915_gem_object_is_lmem(vm->scratch[0])) in gen8_init_scratch()
[all …]
/linux/Documentation/core-api/kho/
concepts.rst
30 Scratch Regions
38 We guarantee that we always have such regions through the scratch regions: On
41 scratch region per NUMA node plus a scratch region to satisfy allocations
43 By default, the size of the scratch region is calculated based on the amount of memory
45 used to explicitly define the size of the scratch regions.
46 The scratch regions are declared as CMA when the page allocator is initialized so
/linux/drivers/net/wireless/intel/iwlwifi/pcie/
iwl-context-info-v2.h
38 * enum iwl_prph_scratch_flags - PRPH scratch control flags
82 * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
98 * @version: prph scratch information version id
121 * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
164 * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
176 * struct iwl_prph_scratch_step_cfg - prph scratch step configuration
190 * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
223 * struct iwl_prph_scratch - peripheral scratch mapping
224 * @ctrl_cfg: control and configuration of prph scratch
292 * @prph_scratch_base_addr: the peripheral scratch structure start address
[all …]
/linux/tools/testing/selftests/powerpc/copyloops/asm/
ppc_asm.h
50 #define DCBT_SETUP_STREAMS(from, from_parms, to, to_parms, scratch) \ argument
51 lis scratch,0x8000; /* GO=1 */ \
52 clrldi scratch,scratch,32; \
60 dcbt 0,scratch,0b01010; /* all streams GO */
/linux/arch/mips/mm/
tlbex.c
757 * TMP and PTR are scratch.
847 * BVADDR is the faulting address, PTR is scratch.
923 * TMP and PTR are scratch.
1056 const int scratch = 1; /* Our extra working register */ in build_fast_tlb_refill_handler() local
1058 rv.huge_pte = scratch; in build_fast_tlb_refill_handler()
1071 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); in build_fast_tlb_refill_handler()
1073 UASM_i_SW(p, scratch, scratchpad_offset(0), 0); in build_fast_tlb_refill_handler()
1075 uasm_i_dsrl_safe(p, scratch, tmp, in build_fast_tlb_refill_handler()
1077 uasm_il_bnez(p, r, scratch, label_vmalloc); in build_fast_tlb_refill_handler()
1093 UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg); in build_fast_tlb_refill_handler()
[all …]
/linux/arch/x86/include/asm/
bootparam_utils.h
52 static struct boot_params scratch; in sanitize_boot_params() local
54 char *save_base = (char *)&scratch; in sanitize_boot_params()
68 BOOT_PARAM_PRESERVE(scratch), in sanitize_boot_params()
80 memset(&scratch, 0, sizeof(scratch)); in sanitize_boot_params()
/linux/net/ipv4/
tcp_sigpool.c
53 * sigpool_reserve_scratch - re-allocates scratch buffer, slow-path
54 * @size: request size for the scratch/temp buffer
74 void *scratch, *old_scratch; in sigpool_reserve_scratch() local
76 scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); in sigpool_reserve_scratch()
77 if (!scratch) { in sigpool_reserve_scratch()
83 scratch, lockdep_is_held(&cpool_mutex)); in sigpool_reserve_scratch()
155 * @scratch_size: reserve a tcp_sigpool::scratch buffer of this size
284 /* Pairs with tcp_sigpool_reserve_scratch(), scratch area is in tcp_sigpool_start()
288 c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad)); in tcp_sigpool_start()
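
The tcp_sigpool.c hits show the slow path growing a per-CPU scratch buffer under cpool_mutex and publishing it with RCU, so tcp_sigpool_start() can pick it up from BH context without taking a lock. A condensed sketch of that publish pattern using the same primitives; the demo_* names simplify the excerpt, and the per-CPU synchronize_rcu() skips the real code's batching of grace periods:

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static DEFINE_MUTEX(demo_pool_mutex);
static DEFINE_PER_CPU(void __rcu *, demo_scratch);

/* Slow path: reallocate every CPU's scratch to at least @size bytes. */
static int demo_reserve_scratch(size_t size)
{
        int cpu;

        lockdep_assert_held(&demo_pool_mutex);
        for_each_possible_cpu(cpu) {
                void *n = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
                void *old;

                if (!n)
                        return -ENOMEM;
                old = rcu_replace_pointer(*per_cpu_ptr(&demo_scratch, cpu),
                                          n,
                                          lockdep_is_held(&demo_pool_mutex));
                synchronize_rcu(); /* BH readers are done with old buffer */
                kfree(old);
        }
        return 0;
}
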
/linux/fs/xfs/
xfs_zone_gc.c
53 * Size of each GC scratch pad. This is also the upper bound for each
109 struct xfs_zone_scratch *scratch; member
146 struct xfs_zone_scratch scratch[XFS_ZONE_GC_NR_SCRATCH]; member
222 data->scratch[i].folio = in xfs_zone_gc_data_alloc()
224 if (!data->scratch[i].folio) in xfs_zone_gc_data_alloc()
235 folio_put(data->scratch[i].folio); in xfs_zone_gc_data_alloc()
251 folio_put(data->scratch[i].folio); in xfs_zone_gc_data_free()
593 return XFS_GC_CHUNK_SIZE - data->scratch[data->scratch_idx].offset; in xfs_zone_gc_scratch_available()
704 chunk->scratch = &data->scratch[data->scratch_idx]; in xfs_zone_gc_start_chunk()
713 bio_add_folio_nofail(bio, chunk->scratch->folio, chunk->len, in xfs_zone_gc_start_chunk()
[all …]
/linux/Documentation/admin-guide/mm/
kho.rst
24 scratch regions. For example ``kho_scratch=16M,512M,256M`` will reserve a
25 16 MiB low memory scratch area, a 512 MiB global scratch region, and 256 MiB
26 per NUMA node scratch regions on boot.
92 Lengths of KHO scratch regions, which are physically contiguous
98 Physical locations of KHO scratch regions. Kexec user space tools
/linux/drivers/gpu/drm/i915/selftests/
i915_perf.c
296 void *scratch; in live_noa_gpr() local
315 /* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */ in live_noa_gpr()
316 scratch = __px_vaddr(ce->vm->scratch[0]); in live_noa_gpr()
317 memset(scratch, POISON_FREE, PAGE_SIZE); in live_noa_gpr()
404 /* Verify that the user's scratch page was not used for GPR storage */ in live_noa_gpr()
405 if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) { in live_noa_gpr()
406 pr_err("Scratch page overwritten!\n"); in live_noa_gpr()
407 igt_hexdump(scratch, 4096); in live_noa_gpr()
/linux/kernel/
kexec_handover.c
321 * preservation. The successor kernel will remain isolated to the scratch space
474 * be anywhere in physical address space. The scratch regions give us a
476 * can later safely load our new kexec images into and then use the scratch
484 * The scratch areas are scaled by default as percent of memory allocated from
490 * per-node scratch areas:
527 pr_notice("scratch scale is %d%%\n", scratch_scale); in kho_parse_scratch_size()
560 pr_notice("scratch areas: lowmem: %lluMiB global: %lluMiB pernode: %lldMiB\n", in kho_parse_scratch_size()
629 * reserve scratch area in low memory for lowmem allocations in the in kho_reserve_scratch()
673 pr_warn("Failed to reserve scratch area, disabling kexec handover\n"); in kho_reserve_scratch()
1465 * Mark scratch mem as CMA before we return it. That way we in kho_release_scratch()
[all …]
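
The kexec_handover.c comments size the scratch areas, by default, as a percentage of the memory in each range ("scratch scale is %d%%"). A toy version of that proportional sizing; the helper is illustrative, not the kernel's implementation:

/* Size a scratch area as scratch_scale percent of the range it serves.
 * Dividing before multiplying keeps the intermediate value from
 * overflowing for very large ranges. */
static unsigned long long demo_scratch_size(unsigned long long range_bytes,
                                            unsigned int scratch_scale)
{
        return range_bytes / 100 * scratch_scale;
}
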
