/linux/arch/riscv/lib/ |
uaccess.S | 80 * Copy first bytes until dst is aligned to word boundary. 82 * t1 - start of aligned dst 86 /* dst is already aligned, skip */ 94 bltu a0, t1, 1b /* t1 - start of aligned dst */ 98 * Now dst is aligned. 100 * Use word-copy if both src and dst are aligned because 109 * Both src and dst are aligned, unrolled word copy 111 * a0 - start of aligned dst 112 * a1 - start of aligned src 113 * t0 - end of aligned dst [all …]
|
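The pattern the uaccess.S comments describe is the classic head/body/tail copy: move single bytes until dst reaches a word boundary, then switch to word loads and stores only when src turned out to share that alignment. A minimal C sketch of the idea (illustrative only, not the kernel's implementation; the helper name is hypothetical):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: byte-copy until dst is word-aligned, then word-copy if
     * src ended up aligned too, then finish the tail byte-wise. */
    static void *copy_aligned_sketch(void *dst, const void *src, size_t len)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        /* Head: advance until dst sits on a word boundary. */
        while (len && ((uintptr_t)d & (sizeof(long) - 1))) {
            *d++ = *s++;
            len--;
        }

        /* Body: word copy only if src shares the alignment. */
        if (((uintptr_t)s & (sizeof(long) - 1)) == 0) {
            while (len >= sizeof(long)) {
                *(long *)d = *(const long *)s;
                d += sizeof(long);
                s += sizeof(long);
                len -= sizeof(long);
            }
        }

        /* Tail: whatever bytes remain. */
        while (len--)
            *d++ = *s++;
        return dst;
    }
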
/linux/drivers/scsi/ |
ipr.h | 333 }__attribute__((packed, aligned (4))); 411 }__attribute__ ((packed, aligned (4))); 434 }__attribute__ ((packed, aligned (8))); 441 }__attribute__((packed, aligned (4))); 448 }__attribute__((packed, aligned (4))); 453 }__attribute__((packed, aligned (4))); 458 }__attribute__((packed, aligned (8))); 473 }__attribute__((packed, aligned (4))); 481 }__attribute__((packed, aligned (4))); 543 }__attribute__ ((packed, aligned(4))); [all …]
|
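The `__attribute__((packed, aligned (4)))` idiom repeated through ipr.h combines two effects: packed removes internal and trailing padding so the struct matches the adapter's wire layout byte-for-byte, while aligned (4) restores a 4-byte minimum alignment for the struct as a whole. A hedged example with a made-up struct:

    #include <stdint.h>

    /* Hypothetical header: "packed" keeps the fields contiguous
     * (addr lands at offset 5 instead of 8), while "aligned(4)"
     * keeps the struct itself on a 4-byte boundary and rounds its
     * size up to a multiple of 4. */
    struct example_hdr {
        uint8_t  op;
        uint8_t  flags;
        uint16_t len;
        uint8_t  tag;    /* would be followed by padding without "packed" */
        uint32_t addr;
    } __attribute__((packed, aligned(4)));

    _Static_assert(sizeof(struct example_hdr) == 12, "9 packed bytes, rounded to 4");
    _Static_assert(_Alignof(struct example_hdr) == 4, "4-byte aligned");
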
/linux/arch/xtensa/include/asm/ |
coprocessor.h | 118 #define __REG2_1(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); 119 #define __REG2_2(n,s,a) unsigned char n[s] __attribute__ ((aligned(a))); 122 __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN))); 124 __attribute__ ((aligned (XCHAL_NCP_SA_ALIGN))); 129 __attribute__ ((aligned (XCHAL_CP0_SA_ALIGN))); 131 __attribute__ ((aligned (XCHAL_CP1_SA_ALIGN))); 133 __attribute__ ((aligned (XCHAL_CP2_SA_ALIGN))); 135 __attribute__ ((aligned (XCHAL_CP3_SA_ALIGN))); 137 __attribute__ ((aligned (XCHAL_CP4_SA_ALIGN))); 139 __attribute__ ((aligned (XCHAL_CP5_SA_ALIGN))); [all …]
|
/linux/arch/xtensa/lib/ |
memset.S | 23 * If the destination is aligned, 27 * setting 1B and 2B and then go to aligned case. 29 * case of an aligned destination (except for the branches to 47 .L0: # return here from .Ldstunaligned when dst is aligned 54 * Destination is word-aligned. 56 # set 16 bytes per iteration for word-aligned dst 106 bbci.l a5, 0, .L20 # branch if dst alignment half-aligned 107 # dst is only byte aligned 112 # now retest if dst aligned 113 bbci.l a5, 1, .L0 # if now aligned, return to main algorithm [all …]
|
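memset.S applies the same alignment discipline to stores: set 1- and 2-byte heads until dst is word-aligned, then write a byte-replicated word per step (the assembly further unrolls to 16 bytes per iteration). A rough C equivalent of the strategy, without the unrolling:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the memset.S strategy; illustrative only. */
    static void *memset_sketch(void *dst, int c, size_t len)
    {
        unsigned char *d = dst;
        uint32_t word = (uint8_t)c;

        word |= word << 8;
        word |= word << 16;              /* replicate fill byte into a word */

        while (len && ((uintptr_t)d & 3)) { /* head: align dst */
            *d++ = (unsigned char)c;
            len--;
        }
        while (len >= 4) {               /* body: aligned word stores */
            *(uint32_t *)d = word;
            d += 4;
            len -= 4;
        }
        while (len--)                    /* tail */
            *d++ = (unsigned char)c;
        return dst;
    }
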
memcopy.S | 34 * If source is aligned, 40 * case of aligned source and destination and multiple 89 .Ldst1mod2: # dst is only byte aligned 98 _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then 100 .Ldst2mod4: # dst 16-bit aligned 110 j .Ldstaligned # dst is now aligned, return to main algorithm 121 .Ldstaligned: # return here from .Ldst?mod? once dst is aligned 124 movi a8, 3 # if source is not aligned, 127 * Destination and source are word-aligned, use word copy. 129 # copy 16 bytes per iteration for word-aligned dst and word-aligned src [all …]
|
usercopy.S | 30 * If the destination and source are both aligned, 33 * If destination is aligned and source unaligned, 38 * case of aligned destinations (except for the branches to 75 .Ldstaligned: # return here from .Ldstunaligned when dst is aligned 78 movi a8, 3 # if source is also aligned, 89 .Ldst1mod2: # dst is only byte aligned 98 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then 100 .Ldst2mod4: # dst 16-bit aligned 110 j .Ldstaligned # dst is now aligned, return to main algorithm 138 * Destination and source are word-aligned. [all …]
|
checksum.S | 44 * is aligned on either a 2-byte or 4-byte boundary. 48 bnez a5, 8f /* branch if 2-byte aligned */ 112 /* uncommon case, buf is 2-byte aligned */ 118 bnez a5, 8f /* branch if 1-byte aligned */ 124 j 1b /* now buf is 4-byte aligned */ 126 /* case: odd-byte aligned, len > 1 188 This function is optimized for 4-byte aligned addresses. Other 199 aligned case. Two bbsi.l instructions might seem more optimal 206 beqz a9, 1f /* branch if both are 4-byte aligned */ 208 j 3f /* one address is 2-byte aligned */ [all …]
|
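checksum.S computes the one's-complement (Internet) checksum and, as its comments say, is optimized for 4-byte aligned buffers, with slower entry paths for 1- and 2-byte aligned starts. A sketch of the aligned fast path in C with the usual carry folding (a hypothetical helper, not the kernel's csum API; tail bytes shown in little-endian order):

    #include <stddef.h>
    #include <stdint.h>

    /* Sum 32-bit words of a 4-byte aligned buffer into a wide
     * accumulator, then fold carries down to 16 bits. */
    static uint16_t csum_sketch(const void *buf, size_t len)
    {
        const uint32_t *p = buf;         /* assumes 4-byte alignment */
        uint64_t sum = 0;

        while (len >= 4) {
            sum += *p++;
            len -= 4;
        }
        if (len) {                       /* 1-3 trailing bytes */
            const uint8_t *b = (const uint8_t *)p;
            uint32_t tail = 0;
            for (size_t i = 0; i < len; i++)
                tail |= (uint32_t)b[i] << (8 * i);  /* little-endian view */
            sum += tail;
        }
        while (sum >> 32)                /* fold 64 -> 32 */
            sum = (sum & 0xffffffff) + (sum >> 32);
        sum = (sum & 0xffff) + (sum >> 16);  /* fold 32 -> 16, twice */
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }
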
/linux/drivers/scsi/pm8001/ |
pm8001_hwi.h | 146 } __attribute__((packed, aligned(4))); 158 } __attribute__((packed, aligned(4))); 169 } __attribute__((packed, aligned(4))); 221 } __attribute__((packed, aligned(4))); 234 } __attribute__((packed, aligned(4))); 250 } __attribute__((packed, aligned(4))); 263 } __attribute__((packed, aligned(4))); 276 } __attribute__((packed, aligned(4))); 287 } __attribute__((packed, aligned(4))); 299 } __attribute__((packed, aligned(4))); [all …]
|
pm80xx_hwi.h | 345 } __attribute__((packed, aligned(4))); 357 } __attribute__((packed, aligned(4))); 367 } __attribute__((packed, aligned(4))); 418 } __attribute__((packed, aligned(4))); 432 } __attribute__((packed, aligned(4))); 441 } __attribute__((packed, aligned(4))); 456 } __attribute__((packed, aligned(4))); 468 } __attribute__((packed, aligned(4))); 479 } __attribute__((packed, aligned(4))); 489 } __attribute__((packed, aligned(4))); [all …]
|
/linux/tools/mm/ |
thpmaps | 237 'aligned': [0] * (PMD_ORDER + 1), 242 'aligned': [0] * (PMD_ORDER + 1), 278 align = 'aligned' if align_forward(vfn, nr) == vfn else 'unaligned' 288 …stats['anon']['aligned'][PMD_ORDER] = max(0, stats['anon']['aligned'][PMD_ORDER] - kbnr(anon_pmd_m… 289 …stats['file']['aligned'][PMD_ORDER] = max(0, stats['file']['aligned'][PMD_ORDER] - kbnr(file_pmd_m… 292 f"anon-thp-pmd-aligned-{odkb(PMD_ORDER)}kB": {'type': 'anon', 'value': anon_pmd_mapped}, 293 f"file-thp-pmd-aligned-{odkb(PMD_ORDER)}kB": {'type': 'file', 'value': file_pmd_mapped}, 302 flatten_sub(type, 'aligned', stats['aligned']) 353 f"anon-cont-pmd-aligned-{nrkb(nr_cont)}kB": {'type': 'anon', 'value': anon_pmd_mapped}, 354 f"file-cont-pmd-aligned-{nrkb(nr_cont)}kB": {'type': 'file', 'value': file_pmd_mapped}, [all …]
|
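The script's aligned/unaligned bucketing hinges on one test: a run of nr pages starting at virtual frame number vfn counts as naturally aligned when align_forward(vfn, nr) == vfn. The same check expressed in C, assuming nr is a power of two (an order-N THP):

    #include <stdbool.h>
    #include <stdint.h>

    /* Round vfn up to the next nr boundary; the range is naturally
     * aligned iff rounding leaves it unchanged. */
    static bool range_is_aligned(uint64_t vfn, uint64_t nr)
    {
        uint64_t aligned_vfn = (vfn + nr - 1) & ~(nr - 1);  /* align_forward */
        return aligned_vfn == vfn;
    }
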
/linux/include/uapi/linux/ |
rseq.h | 41 * struct rseq_cs is aligned on 4 * 8 bytes to ensure it is always 54 } __attribute__((aligned(4 * sizeof(__u64)))); 57 * struct rseq is aligned on 4 * 8 bytes to ensure it is always 67 * registered this data structure. Aligned on 32-bit. Always 80 * data structure. Aligned on 32-bit. Values 107 * thread which registered this data structure. Aligned on 64-bit. 138 * Aligned on 32-bit. Contains the current NUMA node ID. 146 * Aligned on 32-bit. Contains the current thread's concurrency ID 155 } __attribute__((aligned(4 * sizeof(__u64))));
|
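The `aligned(4 * sizeof(__u64))` attribute forces 32-byte alignment on struct rseq and struct rseq_cs so both sides of the ABI can rely on the structure starting on a fixed boundary. A minimal sketch with an abbreviated, hypothetical field set:

    #include <stdint.h>

    /* Abbreviated stand-in for the rseq layout; only the alignment
     * idiom is the point here. */
    struct rseq_sketch {
        uint32_t cpu_id_start;
        uint32_t cpu_id;
        uint64_t rseq_cs;
        uint32_t flags;
        uint32_t node_id;
        uint32_t mm_cid;
    } __attribute__((aligned(4 * sizeof(uint64_t))));

    _Static_assert(_Alignof(struct rseq_sketch) == 32, "4 * 8 byte aligned");
    _Static_assert(sizeof(struct rseq_sketch) % 32 == 0, "size rounded to alignment");
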
swab.h | 169 * @p: pointer to a naturally-aligned 16-bit value 182 * @p: pointer to a naturally-aligned 32-bit value 195 * @p: pointer to a naturally-aligned 64-bit value 208 * @p: pointer to a naturally-aligned 32-bit value 223 * @p: pointer to a naturally-aligned 32-bit value 238 * @p: pointer to a naturally-aligned 16-bit value 250 * @p: pointer to a naturally-aligned 32-bit value 263 * @p: pointer to a naturally-aligned 64-bit value 276 * @p: pointer to a naturally-aligned 32-bit value 291 * @p: pointer to a naturally-aligned 32-bit value
|
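Each of these kerneldoc lines documents a byte-swap helper that loads a naturally aligned value, swaps it, and (for the in-place variants) stores it back. A sketch of the shape of such helpers using the GCC/Clang byte-swap builtins (the kernel defines its own __swab16/__swab32/__swab64 rather than these):

    #include <stdint.h>

    /* p must be 4-byte aligned, per the contract documented above. */
    static inline void swab32_inplace(uint32_t *p)
    {
        *p = __builtin_bswap32(*p);
    }

    /* p must be 8-byte aligned. */
    static inline void swab64_inplace(uint64_t *p)
    {
        *p = __builtin_bswap64(*p);
    }
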
/linux/tools/testing/selftests/rseq/ |
rseq-abi.h | 41 * struct rseq_abi_cs is aligned on 4 * 8 bytes to ensure it is always 54 } __attribute__((aligned(4 * sizeof(__u64)))); 57 * struct rseq_abi is aligned on 4 * 8 bytes to ensure it is always 67 * registered this data structure. Aligned on 32-bit. Always 80 * data structure. Aligned on 32-bit. Values 107 * thread which registered this data structure. Aligned on 64-bit. 154 * Aligned on 32-bit. Contains the current NUMA node ID. 162 * Aligned on 32-bit. Contains the current thread's concurrency ID 171 } __attribute__((aligned(4 * sizeof(__u64))));
|
/linux/arch/mips/kernel/ |
cmpxchg.c | 16 /* Check that ptr is naturally aligned */ in __xchg_small() 25 * exchange within the naturally aligned 4 byte integer that includes in __xchg_small() 35 * Calculate a pointer to the naturally aligned 4 byte integer that in __xchg_small() 57 /* Check that ptr is naturally aligned */ in __cmpxchg_small() 67 * compare & exchange within the naturally aligned 4 byte integer in __cmpxchg_small() 77 * Calculate a pointer to the naturally aligned 4 byte integer that in __cmpxchg_small() 93 * Calculate the old & new values of the naturally aligned in __cmpxchg_small()
|
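This file emulates sub-word atomics on hardware that only provides 32-bit ll/sc: locate the naturally aligned 4-byte integer containing the byte or halfword, then compare-and-exchange the whole word with shift/mask bookkeeping. A sketch of the same idea using GCC atomic builtins (little-endian byte numbering assumed; the kernel uses ll/sc loops, not these builtins):

    #include <stdint.h>

    /* 1-byte cmpxchg emulated via a 4-byte CAS on the aligned word
     * that contains the byte. Returns the value observed at *ptr. */
    static uint8_t cmpxchg_u8_sketch(volatile uint8_t *ptr, uint8_t old, uint8_t new)
    {
        /* Pointer to the naturally aligned 32-bit word holding *ptr. */
        volatile uint32_t *word = (volatile uint32_t *)((uintptr_t)ptr & ~3UL);
        unsigned shift = ((uintptr_t)ptr & 3) * 8;  /* byte's bit offset (LE) */
        uint32_t mask = 0xffu << shift;
        uint32_t w = *word;

        for (;;) {
            if ((uint8_t)((w & mask) >> shift) != old)
                return (uint8_t)((w & mask) >> shift);  /* mismatch: fail */
            uint32_t neww = (w & ~mask) | ((uint32_t)new << shift);
            /* On failure, w is reloaded with the current word; retry. */
            if (__atomic_compare_exchange_n(word, &w, neww, 0,
                                            __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                return old;
        }
    }
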
/linux/arch/sparc/kernel/ |
sstate.c | 38 static const char booting_msg[32] __attribute__((aligned(32))) = 40 static const char running_msg[32] __attribute__((aligned(32))) = 42 static const char halting_msg[32] __attribute__((aligned(32))) = 44 static const char poweroff_msg[32] __attribute__((aligned(32))) = 46 static const char rebooting_msg[32] __attribute__((aligned(32))) = 48 static const char panicking_msg[32] __attribute__((aligned(32))) =
|
/linux/lib/ |
iomap_copy.c | 11 * @to: destination, in MMIO space (must be 32-bit aligned) 12 * @from: source (must be 32-bit aligned) 34 * @to: destination (must be 32-bit aligned) 35 * @from: source, in MMIO space (must be 32-bit aligned) 55 * @to: destination, in MMIO space (must be 64-bit aligned) 56 * @from: source (must be 64-bit aligned)
|
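Per the kerneldoc above, these helpers exist so MMIO copies are performed as individually aligned 32-bit (or 64-bit) transactions rather than arbitrary byte moves, which many devices cannot tolerate. A sketch of the 32-bit variant, with plain volatile stores standing in for the kernel's __raw_writel():

    #include <stddef.h>
    #include <stdint.h>

    /* Copy "count" 32-bit words to device memory, one aligned store
     * per word; both pointers must be 4-byte aligned. */
    static void iowrite32_copy_sketch(volatile uint32_t *to,
                                      const uint32_t *from, size_t count)
    {
        while (count--)
            *to++ = *from++;   /* exactly one aligned 32-bit MMIO store */
    }
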
/linux/arch/nios2/lib/ |
memcpy.c | 22 /* Type to use for aligned memory operations. 48 the assumption that DST_BP is aligned on an OPSIZ multiple. If 70 Both SRCP and DSTP should be aligned for memory operations on `op_t's. */ 109 DSTP should be aligned for memory operations on `op_t's, but SRCP must 110 *not* be aligned. */ 119 aligned srcp to make it aligned for copy. */ in _wordcopy_fwd_dest_aligned() 124 /* Make SRCP aligned by rounding it down to the beginning of the `op_t' in _wordcopy_fwd_dest_aligned() 169 /* Copy just a few bytes to make DSTP aligned. */ in memcpy()
|
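_wordcopy_fwd_dest_aligned() handles the mixed case: dst is word-aligned but src is not, so src is rounded down to a word boundary, only aligned words are loaded, and each unaligned word is reconstructed with a shift-and-merge. A little-endian C sketch of that inner loop (assumes src really is misaligned, since the fully aligned case takes the plain word-copy path, and note that it reads whole aligned source words):

    #include <stddef.h>
    #include <stdint.h>

    /* dst word-aligned, src misaligned: merge pairs of aligned loads. */
    static void wordcopy_shift_sketch(uint32_t *dst, const uint8_t *src,
                                      size_t words)
    {
        unsigned off = (uintptr_t)src & 3;          /* 1..3, never 0 here */
        const uint32_t *s = (const uint32_t *)(src - off); /* rounded down */
        unsigned lsh = off * 8, rsh = 32 - lsh;
        uint32_t lo = *s++;

        while (words--) {
            uint32_t hi = *s++;
            *dst++ = (lo >> lsh) | (hi << rsh);     /* merge two aligned loads */
            lo = hi;
        }
    }
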
/linux/drivers/gpu/drm/msm/disp/dpu1/ |
msm_media_info.h | 46 * Y_Stride : Width aligned to 128 47 * UV_Stride : Width aligned to 128 48 * Y_Scanlines: Height aligned to 32 49 * UV_Scanlines: Height/2 aligned to 16 84 * Y_Stride : Width aligned to 128 85 * UV_Stride : Width aligned to 128 86 * Y_Scanlines: Height aligned to 32 87 * UV_Scanlines: Height/2 aligned to 16 142 * Y_Stride : Width aligned to 128 143 * UV_Stride : Width aligned to 128 [all …]
|
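The stride and scanline rules quoted above are all instances of rounding up to a power-of-two boundary. A sketch of the NV12 geometry using the standard idiom (macro and function names are illustrative, not the header's API):

    #include <stdint.h>

    /* Round x up to a multiple of a; a must be a power of two. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    static uint32_t y_stride(uint32_t width)      { return ALIGN_UP(width, 128); }
    static uint32_t uv_stride(uint32_t width)     { return ALIGN_UP(width, 128); }
    static uint32_t y_scanlines(uint32_t height)  { return ALIGN_UP(height, 32); }
    static uint32_t uv_scanlines(uint32_t height) { return ALIGN_UP(height / 2, 16); }
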
/linux/arch/sparc/lib/ |
M7memset.S | 144 andcc %o5, 7, %o3 ! is sp1 aligned on a 8 byte bound? 145 bz,pt %xcc, .blkalign ! already long word aligned 146 sub %o3, 8, %o3 ! -(bytes till long word aligned) 149 ! Set -(%o3) bytes till sp1 long word aligned 155 ! Now sp1 is long word aligned (sp1 is found in %o5) 161 andcc %o5, 63, %o3 ! is sp1 block aligned? 162 bz,pt %xcc, .blkwr ! now block aligned 163 sub %o3, 64, %o3 ! o3 is -(bytes till block aligned) 166 ! Store -(%o3) bytes till dst is block (64 byte) aligned. 168 ! Recall that dst is already long word aligned [all …]
|
M7memcpy.S | 37 * if src & dst aligned on word boundary but not long word boundary, 39 * if src & dst aligned on long word boundary 41 * if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14) 59 * finish_long: src/dst aligned on 8 bytes 62 * } else { src/dst aligned; count > MED_MAX 72 * } else { src/dst not aligned on 8 bytes 73 * if src is word aligned and count < MED_WMAX 168 #define SHORT_LONG 64 /* max copy for short longword-aligned case */ 171 #define MED_UMAX 1024 /* max copy for medium un-aligned case */ 172 #define MED_WMAX 1024 /* max copy for medium word-aligned case */ [all …]
|
/linux/Documentation/driver-api/dmaengine/ |
pxa_dma.rst | 100 - if Buffer1 and Buffer2 had all their addresses 8 bytes aligned 102 - and if Buffer3 has at least one address not 4 bytes aligned 107 this specific case if the DMA is already running in aligned mode. 146 - a driver submitted an aligned tx1, not chained 148 - a driver submitted an aligned tx2 => tx2 is cold chained to tx1 150 - a driver issued tx1+tx2 => channel is running in aligned mode 152 - a driver submitted an aligned tx3 => tx3 is hot-chained 159 - a driver submitted an aligned tx5 => tx5 is put in submitted queue, not 162 - a driver submitted an aligned tx6 => tx6 is put in submitted queue,
|
/linux/arch/sparc/include/asm/ |
hypervisor.h | 118 * ERRORS: HV_EBADALIGN Buffer is badly aligned 125 * aligned. Upon success or HV_EINVAL, this service returns the 250 * stopped state. The supplied RTBA must be aligned on a 256 byte 334 * EBADALIGN Base real address is not correctly aligned 339 * must be a power of 2. The base real address must be aligned 341 * long, so for example a 32 entry queue must be aligned on a 2048 397 * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list 398 * is not 2-byte aligned. 410 * aligned. The mondo data will be delivered to the cpu_mondo queues 463 * EBADALIGN RTBA is incorrectly aligned for a trap table [all …]
|
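The queue-registration rule quoted above (entry count a power of 2, base real address aligned to the total queue size, 64-byte entries, hence a 32-entry queue on a 2048-byte boundary) reduces to a single mask test. A sketch, assuming the power-of-two precondition holds as the spec text requires:

    #include <stdbool.h>
    #include <stdint.h>

    /* Would this queue base draw EBADALIGN? Entries are 64 bytes, so
     * a queue of N entries must sit on an (N * 64)-byte boundary. */
    static bool queue_base_ok(uint64_t base_ra, uint64_t entries)
    {
        uint64_t size = entries * 64;           /* total bytes */
        return (base_ra & (size - 1)) == 0;     /* aligned to its own size */
    }
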
/linux/arch/parisc/lib/ |
lusercopy.S | 76 * aligned operands. It is interesting to note that the glibc version of memcpy 78 * it by 30-40% for aligned copies because of the loop unrolling, but in some 123 /* only do 64-bit copies if we can get aligned. */ 127 /* loop until we are 64-bit aligned */ 179 /* loop until we are 32-bit aligned */ 234 /* src and dst are not aligned the same way. */ 237 /* align until dst is 32bit-word-aligned */ 260 * Copy from a not-aligned src to an aligned dst using shifts. 269 /* Make src aligned by rounding it down. */
|
/linux/tools/testing/selftests/powerpc/ptrace/ |
ptrace-hwbreak.c | 40 static volatile __u8 big_var[DAWR_MAX_LEN] __attribute__((aligned(512))); 45 __u8 a[A_LEN]; /* double word aligned */ 48 static volatile struct gstruct gstruct __attribute__((aligned(512))); 50 static volatile char cwd[PATH_MAX] __attribute__((aligned(8))); 163 /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, WO test */ in test_workload() 166 /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RO test */ in test_workload() 169 /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, RW test */ in test_workload() 199 /* PPC_PTRACE_SETHWDEBUG 2, MODE_RANGE, DW ALIGNED, WO test */ in test_workload() 401 char *name = "PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED"; in test_sethwdebug_range_aligned() 405 /* PPC_PTRACE_SETHWDEBUG, MODE_RANGE, DW ALIGNED, WO test */ in test_sethwdebug_range_aligned() [all …]
|
/linux/tools/testing/selftests/bpf/progs/ |
verifier_var_off.c | 23 /* Make it small and 4-byte aligned */ \ in variable_offset_ctx_access() 48 /* Make it small and 4-byte aligned */ \ in stack_read_priv_vs_unpriv() 71 /* Make it small and 4-byte aligned */ \ in variable_offset_stack_read_uninitialized() 102 /* Make it small and 8-byte aligned */ \ in stack_write_priv_vs_unpriv() 140 /* Make it small and 8-byte aligned */ \ in stack_write_followed_by_read() 182 /* Make it small and 8-byte aligned */ \ in stack_write_clobbers_spilled_regs() 248 /* Make it small and 4-byte aligned */ \ in access_max_out_of_bound() 306 /* Make it small and 4-byte aligned */ \ in access_min_out_of_bound() 336 /* Make it small and 4-byte aligned. */ \ in access_min_off_min_initialized() 369 /* Make it small and 4-byte aligned. */ \ in stack_access_priv_vs_unpriv() [all …]
|
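The repeated "Make it small and 4-byte aligned" comment refers to masking a variable before using it as a stack offset, so the verifier can prove the resulting access is both bounded and word-aligned. The tests do this in BPF assembly (an and/sub on a register); a loose C rendering of the trick:

    #include <stdint.h>

    /* Clamp an arbitrary value to a small, 4-byte aligned, in-bounds
     * stack offset: var & 4 is 0 or 4, so the result is fp-8 or fp-4. */
    static int32_t bound_offset(uint64_t var)
    {
        int32_t off = (int32_t)(var & 4);   /* small and 4-byte aligned */
        return off - 8;                     /* always within the stack slot */
    }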