/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/cpu-sh4/sq.h
 *
 * Copyright (C) 2001, 2002, 2003  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 */
#ifndef __ASM_CPU_SH4_SQ_H
#define __ASM_CPU_SH4_SQ_H

#include <asm/addrspace.h>
#include <asm/page.h>

/*
 * Store queues range from e0000000-e3fffffc, allowing 64MB to be mapped
 * to any physical address space. Since data is written (and aligned) to
 * 32-byte boundaries, we need to be sure that all allocations are 32-byte
 * aligned as well.
 */
#define SQ_SIZE                 32
#define SQ_ALIGN_MASK           (~(SQ_SIZE - 1))
#define SQ_ALIGN(addr)          (((addr)+SQ_SIZE-1) & SQ_ALIGN_MASK)
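/*
 * For example, SQ_ALIGN() rounds an address up to the next 32-byte
 * boundary: SQ_ALIGN(0x21) yields 0x40, while an already-aligned address
 * such as 0x40 is returned unchanged.
 */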

#define SQ_QACR0		(P4SEG_REG_BASE  + 0x38)
#define SQ_QACR1		(P4SEG_REG_BASE  + 0x3c)
#define SQ_ADDRMAX              (P4SEG_STORE_QUE + 0x04000000)

/* arch/sh/kernel/cpu/sh4/sq.c */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot);
void sq_unmap(unsigned long vaddr);
void sq_flush_range(unsigned long start, unsigned int len);
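/*
 * Example usage (a minimal sketch only; the physical address, length and
 * name below are hypothetical, and error handling is omitted):
 *
 *	unsigned long sq;
 *
 *	sq = sq_remap(0xa4000000, 0x1000, "example", PAGE_SHARED);
 *	... write through the returned store queue mapping ...
 *	sq_flush_range(sq, 0x1000);
 *	sq_unmap(sq);
 */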

#endif /* __ASM_CPU_SH4_SQ_H */