#
# arch/arm64/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License.  See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995-2001 by Russell King

LDFLAGS_vmlinux	:=--no-undefined -X

ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
LDFLAGS_vmlinux		+= -shared -Bsymbolic -z notext \
			$(call ld-option, --no-apply-dynamic-relocs)
endif

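# Work around Cortex-A53 erratum 843419 at link time, but only when the
# linker actually understands the --fix-cortex-a53-843419 option.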
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifeq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
LDFLAGS_vmlinux	+= --fix-cortex-a53-843419
  endif
endif

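# Probe whether the compiler correctly rejects an out-of-range value for the
# "K" (32-bit logical immediate) inline asm constraint. try-run only emits
# -DCONFIG_CC_HAS_K_CONSTRAINT=1 when the test program below fails to build,
# i.e. when the constraint can be trusted.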
cc_has_k_constraint := $(call try-run,echo				\
	'int main(void) {						\
		asm volatile("and w0, w0, %w0" :: "K" (4294967295));	\
		return 0;						\
	}' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)

ifeq ($(CONFIG_BROKEN_GAS_INST),y)
$(warning Detected assembler with broken .inst; disassembly will be unreliable)
endif

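# Kernel code must not use the FP/SIMD register file, so restrict the
# compiler to the general-purpose registers.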
KBUILD_CFLAGS	+= -mgeneral-regs-only	\
		   $(compat_vdso) $(cc_has_k_constraint)
KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
KBUILD_AFLAGS	+= $(compat_vdso)

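# The kernel is always built for the LP64 ABI; the flag is wrapped in
# cc-option so toolchains that do not recognise it are still usable.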
KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)

# Avoid generating .eh_frame* sections.
ifneq ($(CONFIG_UNWIND_TABLES),y)
KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
KBUILD_AFLAGS	+= -fno-asynchronous-unwind-tables -fno-unwind-tables
else
KBUILD_CFLAGS	+= -fasynchronous-unwind-tables
KBUILD_AFLAGS	+= -fasynchronous-unwind-tables
endif

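# With per-task stack protector support the canary is read from the current
# task (sp_el0 holds the 'current' pointer in the kernel) at the
# TSK_STACK_CANARY offset. That offset lives in asm-offsets.h, so the flags
# can only be evaluated once prepare0 has generated the header.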
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg		  \
				-mstack-protector-guard-reg=sp_el0	  \
				-mstack-protector-guard-offset=$(shell	  \
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
					include/generated/asm-offsets.h))
endif

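# Map the Kconfig branch-protection choices onto compiler flags: BTI kernels
# use pac-ret+bti, pointer-auth-only kernels use pac-ret (or the older
# -msign-return-address=non-leaf when -mbranch-protection is unavailable),
# and everything else explicitly asks for no branch protection.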
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
  KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti
else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
  ifeq ($(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET),y)
    KBUILD_CFLAGS += -mbranch-protection=pac-ret
  else
    KBUILD_CFLAGS += -msign-return-address=non-leaf
  endif
else
  KBUILD_CFLAGS += $(call cc-option,-mbranch-protection=none)
endif

# Tell the assembler to support instructions from the latest target
# architecture.
#
# For non-integrated assemblers we'll pass this on the command line, and for
# integrated assemblers we'll define ARM64_ASM_ARCH and ARM64_ASM_PREAMBLE for
# inline usage.
#
# We cannot pass the same arch flag to the compiler as this would allow it to
# freely generate instructions which are not supported by earlier architecture
# versions, which would prevent a single kernel image from working on earlier
# hardware.
ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
  asm-arch := armv8.5-a
else ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
  asm-arch := armv8.4-a
else ifeq ($(CONFIG_AS_HAS_ARMV8_3), y)
  asm-arch := armv8.3-a
else ifeq ($(CONFIG_AS_HAS_ARMV8_2), y)
  asm-arch := armv8.2-a
endif

ifdef asm-arch
KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
		   -DARM64_ASM_ARCH='"$(asm-arch)"'
endif

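# The shadow call stack pointer is kept in x18, so stop the compiler from
# allocating that register for anything else.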
ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
KBUILD_CFLAGS	+= -ffixed-x18
endif

ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS	+= -mbig-endian
CHECKFLAGS	+= -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS	+= -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE	:= aarch64_be
else
KBUILD_CPPFLAGS	+= -mlittle-endian
CHECKFLAGS	+= -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS	+= -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE	:= aarch64
endif

ifeq ($(CONFIG_LD_IS_LLD), y)
KBUILD_LDFLAGS	+= -z norelro
endif

CHECKFLAGS	+= -D__aarch64__

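# Reserve patchable NOPs at function entry for dynamic ftrace:
# -fpatchable-function-entry=4,2 emits four NOPs with two of them placed
# before the function entry point, whereas -fpatchable-function-entry=2
# emits two NOPs after the entry point only.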
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS),y)
  KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
  CC_FLAGS_FTRACE := -fpatchable-function-entry=4,2
else ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_ARGS),y)
  KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
  CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif

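# KASAN shadow granularity: software tag-based KASAN maps 16 bytes of memory
# per shadow byte (scale shift 4), generic KASAN maps 8 bytes (scale shift 3).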
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else ifeq ($(CONFIG_KASAN_GENERIC), y)
KASAN_SHADOW_SCALE_SHIFT := 3
endif

KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)

libs-y		:= arch/arm64/lib/ $(libs-y)
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

# Default target when executing plain make
boot		:= arch/arm64/boot

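# The default image is the EFI zboot payload (vmlinuz.efi) when
# CONFIG_EFI_ZBOOT is enabled, and the gzip-compressed Image otherwise.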
ifeq ($(CONFIG_EFI_ZBOOT),)
KBUILD_IMAGE	:= $(boot)/Image.gz
else
KBUILD_IMAGE	:= $(boot)/vmlinuz.efi
endif

all:	$(notdir $(KBUILD_IMAGE))

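# Building of the actual boot images is delegated to arch/arm64/boot/Makefile.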
vmlinuz.efi: Image
Image vmlinuz.efi: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

install: KBUILD_IMAGE := $(boot)/Image
install zinstall:
	$(call cmd,install)

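# archprepare generates the 'kapi' headers via arch/arm64/tools and warns
# about toolchains that cannot honour the selected erratum or LSE options.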
archprepare:
	$(Q)$(MAKE) $(build)=arch/arm64/tools kapi
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
  ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
	@echo "warning: ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum" >&2
  endif
endif
ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS),y)
  ifneq ($(CONFIG_ARM64_LSE_ATOMICS),y)
	@echo "warning: LSE atomics not supported by binutils" >&2
  endif
endif

ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso \
	include/generated/vdso-offsets.h arch/arm64/kernel/vdso/vdso.so
ifdef CONFIG_COMPAT_VDSO
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 \
	include/generated/vdso32-offsets.h arch/arm64/kernel/vdso32/vdso.so
endif
endif

vdso-install-y				+= arch/arm64/kernel/vdso/vdso.so.dbg
vdso-install-$(CONFIG_COMPAT_VDSO)	+= arch/arm64/kernel/vdso32/vdso.so.dbg:vdso32.so

include $(srctree)/scripts/Makefile.defconf

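# 'make virtconfig' builds a defconfig with the 'virt' config fragment merged
# on top (see scripts/Makefile.defconf for merge_into_defconfig_override).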
PHONY += virtconfig
virtconfig:
	$(call merge_into_defconfig_override,defconfig,virt)

define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
  echo  '  install       - Install uncompressed kernel'
  echo  '  zinstall      - Install compressed kernel'
  echo  '                  Install using (your) ~/bin/installkernel or'
  echo  '                  (distribution) /sbin/installkernel or'
  echo  '                  install to $$(INSTALL_PATH) and run lilo'
endef