// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <asm/page-states.h>
#include <asm/sections.h>
#include <asm/page.h>

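/*
 * cmma_flag is set up during early boot: 0 disables the CMMA page
 * state hints issued below, any non-zero value enables them, and
 * values >= 2 additionally select the stable no-DAT state in
 * arch_alloc_page().
 */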
int __bootdata_preserved(cmma_flag);
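
/*
 * Tell the hypervisor that the freed pages are unused: their content
 * does not need to be preserved and the backing frames may be reclaimed.
 */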
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	__set_page_unused(page_to_virt(page), 1UL << order);
}
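
/*
 * Newly allocated pages are set back to the stable state, i.e. the
 * hypervisor has to preserve their content again. The no-DAT variant
 * is used when cmma_flag indicates that it is available.
 */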
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		__set_page_stable_dat(page_to_virt(page), 1UL << order);
	else
		__set_page_stable_nodat(page_to_virt(page), 1UL << order);
}