/* xref: /linux/arch/s390/include/asm/physmem_info.h (revision 4b660dbd9ee2059850fd30e0df420ca7a38a1856) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_S390_MEM_DETECT_H
3 #define _ASM_S390_MEM_DETECT_H
4 
5 #include <linux/types.h>
6 #include <asm/page.h>
7 
/*
 * Mechanism that produced the online-memory information in physmem_info
 * (stored in physmem_info.info_source; see get_physmem_info_source() for
 * the human-readable names).
 */
enum physmem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_SCLP_STOR_INFO,
	MEM_DETECT_DIAG260,
	MEM_DETECT_SCLP_READ_INFO,
	MEM_DETECT_BIN_SEARCH
};
15 
/* One contiguous range of online physical memory, delimited by start/end addresses. */
struct physmem_range {
	u64 start;
	u64 end;
};
20 
/*
 * Types of memory regions reserved during early boot; each indexes one slot
 * of physmem_info.reserved[]. RR_MAX is the number of types, not a type.
 */
enum reserved_range_type {
	RR_DECOMPRESSOR,
	RR_INITRD,
	RR_VMLINUX,
	RR_RELOC,
	RR_AMODE31,
	RR_IPLREPORT,
	RR_CERT_COMP_LIST,
	RR_MEM_DETECT_EXTENDED,
	RR_VMEM,
	RR_MAX
};
33 
/*
 * One reserved memory region [start, end). Additional regions of the same
 * type are linked via @chain, which holds the *physical* address of the
 * next range (0 terminates the list) — readers map it with __va() before
 * dereferencing (see __physmem_reserved_next()).
 */
struct reserved_range {
	unsigned long start;
	unsigned long end;
	struct reserved_range *chain;
};
39 
40 /*
41  * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
43  * According to architecture one storage element could have as much as
44  * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
45  * If more physmem_ranges are required, a block of memory from already
46  * known physmem_range is taken (online_extended points to it).
47  */
48 #define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
49 
/* Early-boot view of physical memory: online ranges plus reserved regions. */
struct physmem_info {
	u32 range_count;	/* total number of detected online ranges */
	u8 info_source;		/* enum physmem_info_source value */
	unsigned long usable;	/* upper limit for "usable" iteration; 0 = no limit */
	struct reserved_range reserved[RR_MAX];	/* head range per reserved type */
	struct physmem_range online[MEM_INLINED_ENTRIES];	/* inline range storage */
	struct physmem_range *online_extended;	/* overflow ranges beyond MEM_INLINED_ENTRIES */
};
58 
59 extern struct physmem_info physmem_info;
60 
61 void add_physmem_online_range(u64 start, u64 end);
62 
63 static inline int __get_physmem_range(u32 n, unsigned long *start,
64 				      unsigned long *end, bool respect_usable_limit)
65 {
66 	if (n >= physmem_info.range_count) {
67 		*start = 0;
68 		*end = 0;
69 		return -1;
70 	}
71 
72 	if (n < MEM_INLINED_ENTRIES) {
73 		*start = (unsigned long)physmem_info.online[n].start;
74 		*end = (unsigned long)physmem_info.online[n].end;
75 	} else {
76 		*start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
77 		*end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
78 	}
79 
80 	if (respect_usable_limit && physmem_info.usable) {
81 		if (*start >= physmem_info.usable)
82 			return -1;
83 		if (*end > physmem_info.usable)
84 			*end = physmem_info.usable;
85 	}
86 	return 0;
87 }
88 
/**
 * for_each_physmem_usable_range - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges below usable limit. Iteration
 * stops at the first range starting at or above physmem_info.usable; a
 * range straddling the limit is truncated to it.
 */
#define for_each_physmem_usable_range(i, p_start, p_end)		\
	for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)
99 
/*
 * Walks over all detected online memory ranges disregarding usable limit.
 * Same arguments as for_each_physmem_usable_range().
 */
#define for_each_physmem_online_range(i, p_start, p_end)		\
	for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
103 
104 static inline const char *get_physmem_info_source(void)
105 {
106 	switch (physmem_info.info_source) {
107 	case MEM_DETECT_SCLP_STOR_INFO:
108 		return "sclp storage info";
109 	case MEM_DETECT_DIAG260:
110 		return "diag260";
111 	case MEM_DETECT_SCLP_READ_INFO:
112 		return "sclp read info";
113 	case MEM_DETECT_BIN_SEARCH:
114 		return "binary search";
115 	}
116 	return "none";
117 }
118 
119 #define RR_TYPE_NAME(t) case RR_ ## t: return #t
120 static inline const char *get_rr_type_name(enum reserved_range_type t)
121 {
122 	switch (t) {
123 	RR_TYPE_NAME(DECOMPRESSOR);
124 	RR_TYPE_NAME(INITRD);
125 	RR_TYPE_NAME(VMLINUX);
126 	RR_TYPE_NAME(AMODE31);
127 	RR_TYPE_NAME(IPLREPORT);
128 	RR_TYPE_NAME(CERT_COMP_LIST);
129 	RR_TYPE_NAME(MEM_DETECT_EXTENDED);
130 	RR_TYPE_NAME(VMEM);
131 	default:
132 		return "UNKNOWN";
133 	}
134 }
135 
/*
 * Walks all reserved ranges of type @t: starts at physmem_info.reserved[t]
 * and follows the ->chain links (physical addresses, mapped via __va()).
 * @p_start/@p_end receive the bounds of the current @range each iteration;
 * a range with end == 0 terminates the walk.
 */
#define for_each_physmem_reserved_type_range(t, range, p_start, p_end)				\
	for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end;	\
	     range && range->end; range = range->chain ? __va(range->chain) : NULL,		\
	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
140 
141 static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
142 							     struct reserved_range *range)
143 {
144 	if (!range) {
145 		range = &physmem_info.reserved[*t];
146 		if (range->end)
147 			return range;
148 	}
149 	if (range->chain)
150 		return __va(range->chain);
151 	while (++*t < RR_MAX) {
152 		range = &physmem_info.reserved[*t];
153 		if (range->end)
154 			return range;
155 	}
156 	return NULL;
157 }
158 
/*
 * Walks every populated reserved range of every type, including chained
 * extra ranges, via __physmem_reserved_next(). @t is the type cursor,
 * @range the current range; @p_start/@p_end receive its bounds (0/0 once
 * the walk is finished).
 */
#define for_each_physmem_reserved_range(t, range, p_start, p_end)			\
	for (t = 0, range = __physmem_reserved_next(&t, NULL),			\
	    *p_start = range ? range->start : 0, *p_end = range ? range->end : 0;	\
	     range; range = __physmem_reserved_next(&t, range),			\
	    *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
164 
165 static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
166 						 unsigned long *addr, unsigned long *size)
167 {
168 	*addr = physmem_info.reserved[type].start;
169 	*size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
170 	return *size;
171 }
172 
173 #endif
174