/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_PHYSMEM_INFO_H
#define _ASM_S390_PHYSMEM_INFO_H

#include <linux/types.h>

enum physmem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_SCLP_STOR_INFO,
	MEM_DETECT_DIAG260,
	MEM_DETECT_SCLP_READ_INFO,
	MEM_DETECT_BIN_SEARCH
};

struct physmem_range {
	u64 start;
	u64 end;
};

enum reserved_range_type {
	RR_DECOMPRESSOR,
	RR_INITRD,
	RR_VMLINUX,
	RR_AMODE31,
	RR_IPLREPORT,
	RR_CERT_COMP_LIST,
	RR_MEM_DETECT_EXTENDED,
	RR_VMEM,
	RR_MAX
};

struct reserved_range {
	unsigned long start;
	unsigned long end;
	struct reserved_range *chain;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
 * According to the architecture one storage element can have as many as
 * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
 * If more physmem_ranges are required, a block of memory from an already
 * known physmem_range is taken (online_extended points to it).
 */
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */

struct physmem_info {
	u32 range_count;
	u8 info_source;
	unsigned long usable;
	struct reserved_range reserved[RR_MAX];
	struct physmem_range online[MEM_INLINED_ENTRIES];
	struct physmem_range *online_extended;
};
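
/*
 * Illustrative note (not part of the original header): with
 * MEM_INLINED_ENTRIES == 255, online range index n < 255 lives in
 * physmem_info.online[n], while a larger index such as n == 300 lives in
 * physmem_info.online_extended[300 - 255], i.e. online_extended[45].
 * __get_physmem_range() below implements exactly this split.
 */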

extern struct physmem_info physmem_info;

void add_physmem_online_range(u64 start, u64 end);
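
/*
 * Usage sketch (hypothetical values, not from this file): memory detection
 * code is expected to record each online range it finds, for example:
 *
 *	add_physmem_online_range(0x0, 0x80000000UL);
 *
 * which would register the first 2 GB as online.
 */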

static inline int __get_physmem_range(u32 n, unsigned long *start,
				      unsigned long *end, bool respect_usable_limit)
{
	if (n >= physmem_info.range_count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)physmem_info.online[n].start;
		*end = (unsigned long)physmem_info.online[n].end;
	} else {
		*start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
	}

	if (respect_usable_limit && physmem_info.usable) {
		if (*start >= physmem_info.usable)
			return -1;
		if (*end > physmem_info.usable)
			*end = physmem_info.usable;
	}
	return 0;
}

/**
 * for_each_physmem_usable_range - early online memory range iterator
 * @i: an integer used as a loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges below the usable limit.
 */
#define for_each_physmem_usable_range(i, p_start, p_end)		\
	for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)

/* Walks over all detected online memory ranges disregarding the usable limit. */
#define for_each_physmem_online_range(i, p_start, p_end)		\
	for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
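
/*
 * Usage sketch (illustrative only): print every detected online range below
 * the usable limit. 'i' must be an unsigned integer lvalue; start/end receive
 * the range boundaries:
 *
 *	u32 i;
 *	unsigned long start, end;
 *
 *	for_each_physmem_usable_range(i, &start, &end)
 *		printk("online: 0x%lx-0x%lx\n", start, end);
 *
 * for_each_physmem_online_range() walks the same ranges but ignores
 * physmem_info.usable.
 */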

static inline const char *get_physmem_info_source(void)
{
	switch (physmem_info.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}
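
/*
 * Illustrative use (not from this file): report how memory was detected, e.g.
 *
 *	printk("Memory detected by: %s\n", get_physmem_info_source());
 *
 * prints "sclp storage info", "diag260", "sclp read info" or "binary search",
 * or "none" if no detection result was recorded.
 */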

#define RR_TYPE_NAME(t) case RR_ ## t: return #t
static inline const char *get_rr_type_name(enum reserved_range_type t)
{
	switch (t) {
	RR_TYPE_NAME(DECOMPRESSOR);
	RR_TYPE_NAME(INITRD);
	RR_TYPE_NAME(VMLINUX);
	RR_TYPE_NAME(AMODE31);
	RR_TYPE_NAME(IPLREPORT);
	RR_TYPE_NAME(CERT_COMP_LIST);
	RR_TYPE_NAME(MEM_DETECT_EXTENDED);
	RR_TYPE_NAME(VMEM);
	default:
		return "UNKNOWN";
	}
}
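
/*
 * Example (derived from the macro above): get_rr_type_name(RR_INITRD) returns
 * "INITRD", since RR_TYPE_NAME(INITRD) expands to 'case RR_INITRD: return "INITRD"'.
 */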

#define for_each_physmem_reserved_type_range(t, range, p_start, p_end)				\
	for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end;	\
	     range && range->end; range = range->chain,						\
	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
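
/*
 * Usage sketch (illustrative only): walk all reserved ranges of one type,
 * following the ->chain list, e.g. every RR_VMEM range:
 *
 *	struct reserved_range *range;
 *	unsigned long start, end;
 *
 *	for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
 *		printk("vmem reservation: 0x%lx-0x%lx\n", start, end);
 */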

static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
							     struct reserved_range *range)
{
	if (!range) {
		range = &physmem_info.reserved[*t];
		if (range->end)
			return range;
	}
	if (range->chain)
		return range->chain;
	while (++*t < RR_MAX) {
		range = &physmem_info.reserved[*t];
		if (range->end)
			return range;
	}
	return NULL;
}

#define for_each_physmem_reserved_range(t, range, p_start, p_end)			\
	for (t = 0, range = __physmem_reserved_next(&t, NULL),			\
	    *p_start = range ? range->start : 0, *p_end = range ? range->end : 0;	\
	     range; range = __physmem_reserved_next(&t, range),			\
	    *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
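
/*
 * Usage sketch (illustrative only): walk all reserved ranges of all types.
 * 't' must be an enum reserved_range_type lvalue, as the helper above
 * advances it by reference:
 *
 *	enum reserved_range_type t;
 *	struct reserved_range *range;
 *	unsigned long start, end;
 *
 *	for_each_physmem_reserved_range(t, range, &start, &end)
 *		printk("%s: 0x%lx-0x%lx\n", get_rr_type_name(t), start, end);
 */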

static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
						 unsigned long *addr, unsigned long *size)
{
	*addr = physmem_info.reserved[type].start;
	*size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
	return *size;
}
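
/*
 * Illustrative example (hypothetical caller): query where the initrd was
 * placed and whether a reservation exists at all:
 *
 *	unsigned long addr, size;
 *
 *	if (get_physmem_reserved(RR_INITRD, &addr, &size))
 *		printk("initrd at 0x%lx, %lu bytes\n", addr, size);
 *
 * The returned value is the size, so 0 means the range was never recorded.
 */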

#endif /* _ASM_S390_PHYSMEM_INFO_H */