xref: /linux/drivers/s390/char/sclp_early.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
/*
 * SCLP early driver
 *
 * Copyright IBM Corp. 2013
 */

#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

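/*
 * Response block returned by the READ SCP INFO command; the comments give
 * the byte offset of each field within the 4 KB SCCB.
 */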
struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_pad_11[16 - 11];	/* 11-15 */
	u16	ncpurl;			/* 16-17 */
	u16	cpuoff;			/* 18-19 */
	u8	_pad_20[24 - 20];	/* 20-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_pad_32[42 - 32];	/* 32-41 */
	u8	fac42;			/* 42 */
	u8	fac43;			/* 43 */
	u8	_pad_44[48 - 44];	/* 44-47 */
	u64	facilities;		/* 48-55 */
	u8	_pad_56[66 - 56];	/* 56-65 */
	u8	fac66;			/* 66 */
	u8	_pad_67[76 - 67];	/* 67-75 */
	u32	ibc;			/* 76-79 */
	u8	_pad80[84 - 80];	/* 80-83 */
	u8	fac84;			/* 84 */
	u8	fac85;			/* 85 */
	u8	_pad_86[91 - 86];	/* 86-90 */
	u8	flags;			/* 91 */
	u8	_pad_92[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_pad_112[120 - 112];	/* 112-119 */
	u16	hcpua;			/* 120-121 */
	u8	_pad_122[4096 - 122];	/* 122-4095 */
} __packed __aligned(PAGE_SIZE);

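/*
 * A single statically allocated, page-aligned SCCB page is reused for all
 * of the early SCLP requests issued below.
 */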
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static struct sclp_ipl_info sclp_ipl_info;

struct sclp_info sclp;
EXPORT_SYMBOL(sclp);

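/*
 * Issue an SCLP service call and wait synchronously for its completion:
 * service-signal external interrupts are enabled via control register 0,
 * and an enabled wait PSW is loaded until the interrupt arrives.
 */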
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

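/*
 * Fetch the READ SCP INFO response, preferring the "forced" command variant.
 * Busy conditions are retried; response code 0x0010 is treated as success,
 * while 0x01f0 causes a fall back to the next command in the list.
 */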
static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
	int rc, i;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10)
			return 0;
		if (sccb->header.response_code != 0x1f0)
			break;
	}
	return -EIO;
}

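/*
 * Evaluate the READ SCP INFO response and fill in the global sclp structure:
 * facility bits, storage increment size and count, the IBC value, the maximum
 * number of cores, the siif/sigpif flags of the boot CPU entry, the
 * multi-threading IDs, and the IPL information saved for sclp_get_ipl_info().
 */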
static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
	struct sclp_core_entry *cpue;
	u16 boot_cpu_address, cpu;

	if (sclp_read_info_early(sccb))
		return;

	sclp.facilities = sccb->facilities;
	sclp.has_sprp = !!(sccb->fac84 & 0x02);
	sclp.has_core_type = !!(sccb->fac84 & 0x01);
	if (sccb->fac85 & 0x02)
		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
	sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	sclp.rzm <<= 20;
	sclp.ibc = sccb->ibc;

	if (!sccb->hcpua) {
		if (MACHINE_IS_VM)
			sclp.max_cores = 64;
		else
			sclp.max_cores = sccb->ncpurl;
	} else {
		sclp.max_cores = sccb->hcpua + 1;
	}

	boot_cpu_address = stap();
	cpue = (void *)sccb + sccb->cpuoff;
	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
		if (boot_cpu_address != cpue->core_id)
			continue;
		sclp.has_siif = cpue->siif;
		sclp.has_sigpif = cpue->sigpif;
		break;
	}

	/* Save IPL information */
	sclp_ipl_info.is_valid = 1;
	if (sccb->flags & 0x2)
		sclp_ipl_info.has_dump = 1;
	memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);

	sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
	sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
	sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
}

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. The sclp_facilities_detect() function retrieves
 * and saves the IPL information.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	*info = sclp_ipl_info;
}

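/*
 * Issue an early SCLP command, retrying while the SCLP is busy, and treat
 * any response code other than 0x0020 as an error.
 */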
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	do {
		rc = sclp_cmd_sync_early(cmd, sccb);
	} while (rc == -EBUSY);

	if (rc)
		return -EIO;
	if (((struct sccb_header *) sccb)->response_code != 0x0020)
		return -EIO;
	return 0;
}

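/*
 * Prepare an SDIAS event buffer that asks for the size of the HSA
 * (event qualifier SDIAS_EQ_SIZE, data id SDIAS_DI_FCP_DUMP).
 */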
static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
{
	memset(sccb, 0, sizeof(*sccb));

	sccb->hdr.length = sizeof(*sccb);
	sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb->evbuf.hdr.type = EVTYP_SDIAS;
	sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
	sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
	sccb->evbuf.event_id = 4712;
	sccb->evbuf.dbs = 1;
}

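/* Set the SCLP receive and send event masks via the WRITE EVENT MASK command. */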
static int __init sclp_set_event_mask(struct init_sccb *sccb,
				      unsigned long receive_mask,
				      unsigned long send_mask)
{
	memset(sccb, 0, sizeof(*sccb));
	sccb->header.length = sizeof(*sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}

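/*
 * Ask the SCLP for the HSA size via an SDIAS size request; the result is
 * the reported block count, less one block, converted to bytes.
 */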
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
	sccb_init_eq_size(sccb);
	if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
		return -EIO;
	if (sccb->evbuf.blk_cnt == 0)
		return 0;
	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

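/*
 * For the asynchronous interface, pick up the SDIAS response with a READ
 * EVENT DATA call and evaluate the block count in the same way.
 */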
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
	memset(sccb, 0, PAGE_SIZE);
	sccb->length = PAGE_SIZE;
	if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
		return -EIO;
	if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
		return 0;
	return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}

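/*
 * Detect the HSA size: first try the synchronous SDIAS interface (LPAR);
 * if that reports a zero size, retry with the asynchronous interface (z/VM)
 * and wait for the event data.
 */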
static void __init sclp_hsa_size_detect(void *sccb)
{
	long size;

	/* First try synchronous interface (LPAR) */
	if (sclp_set_event_mask(sccb, 0, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	if (size != 0)
		goto out;
	/* Then try asynchronous interface (z/VM) */
	if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	size = sclp_hsa_copy_wait(sccb);
	if (size < 0)
		return;
out:
	sclp.hsa_size = size;
}

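/*
 * A line-mode console is usable only if the stored masks show support for
 * operator commands and for message (or priority message) events.
 */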
static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
	if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
		return 0;
	if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
		return 0;
	return 1;
}

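/*
 * Evaluate the masks returned by the final WRITE EVENT MASK call to decide
 * whether a VT220 and/or a line-mode SCLP console is available.
 */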
static void __init sclp_console_detect(struct init_sccb *sccb)
{
	if (sccb->header.response_code != 0x20)
		return;

	if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
		sclp.has_vt220 = 1;

	if (sclp_con_check_linemode(sccb))
		sclp.has_linemode = 1;
}

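/*
 * Early detection entry point, called from the architecture setup code:
 * detect SCLP facilities, the HSA size, and the console capabilities using
 * the single static SCCB page.
 */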
void __init sclp_early_detect(void)
{
	void *sccb = &sccb_early;

	sclp_facilities_detect(sccb);
	sclp_hsa_size_detect(sccb);

	/* Turn off SCLP event notifications.  Also save remote masks in the
	 * sccb.  These are sufficient to detect sclp console capabilities.
	 */
	sclp_set_event_mask(sccb, 0, 0);
	sclp_console_detect(sccb);
}