// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/chpid.h>
#include <asm/ctlreg.h>
#include <asm/sclp.h>

#include "sclp.h"

/* CPU configuration related functions */
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
/* Channel path configuration related functions */
#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001
/*
 * Minimal SCCB for the CPU (de)configure commands: the SCLP header only.
 * Must be 8-byte aligned (see the kmalloc note in do_core_configure()).
 */
struct cpu_configure_sccb {
	struct sccb_header header;
} __packed __aligned(8);
/* SCCB layout for the channel-path (de)configure commands. */
struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;			/* channel-path configuration mode byte */
	u8 reserved[6];
	u8 cssid;		/* channel subsystem ID */
} __packed;
/*
 * SCCB layout for SCLP_CMDW_READ_CHPATH_INFORMATION: three per-chpid
 * bit masks (recognized/standby/configured) followed by mode/css info.
 */
struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __packed;
/* Request callback: wake the issuer sleeping in sclp_sync_request_timeout(). */
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	complete((struct completion *)data);
}
/*
 * Issue an SCLP command synchronously without a queue timeout.
 * Returns 0 on success or a negative error code.
 */
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}
/*
 * Issue SCLP command @cmd with @sccb and sleep until the interrupt
 * handler signals completion.  A non-zero @timeout bounds the time the
 * request may stay queued.  Returns 0 on success, -ENOMEM if the
 * request could not be allocated, or -EIO if the request did not end
 * in state SCLP_REQ_DONE.
 */
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion done;
	struct sclp_req *req;
	int rc;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	init_completion(&done);
	if (timeout)
		req->queue_timeout = timeout;
	req->command = cmd;
	req->sccb = sccb;
	req->status = SCLP_REQ_FILLED;
	req->callback = sclp_sync_callback;
	req->callback_data = &done;

	rc = sclp_add_request(req);
	if (rc)
		goto out;
	wait_for_completion(&done);

	if (req->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, req->status);
		rc = -EIO;
	}
out:
	kfree(req);
	return rc;
}
_sclp_get_core_info(struct sclp_core_info * info)97 int _sclp_get_core_info(struct sclp_core_info *info)
98 {
99 struct read_cpu_info_sccb *sccb;
100 int rc, length;
101
102 if (!SCLP_HAS_CPU_INFO)
103 return -EOPNOTSUPP;
104
105 length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
106 sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
107 if (!sccb)
108 return -ENOMEM;
109 sccb->header.length = length;
110 sccb->header.control_mask[2] = 0x80;
111 rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
112 SCLP_QUEUE_INTERVAL);
113 if (rc)
114 goto out;
115 if (sccb->header.response_code != 0x0010) {
116 pr_warn("readcpuinfo failed (response=0x%04x)\n",
117 sccb->header.response_code);
118 rc = -EIO;
119 goto out;
120 }
121 sclp_fill_core_info(info, sccb);
122 out:
123 free_pages((unsigned long)sccb, get_order(length));
124 return rc;
125 }
/*
 * Common helper for CPU (de)configure: build a header-only SCCB,
 * issue @cmd and evaluate the response code.  0x0020/0x0120 indicate
 * success; anything else is reported and mapped to -EIO.
 */
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * Use kmalloc to have a minimum alignment of 8 bytes and ensure sccb
	 * is not going to cross a page boundary.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0020 &&
	    sccb->header.response_code != 0x0120) {
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
	}
out:
	kfree(sccb);
	return rc;
}
/* Put the CPU addressed by @core into the configured state. */
int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}
/* Put the CPU addressed by @core into the deconfigured state. */
int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
/*
 * Common helper for channel-path (de)configure: issue @cmd with a
 * zeroed chp_cfg_sccb and evaluate the response code.  The responses
 * 0x0020, 0x0120, 0x0440 and 0x0450 are accepted as success; all
 * others are reported and mapped to -EIO.
 */
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	u16 response;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	sccb = (struct chp_cfg_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	response = sccb->header.response_code;
	if (response != 0x0020 && response != 0x0120 &&
	    response != 0x0440 && response != 0x0450) {
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, response);
		rc = -EIO;
	}
out:
	free_page((unsigned long)sccb);
	return rc;
}
201 /**
202 * sclp_chp_configure - perform configure channel-path sclp command
203 * @chpid: channel-path ID
204 *
205 * Perform configure channel-path command sclp command for specified chpid.
206 * Return 0 after command successfully finished, non-zero otherwise.
207 */
sclp_chp_configure(struct chp_id chpid)208 int sclp_chp_configure(struct chp_id chpid)
209 {
210 return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
211 }
213 /**
214 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
215 * @chpid: channel-path ID
216 *
217 * Perform deconfigure channel-path command sclp command for specified chpid
218 * and wait for completion. On success return 0. Return non-zero otherwise.
219 */
sclp_chp_deconfigure(struct chp_id chpid)220 int sclp_chp_deconfigure(struct chp_id chpid)
221 {
222 return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
223 }
225 /**
226 * sclp_chp_read_info - perform read channel-path information sclp command
227 * @info: resulting channel-path information data
228 *
229 * Perform read channel-path information sclp command and wait for completion.
230 * On success, store channel-path information in @info and return 0. Return
231 * non-zero otherwise.
232 */
sclp_chp_read_info(struct sclp_chp_info * info)233 int sclp_chp_read_info(struct sclp_chp_info *info)
234 {
235 struct chp_info_sccb *sccb;
236 int rc;
237
238 if (!SCLP_HAS_CHP_INFO)
239 return -EOPNOTSUPP;
240 sccb = (struct chp_info_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
241 if (!sccb)
242 return -ENOMEM;
243 sccb->header.length = sizeof(*sccb);
244 rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
245 if (rc)
246 goto out;
247 if (sccb->header.response_code != 0x0010) {
248 pr_warn("read channel-path info failed (response=0x%04x)\n",
249 sccb->header.response_code);
250 rc = -EIO;
251 goto out;
252 }
253 memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
254 memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
255 memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
256 out:
257 free_page((unsigned long)sccb);
258 return rc;
259 }