// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "papr-vpd: " fmt

#include <linux/anon_inodes.h>
#include <linux/build_bug.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/papr-vpd.h>
#include <asm/rtas-work-area.h>
#include <asm/rtas.h>
#include <uapi/asm/papr-vpd.h>

/*
 * Function-specific return values for ibm,get-vpd, derived from PAPR+
 * v2.13 7.3.20 "ibm,get-vpd RTAS Call".
 */
#define RTAS_IBM_GET_VPD_COMPLETE    0 /* All VPD has been retrieved. */
#define RTAS_IBM_GET_VPD_MORE_DATA   1 /* More VPD is available. */
#define RTAS_IBM_GET_VPD_START_OVER -4 /* VPD changed, restart call sequence. */

/**
 * struct rtas_ibm_get_vpd_params - Parameters (in and out) for ibm,get-vpd.
 * @loc_code:  In: Caller-provided location code buffer. Must be RTAS-addressable.
 * @work_area: In: Caller-provided work area buffer for results.
 * @sequence:  In: Sequence number. Out: Next sequence number.
 * @written:   Out: Bytes written by ibm,get-vpd to @work_area.
 * @status:    Out: RTAS call status.
 */
struct rtas_ibm_get_vpd_params {
	const struct papr_location_code *loc_code;
	struct rtas_work_area *work_area;
	u32 sequence;
	u32 written;
	s32 status;
};

/*
 * Only one ibm,get-vpd call sequence may be in progress at a time;
 * this lock serializes them. rtas_ibm_get_vpd() asserts it is held,
 * and vpd_sequence_begin()/vpd_sequence_end() take and release it.
 */
static DEFINE_MUTEX(rtas_ibm_get_vpd_lock);

/**
 * rtas_ibm_get_vpd() - Call ibm,get-vpd to fill a work area buffer.
 * @params: See &struct rtas_ibm_get_vpd_params.
 *
 * Calls ibm,get-vpd until it errors or successfully deposits data
 * into the supplied work area. Handles RTAS retry statuses. Maps RTAS
 * error statuses to reasonable errno values.
 *
 * The caller is expected to invoke rtas_ibm_get_vpd() multiple times
 * to retrieve all the VPD for the provided location code. Only one
 * sequence should be in progress at any time; starting a new sequence
 * will disrupt any sequence already in progress. Serialization of VPD
 * retrieval sequences is the responsibility of the caller.
 *
 * The caller should inspect @params.status to determine whether more
 * calls are needed to complete the sequence.
 *
 * Context: May sleep.
 * Return: -ve on error, 0 otherwise.
 */
static int rtas_ibm_get_vpd(struct rtas_ibm_get_vpd_params *params)
{
	const struct papr_location_code *loc_code = params->loc_code;
	struct rtas_work_area *work_area = params->work_area;
	u32 rets[2];
	s32 fwrc;
	int ret;

	lockdep_assert_held(&rtas_ibm_get_vpd_lock);

	do {
		fwrc = rtas_call(rtas_function_token(RTAS_FN_IBM_GET_VPD), 4, 3,
				 rets,
				 __pa(loc_code),
				 rtas_work_area_phys(work_area),
				 rtas_work_area_size(work_area),
				 params->sequence);
	} while (rtas_busy_delay(fwrc));

	switch (fwrc) {
	case RTAS_HARDWARE_ERROR:
		ret = -EIO;
		break;
	case RTAS_INVALID_PARAMETER:
		ret = -EINVAL;
		break;
	case RTAS_IBM_GET_VPD_START_OVER:
		ret = -EAGAIN;
		break;
	case RTAS_IBM_GET_VPD_MORE_DATA:
		params->sequence = rets[0];
		fallthrough;
	case RTAS_IBM_GET_VPD_COMPLETE:
		params->written = rets[1];
		/*
		 * Kernel or firmware bug, do not continue.
		 */
		if (WARN(params->written > rtas_work_area_size(work_area),
			 "possible write beyond end of work area"))
			ret = -EFAULT;
		else
			ret = 0;
		break;
	default:
		ret = -EIO;
		pr_err_ratelimited("unexpected ibm,get-vpd status %d\n", fwrc);
		break;
	}

	params->status = fwrc;
	return ret;
}
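
/*
 * Typical call pattern for rtas_ibm_get_vpd() (an illustrative
 * sketch only; the vpd_sequence_* helpers below implement this for
 * real, and rtas_ibm_get_vpd_lock must be held across the loop):
 *
 *	struct rtas_ibm_get_vpd_params params = {
 *		.loc_code = &lc,	// RTAS-addressable location code
 *		.work_area = work_area,	// RTAS work area buffer
 *		.sequence = 1,		// 1 begins a new sequence
 *	};
 *	int err;
 *
 *	do {
 *		err = rtas_ibm_get_vpd(&params);
 *		// On success, consume params.written bytes of VPD
 *		// from the work area before calling again.
 *	} while (!err && params.status == RTAS_IBM_GET_VPD_MORE_DATA);
 */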

/*
 * Internal VPD "blob" APIs for accumulating ibm,get-vpd results into
 * an immutable buffer to be attached to a file descriptor.
 */
struct vpd_blob {
	const char *data;
	size_t len;
};

static bool vpd_blob_has_data(const struct vpd_blob *blob)
{
	return blob->data && blob->len;
}

static void vpd_blob_free(const struct vpd_blob *blob)
{
	if (blob) {
		kvfree(blob->data);
		kfree(blob);
	}
}

/**
 * vpd_blob_extend() - Append data to a &struct vpd_blob.
 * @blob: The blob to extend.
 * @data: The new data to append to @blob.
 * @len:  The length of @data.
 *
 * Context: May sleep.
 * Return: -ENOMEM on allocation failure, 0 otherwise.
 */
static int vpd_blob_extend(struct vpd_blob *blob, const char *data, size_t len)
{
	const size_t new_len = blob->len + len;
	const size_t old_len = blob->len;
	const char *old_ptr = blob->data;
	char *new_ptr;

	new_ptr = kvrealloc(old_ptr, new_len, GFP_KERNEL_ACCOUNT);
	if (!new_ptr)
		return -ENOMEM;

	memcpy(&new_ptr[old_len], data, len);
	blob->data = new_ptr;
	blob->len = new_len;
	return 0;
}

/**
 * vpd_blob_generate() - Construct a new &struct vpd_blob.
 * @generator: Function that supplies the blob data.
 * @arg:       Context pointer supplied by caller, passed to @generator.
 *
 * The @generator callback is invoked until it returns NULL. @arg is
 * passed to @generator in its first argument on each call. When
 * @generator returns data, it should store the data length in its
 * second argument.
 *
 * Context: May sleep.
 * Return: A completely populated &struct vpd_blob, or NULL on error.
 */
static const struct vpd_blob *
vpd_blob_generate(const char * (*generator)(void *, size_t *), void *arg)
{
	struct vpd_blob *blob;
	const char *buf;
	size_t len;
	int err = 0;

	blob = kzalloc(sizeof(*blob), GFP_KERNEL_ACCOUNT);
	if (!blob)
		return NULL;

	while (err == 0 && (buf = generator(arg, &len)))
		err = vpd_blob_extend(blob, buf, len);

	if (err != 0 || !vpd_blob_has_data(blob))
		goto free_blob;

	return blob;
free_blob:
	vpd_blob_free(blob);
	return NULL;
}
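
/*
 * Shape of a @generator callback for vpd_blob_generate(), shown as a
 * hypothetical one-shot generator (a sketch only; one_shot() and
 * struct one_shot_ctx are invented for illustration). The generator
 * actually used by this driver is vpd_sequence_fill_work_area()
 * below.
 *
 *	static const char *one_shot(void *arg, size_t *len)
 *	{
 *		struct one_shot_ctx *ctx = arg;
 *
 *		if (ctx->done)
 *			return NULL;	// ends the blob
 *		ctx->done = true;
 *		*len = ctx->len;	// report the buffer's length
 *		return ctx->buf;	// appended via vpd_blob_extend()
 *	}
 */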

/*
 * Internal VPD sequence APIs. A VPD sequence is a series of calls to
 * ibm,get-vpd for a given location code. The sequence ends when an
 * error is encountered or all VPD for the location code has been
 * returned.
 */

/**
 * struct vpd_sequence - State for managing a VPD sequence.
 * @error:  Shall be zero as long as the sequence has not encountered an error,
 *          -ve errno otherwise. Use vpd_sequence_set_err() to update this.
 * @params: Parameter block to pass to rtas_ibm_get_vpd().
 */
struct vpd_sequence {
	int error;
	struct rtas_ibm_get_vpd_params params;
};

/**
 * vpd_sequence_begin() - Begin a VPD retrieval sequence.
 * @seq:      Uninitialized sequence state.
 * @loc_code: Location code that defines the scope of the VPD to return.
 *
 * Initializes @seq with the resources necessary to carry out a VPD
 * sequence. Callers must pass @seq to vpd_sequence_end() regardless
 * of whether the sequence succeeds.
 *
 * Context: May sleep.
 */
static void vpd_sequence_begin(struct vpd_sequence *seq,
			       const struct papr_location_code *loc_code)
{
	/*
	 * Use a static data structure for the location code passed to
	 * RTAS to ensure it's in the RMA and avoid a separate work
	 * area allocation. Guarded by the function lock.
	 */
	static struct papr_location_code static_loc_code;

	/*
	 * We could allocate the work area before acquiring the
	 * function lock, but that would allow concurrent requests to
	 * exhaust the limited work area pool for no benefit. So
	 * allocate the work area under the lock.
	 */
	mutex_lock(&rtas_ibm_get_vpd_lock);
	static_loc_code = *loc_code;
	*seq = (struct vpd_sequence) {
		.params = {
			.work_area = rtas_work_area_alloc(SZ_4K),
			.loc_code = &static_loc_code,
			.sequence = 1,
		},
	};
}

/**
 * vpd_sequence_end() - Finalize a VPD retrieval sequence.
 * @seq: Sequence state.
 *
 * Releases resources obtained by vpd_sequence_begin().
 */
static void vpd_sequence_end(struct vpd_sequence *seq)
{
	rtas_work_area_free(seq->params.work_area);
	mutex_unlock(&rtas_ibm_get_vpd_lock);
}

/**
 * vpd_sequence_should_stop() - Determine whether a VPD retrieval sequence
 * should continue.
 * @seq: VPD sequence state.
 *
 * Examines the sequence error state and outputs of the last call to
 * ibm,get-vpd to determine whether the sequence in progress should
 * continue or stop.
 *
 * Return: True if the sequence has encountered an error or if all VPD for
 * this sequence has been retrieved. False otherwise.
 */
static bool vpd_sequence_should_stop(const struct vpd_sequence *seq)
{
	bool done;

	if (seq->error)
		return true;

	switch (seq->params.status) {
	case RTAS_IBM_GET_VPD_COMPLETE:
		if (seq->params.written == 0)
			done = false; /* Initial state. */
		else
			done = true; /* All data consumed. */
		break;
	case RTAS_IBM_GET_VPD_MORE_DATA:
		done = false; /* More data available. */
		break;
	default:
		done = true; /* Error encountered. */
		break;
	}

	return done;
}

static int vpd_sequence_set_err(struct vpd_sequence *seq, int err)
{
	/* Preserve the first error recorded. */
	if (seq->error == 0)
		seq->error = err;

	return seq->error;
}

/*
 * Generator function to be passed to vpd_blob_generate().
 */
static const char *vpd_sequence_fill_work_area(void *arg, size_t *len)
{
	struct vpd_sequence *seq = arg;
	struct rtas_ibm_get_vpd_params *p = &seq->params;

	if (vpd_sequence_should_stop(seq))
		return NULL;
	if (vpd_sequence_set_err(seq, rtas_ibm_get_vpd(p)))
		return NULL;
	*len = p->written;
	return rtas_work_area_raw_buf(p->work_area);
}

/*
 * Higher-level VPD retrieval code below. These functions use the
 * vpd_blob_* and vpd_sequence_* APIs defined above to create fd-based
 * VPD handles for consumption by user space.
 */

/**
 * papr_vpd_run_sequence() - Run a single VPD retrieval sequence.
 * @loc_code: Location code that defines the scope of VPD to return.
 *
 * Context: May sleep. Holds a mutex and an RTAS work area for its
 *          duration. Typically performs multiple sleepable slab
 *          allocations.
 *
 * Return: A populated &struct vpd_blob on success. Encoded error
 * pointer otherwise.
 */
static const struct vpd_blob *papr_vpd_run_sequence(const struct papr_location_code *loc_code)
{
	const struct vpd_blob *blob;
	struct vpd_sequence seq;

	vpd_sequence_begin(&seq, loc_code);
	blob = vpd_blob_generate(vpd_sequence_fill_work_area, &seq);
	if (!blob)
		vpd_sequence_set_err(&seq, -ENOMEM);
	vpd_sequence_end(&seq);

	if (seq.error) {
		vpd_blob_free(blob);
		return ERR_PTR(seq.error);
	}

	return blob;
}

/**
 * papr_vpd_retrieve() - Return the VPD for a location code.
 * @loc_code: Location code that defines the scope of VPD to return.
 *
 * Run VPD sequences against @loc_code until a blob is successfully
 * instantiated, or a hard error is encountered, or a fatal signal is
 * pending.
 *
 * Context: May sleep.
 * Return: A fully populated VPD blob when successful. Encoded error
 * pointer otherwise.
 */
static const struct vpd_blob *papr_vpd_retrieve(const struct papr_location_code *loc_code)
{
	const struct vpd_blob *blob;

	/*
	 * EAGAIN means the sequence errored with a -4 (VPD changed)
	 * status from ibm,get-vpd, and we should attempt a new
	 * sequence. PAPR+ v2.13 R1-7.3.20-5 indicates that this
	 * should be a transient condition, not something that happens
	 * continuously. But we'll stop trying on a fatal signal.
	 */
	do {
		blob = papr_vpd_run_sequence(loc_code);
		if (!IS_ERR(blob)) /* Success. */
			break;
		if (PTR_ERR(blob) != -EAGAIN) /* Hard error. */
			break;
		pr_info_ratelimited("VPD changed during retrieval, retrying\n");
		cond_resched();
	} while (!fatal_signal_pending(current));

	return blob;
}

static ssize_t papr_vpd_handle_read(struct file *file, char __user *buf, size_t size, loff_t *off)
{
	const struct vpd_blob *blob = file->private_data;

	/* bug: we should not instantiate a handle without any data attached. */
	if (!vpd_blob_has_data(blob)) {
		pr_err_once("handle without data\n");
		return -EIO;
	}

	return simple_read_from_buffer(buf, size, off, blob->data, blob->len);
}

static int papr_vpd_handle_release(struct inode *inode, struct file *file)
{
	const struct vpd_blob *blob = file->private_data;

	vpd_blob_free(blob);

	return 0;
}

static loff_t papr_vpd_handle_seek(struct file *file, loff_t off, int whence)
{
	const struct vpd_blob *blob = file->private_data;

	return fixed_size_llseek(file, off, whence, blob->len);
}

static const struct file_operations papr_vpd_handle_ops = {
	.read = papr_vpd_handle_read,
	.llseek = papr_vpd_handle_seek,
	.release = papr_vpd_handle_release,
};

/**
 * papr_vpd_create_handle() - Create a fd-based handle for reading VPD.
 * @ulc: Location code in user memory; defines the scope of the VPD to
 *       retrieve.
 *
 * Handler for PAPR_VPD_IOC_CREATE_HANDLE ioctl command. Validates
 * @ulc and instantiates an immutable VPD "blob" for it. The blob is
 * attached to a file descriptor for reading by user space. The memory
 * backing the blob is freed when the file is released.
 *
 * The entire requested VPD is retrieved by this call and all
 * necessary RTAS interactions are performed before returning the fd
 * to user space. This keeps the read handler simple and ensures that
 * the kernel can prevent interleaving of ibm,get-vpd call sequences.
 *
 * Return: The installed fd number if successful, -ve errno otherwise.
 */
static long papr_vpd_create_handle(struct papr_location_code __user *ulc)
{
	struct papr_location_code klc;
	const struct vpd_blob *blob;
	struct file *file;
	long err;
	int fd;

	if (copy_from_user(&klc, ulc, sizeof(klc)))
		return -EFAULT;

	if (!string_is_terminated(klc.str, ARRAY_SIZE(klc.str)))
		return -EINVAL;

	blob = papr_vpd_retrieve(&klc);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto free_blob;
	}

	file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops,
				  (void *)blob, O_RDONLY);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto put_fd;
	}

	file->f_mode |= FMODE_LSEEK | FMODE_PREAD;
	fd_install(fd, file);
	return fd;
put_fd:
	put_unused_fd(fd);
free_blob:
	vpd_blob_free(blob);
	return err;
}

/*
 * Top-level ioctl handler for /dev/papr-vpd.
 */
static long papr_vpd_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (__force void __user *)arg;
	long ret;

	switch (ioctl) {
	case PAPR_VPD_IOC_CREATE_HANDLE:
		ret = papr_vpd_create_handle(argp);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}

static const struct file_operations papr_vpd_ops = {
	.unlocked_ioctl = papr_vpd_dev_ioctl,
};

static struct miscdevice papr_vpd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "papr-vpd",
	.fops = &papr_vpd_ops,
};
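
/*
 * Example user-space usage (an illustrative sketch, not kernel code;
 * it assumes only the uapi definitions from <asm/papr-vpd.h>, and
 * the location code string shown is hypothetical):
 *
 *	struct papr_location_code lc = { .str = "U78D5.ND1.ABC123-P0" };
 *	int devfd = open("/dev/papr-vpd", O_RDONLY);
 *	int vpdfd = ioctl(devfd, PAPR_VPD_IOC_CREATE_HANDLE, &lc);
 *
 *	// vpdfd now refers to an immutable snapshot of the VPD for
 *	// lc; read() it until it returns 0, then close() both fds.
 */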

static __init int papr_vpd_init(void)
{
	if (!rtas_function_implemented(RTAS_FN_IBM_GET_VPD))
		return -ENODEV;

	return misc_register(&papr_vpd_dev);
}
machine_device_initcall(pseries, papr_vpd_init);