1 // SPDX-License-Identifier: GPL-2.0-only
2
3 #define pr_fmt(fmt) "papr-hvpipe: " fmt
4
5 #include <linux/module.h>
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <linux/delay.h>
9 #include <linux/anon_inodes.h>
10 #include <linux/miscdevice.h>
11 #include <linux/file.h>
12 #include <linux/fs.h>
13 #include <linux/poll.h>
14 #include <linux/of.h>
15 #include <asm/machdep.h>
16 #include <asm/rtas.h>
17 #include <asm/rtas-work-area.h>
18 #include <asm/papr-sysparm.h>
19 #include <uapi/asm/papr-hvpipe.h>
20 #include "pseries.h"
21 #include "papr-hvpipe.h"
22
23 static DEFINE_SPINLOCK(hvpipe_src_list_lock);
24 static LIST_HEAD(hvpipe_src_list);
25
26 static unsigned char hvpipe_ras_buf[RTAS_ERROR_LOG_MAX];
27 static struct workqueue_struct *papr_hvpipe_wq;
28 static struct work_struct *papr_hvpipe_work;
29 static int hvpipe_check_exception_token;
30 static bool hvpipe_feature;
31
32 /*
33 * New PowerPC FW provides support for partitions and various
34 * sources (Ex: remote hardware management console (HMC)) to
35 * exchange information through an inband hypervisor channel
36 * called HVPIPE. Only HMCs are supported right now and
37 * partitions can communicate with multiple HMCs and each
38 * source represented by source ID.
39 *
40 * FW introduces send HVPIPE and recv HVPIPE RTAS calls for
41 * partitions to send and receive payloads respectively.
42 *
43 * These RTAS functions have the following certain requirements
44 * / limitations:
45 * - One hvpipe per partition for all sources.
46 * - Assume the return status of send HVPIPE as delivered to source
47 * - Assume the return status of recv HVPIPE as ACK to source
48 * - Generates HVPIPE event message when the payload is ready
49 * for the partition. The hypervisor will not deliver another
50 * event until the partition read the previous payload which
51 * means the pipe is blocked for any sources.
52 *
53 * Linux implementation:
54 * Follow the similar interfaces that the OS has for other RTAS calls.
55 * ex: /dev/papr-indices, /dev/papr-vpd, etc.
56 * - /dev/papr-hvpipe is available for the user space.
57 * - devfd = open("/dev/papr-hvpipe", ..)
 * - fd = ioctl(devfd, PAPR_HVPIPE_IOC_CREATE_HANDLE, &srcID) - for each source
59 * - write(fd, buf, size) --> Issue send HVPIPE RTAS call and
60 * returns size for success or the corresponding error for RTAS
61 * return code for failure.
62 * - poll(fd,..) -> wakeup FD if the payload is available to read.
63 * HVPIPE event message handler wakeup FD based on source ID in
64 * the event message
65 * - read(fd, buf, size) --> Issue recv HVPIPE RTAS call and
66 * returns size for success or the corresponding error for RTAS
67 * return code for failure.
68 */
69
70 /*
71 * ibm,receive-hvpipe-msg RTAS call.
72 * @area: Caller-provided work area buffer for results.
73 * @srcID: Source ID returned by the RTAS call.
74 * @bytesw: Bytes written by RTAS call to @area.
75 */
/*
 * Invoke the ibm,receive-hvpipe-msg RTAS call, retrying while the
 * hypervisor reports a busy / extended-delay status.
 * @area: caller-provided work area that receives the payload.
 * @srcID: out parameter, source ID reported by RTAS.
 * @bytesw: out parameter, number of bytes RTAS wrote into @area.
 *
 * Returns 0 on success, or a negative errno translated from the
 * RTAS status.
 */
static int rtas_ibm_receive_hvpipe_msg(struct rtas_work_area *area,
					u32 *srcID, u32 *bytesw)
{
	const s32 token = rtas_function_token(RTAS_FN_IBM_RECEIVE_HVPIPE_MSG);
	u32 rets[2];
	s32 status;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		status = rtas_call(token, 2, 3, rets,
				rtas_work_area_phys(area),
				rtas_work_area_size(area));
	} while (rtas_busy_delay(status));

	switch (status) {
	case RTAS_SUCCESS:
		*srcID = rets[0];
		*bytesw = rets[1];
		return 0;
	case RTAS_HARDWARE_ERROR:
		return -EIO;
	case RTAS_INVALID_PARAMETER:
		return -EINVAL;
	case RTAS_FUNC_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	default:
		pr_err_ratelimited("unexpected ibm,receive-hvpipe-msg status %d\n",
				status);
		return -EIO;
	}
}
117
118 /*
119 * ibm,send-hvpipe-msg RTAS call
120 * @area: Caller-provided work area buffer to send.
121 * @srcID: Target source for the send pipe message.
122 */
/*
 * ibm,send-hvpipe-msg RTAS call.
 * @area: Caller-provided work area holding the buffer list to send.
 * @srcID: Target source for the send pipe message.
 *
 * Returns 0 on success, or a negative errno translated from the
 * RTAS status (-EPIPE when the hypervisor reports the pipe closed).
 */
static int rtas_ibm_send_hvpipe_msg(struct rtas_work_area *area, u32 srcID)
{
	const s32 token = rtas_function_token(RTAS_FN_IBM_SEND_HVPIPE_MSG);
	s32 fwrc;
	int ret;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		fwrc = rtas_call(token, 2, 1, NULL, srcID,
				rtas_work_area_phys(area));

	} while (rtas_busy_delay(fwrc));

	switch (fwrc) {
	case RTAS_SUCCESS:
		ret = 0;
		break;
	case RTAS_HARDWARE_ERROR:
		ret = -EIO;
		break;
	case RTAS_INVALID_PARAMETER:
		ret = -EINVAL;
		break;
	case RTAS_HVPIPE_CLOSED:
		ret = -EPIPE;
		break;
	case RTAS_FUNC_NOT_SUPPORTED:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = -EIO;
		/* Fixed copy-paste: this is the *send* call, not receive. */
		pr_err_ratelimited("unexpected ibm,send-hvpipe-msg status %d\n", fwrc);
		break;
	}

	return ret;
}
162
/*
 * Look up a registered source by its ID on hvpipe_src_list.
 * Returns NULL when no FD is bound to @srcID. Callers serialize
 * access to the list with hvpipe_src_list_lock.
 */
static struct hvpipe_source_info *hvpipe_find_source(u32 srcID)
{
	struct hvpipe_source_info *cur;

	list_for_each_entry(cur, &hvpipe_src_list, list) {
		if (cur->srcID == srcID)
			return cur;
	}

	return NULL;
}
173
174 /*
175 * This work function collects receive buffer with recv HVPIPE
176 * RTAS call. Called from read()
177 * @buf: User specified buffer to copy the payload that returned
178 * from recv HVPIPE RTAS.
179 * @size: Size of buffer user passed.
180 */
/*
 * Retrieve the pending payload with the recv HVPIPE RTAS call.
 * Called from read().
 * @buf: User buffer to copy the payload into, or NULL to drain the
 *       pipe while discarding the payload (used when releasing the
 *       FD or when no reader waits on the source, so the single
 *       shared pipe does not stay blocked).
 * @size: Size of the buffer user passed.
 *
 * Returns the number of payload bytes copied, 0 when @buf is NULL,
 * or a negative errno.
 */
static int hvpipe_rtas_recv_msg(char __user *buf, int size)
{
	struct rtas_work_area *work_area;
	u32 srcID, bytes_written;
	int ret;

	work_area = rtas_work_area_alloc(SZ_4K);
	if (!work_area) {
		pr_err("Could not allocate RTAS buffer for recv pipe\n");
		return -ENOMEM;
	}

	ret = rtas_ibm_receive_hvpipe_msg(work_area, &srcID,
			&bytes_written);
	if (!ret) {
		/*
		 * Recv HVPIPE RTAS is successful.
		 * When releasing FD or no one is waiting on the
		 * specific source, issue recv HVPIPE RTAS call
		 * so that pipe is not blocked - this func is called
		 * with NULL buf.
		 */
		if (buf) {
			if (size < bytes_written) {
				pr_err("Received the payload size = %d, but the buffer size = %d\n",
						bytes_written, size);
				/* Truncate to what the caller can accept. */
				bytes_written = size;
			}
			/*
			 * copy_to_user() returns the number of bytes NOT
			 * copied; translate failure to -EFAULT instead of
			 * leaking a positive leftover count to read().
			 */
			if (copy_to_user(buf,
					rtas_work_area_raw_buf(work_area),
					bytes_written))
				ret = -EFAULT;
			else
				ret = bytes_written;
		}
	} else {
		pr_err("ibm,receive-hvpipe-msg failed with %d\n",
				ret);
	}

	rtas_work_area_free(work_area);
	return ret;
}
223
224 /*
225 * papr_hvpipe_handle_write - Issue send HVPIPE RTAS and return
226 * the size (payload + HVPIPE_HDR_LEN) for RTAS success.
227 * Otherwise returns the status of RTAS to the user space
228 */
/*
 * papr_hvpipe_handle_write - Issue send HVPIPE RTAS and return
 * the size (payload + HVPIPE_HDR_LEN) for RTAS success.
 * Otherwise returns the status of RTAS to the user space.
 *
 * The user payload starts after a HVPIPE_HDR_LEN header; only the
 * payload portion is handed to the hypervisor.
 */
static ssize_t papr_hvpipe_handle_write(struct file *file,
		const char __user *buf, size_t size, loff_t *off)
{
	struct hvpipe_source_info *src_info = file->private_data;
	struct rtas_work_area *work_area, *work_buf;
	unsigned long ret, len;
	__be64 *area_be;

	/*
	 * Return -ENXIO during migration
	 */
	if (!hvpipe_feature)
		return -ENXIO;

	/* FD was already released (private_data cleared in release()). */
	if (!src_info)
		return -EIO;

	/*
	 * Send HVPIPE RTAS is used to send payload to the specific
	 * source with the input parameters source ID and the payload
	 * as buffer list. Each entry in the buffer list contains
	 * address/length pair of the buffer.
	 *
	 * The buffer list format is as follows:
	 *
	 * Header (length of address/length pairs and the header length)
	 * Address of 4K buffer 1
	 * Length of 4K buffer 1 used
	 * ...
	 * Address of 4K buffer n
	 * Length of 4K buffer n used
	 *
	 * See PAPR 7.3.32.2 ibm,send-hvpipe-msg
	 *
	 * Even though can support max 1MB payload, the hypervisor
	 * supports only 4048 bytes payload at present and also
	 * just one address/length entry.
	 *
	 * writev() interface can be added in future when the
	 * hypervisor supports multiple buffer list entries.
	 */
	/* HVPIPE_MAX_WRITE_BUFFER_SIZE = 4048 bytes */
	if ((size > (HVPIPE_HDR_LEN + HVPIPE_MAX_WRITE_BUFFER_SIZE)) ||
		(size <= HVPIPE_HDR_LEN))
		return -EINVAL;

	/*
	 * The length of (address + length) pair + the length of header
	 */
	len = (2 * sizeof(u64)) + sizeof(u64);
	/* Skip the user-supplied header; send only the payload. */
	size -= HVPIPE_HDR_LEN;
	buf += HVPIPE_HDR_LEN;
	/* Serialize senders: one buffer-list/work-area pair at a time. */
	mutex_lock(&rtas_ibm_send_hvpipe_msg_lock);
	work_area = rtas_work_area_alloc(SZ_4K);
	if (!work_area) {
		ret = -ENOMEM;
		goto out;
	}
	area_be = (__be64 *)rtas_work_area_raw_buf(work_area);
	/* header */
	area_be[0] = cpu_to_be64(len);

	work_buf = rtas_work_area_alloc(SZ_4K);
	if (!work_buf) {
		ret = -ENOMEM;
		goto out_work;
	}
	/* First buffer address */
	area_be[1] = cpu_to_be64(rtas_work_area_phys(work_buf));
	/* First buffer address length */
	area_be[2] = cpu_to_be64(size);

	if (!copy_from_user(rtas_work_area_raw_buf(work_buf), buf, size)) {
		ret = rtas_ibm_send_hvpipe_msg(work_area, src_info->srcID);
		if (!ret)
			/* Report the full count the user wrote, header included. */
			ret = size + HVPIPE_HDR_LEN;
	} else
		/*
		 * NOTE(review): copy_from_user() failure is conventionally
		 * reported as -EFAULT; confirm -EPERM is intentional here.
		 */
		ret = -EPERM;

	rtas_work_area_free(work_buf);
out_work:
	rtas_work_area_free(work_area);
out:
	mutex_unlock(&rtas_ibm_send_hvpipe_msg_lock);
	return ret;
}
315
316 /*
317 * papr_hvpipe_handle_read - If the payload for the specific
318 * source is pending in the hypervisor, issue recv HVPIPE RTAS
319 * and return the payload to the user space.
320 *
321 * When the payload is available for the partition, the
322 * hypervisor notifies HVPIPE event with the source ID
323 * and the event handler wakeup FD(s) that are waiting.
324 */
papr_hvpipe_handle_read(struct file * file,char __user * buf,size_t size,loff_t * off)325 static ssize_t papr_hvpipe_handle_read(struct file *file,
326 char __user *buf, size_t size, loff_t *off)
327 {
328
329 struct hvpipe_source_info *src_info = file->private_data;
330 struct papr_hvpipe_hdr hdr;
331 long ret;
332
333 /*
334 * Return -ENXIO during migration
335 */
336 if (!hvpipe_feature)
337 return -ENXIO;
338
339 if (!src_info)
340 return -EIO;
341
342 /*
343 * Max payload is 4048 (HVPIPE_MAX_WRITE_BUFFER_SIZE)
344 */
345 if ((size > (HVPIPE_HDR_LEN + HVPIPE_MAX_WRITE_BUFFER_SIZE)) ||
346 (size < HVPIPE_HDR_LEN))
347 return -EINVAL;
348
349 /*
350 * Payload is not available to receive or source pipe
351 * is not closed.
352 */
353 if (!src_info->hvpipe_status)
354 return 0;
355
356 hdr.version = 0;
357 hdr.flags = 0;
358
359 /*
360 * In case if the hvpipe has payload and also the
361 * hypervisor closed the pipe to the source, retrieve
362 * the payload and return to the user space first and
363 * then notify the userspace about the hvpipe close in
364 * next read().
365 */
366 if (src_info->hvpipe_status & HVPIPE_MSG_AVAILABLE)
367 hdr.flags = HVPIPE_MSG_AVAILABLE;
368 else if (src_info->hvpipe_status & HVPIPE_LOST_CONNECTION)
369 hdr.flags = HVPIPE_LOST_CONNECTION;
370 else
371 /*
372 * Should not be here without one of the above
373 * flags set
374 */
375 return -EIO;
376
377 ret = copy_to_user(buf, &hdr, HVPIPE_HDR_LEN);
378 if (ret)
379 return ret;
380
381 /*
382 * Message event has payload, so get the payload with
383 * recv HVPIPE RTAS.
384 */
385 if (hdr.flags & HVPIPE_MSG_AVAILABLE) {
386 ret = hvpipe_rtas_recv_msg(buf + HVPIPE_HDR_LEN,
387 size - HVPIPE_HDR_LEN);
388 if (ret > 0) {
389 src_info->hvpipe_status &= ~HVPIPE_MSG_AVAILABLE;
390 ret += HVPIPE_HDR_LEN;
391 }
392 } else if (hdr.flags & HVPIPE_LOST_CONNECTION) {
393 /*
394 * Hypervisor is closing the pipe for the specific
395 * source. So notify user space.
396 */
397 src_info->hvpipe_status &= ~HVPIPE_LOST_CONNECTION;
398 ret = HVPIPE_HDR_LEN;
399 }
400
401 return ret;
402 }
403
404 /*
405 * The user space waits for the payload to receive.
406 * The hypervisor sends HVPIPE event message to the partition
407 * when the payload is available. The event handler wakeup FD
408 * depends on the source ID in the message event.
409 */
papr_hvpipe_handle_poll(struct file * filp,struct poll_table_struct * wait)410 static __poll_t papr_hvpipe_handle_poll(struct file *filp,
411 struct poll_table_struct *wait)
412 {
413 struct hvpipe_source_info *src_info = filp->private_data;
414
415 /*
416 * HVPIPE is disabled during SUSPEND and enabled after migration.
417 * So return POLLRDHUP during migration
418 */
419 if (!hvpipe_feature)
420 return POLLRDHUP;
421
422 if (!src_info)
423 return POLLNVAL;
424
425 /*
426 * If hvpipe already has pending payload, return so that
427 * the user space can issue read().
428 */
429 if (src_info->hvpipe_status)
430 return POLLIN | POLLRDNORM;
431
432 /*
433 * Wait for the message event
434 * hvpipe_event_interrupt() wakes up this wait_queue
435 */
436 poll_wait(filp, &src_info->recv_wqh, wait);
437 if (src_info->hvpipe_status)
438 return POLLIN | POLLRDNORM;
439
440 return 0;
441 }
442
/*
 * release() for a per-source FD: unregister the source and, if a
 * payload is still pending for it, drain the pipe so the single
 * shared pipe does not stay blocked for other sources.
 */
static int papr_hvpipe_handle_release(struct inode *inode,
		struct file *file)
{
	struct hvpipe_source_info *src_info;

	/*
	 * Hold the lock, remove source from src_list, reset the
	 * hvpipe status and release the lock to prevent any race
	 * with message event IRQ.
	 */
	spin_lock(&hvpipe_src_list_lock);
	src_info = file->private_data;
	list_del(&src_info->list);
	file->private_data = NULL;
	/*
	 * If the pipe for this specific source has any pending
	 * payload, issue recv HVPIPE RTAS so that pipe will not
	 * be blocked.
	 */
	if (src_info->hvpipe_status & HVPIPE_MSG_AVAILABLE) {
		src_info->hvpipe_status = 0;
		/* Drop the lock before the (sleeping) RTAS drain. */
		spin_unlock(&hvpipe_src_list_lock);
		/* NULL buf: discard the payload, just unblock the pipe. */
		hvpipe_rtas_recv_msg(NULL, 0);
	} else
		spin_unlock(&hvpipe_src_list_lock);

	kfree(src_info);
	return 0;
}
472
/* File operations for the per-source FDs returned by the create-handle ioctl. */
static const struct file_operations papr_hvpipe_handle_ops = {
	.read = papr_hvpipe_handle_read,
	.write = papr_hvpipe_handle_write,
	.release = papr_hvpipe_handle_release,
	.poll = papr_hvpipe_handle_poll,
};
479
/*
 * Allocate a per-source handle bound to @srcID and return a new FD
 * for it. Only one open FD per source is allowed at a time; a second
 * request for the same source fails with -EALREADY.
 */
static int papr_hvpipe_dev_create_handle(u32 srcID)
{
	/* Auto-freed on early return unless ownership is retained below. */
	struct hvpipe_source_info *src_info __free(kfree) = NULL;

	spin_lock(&hvpipe_src_list_lock);
	/*
	 * Do not allow more than one process communicates with
	 * each source.
	 */
	src_info = hvpipe_find_source(srcID);
	if (src_info) {
		spin_unlock(&hvpipe_src_list_lock);
		pr_err("pid(%d) is already using the source(%d)\n",
				src_info->tsk->pid, srcID);
		return -EALREADY;
	}
	spin_unlock(&hvpipe_src_list_lock);

	src_info = kzalloc_obj(*src_info, GFP_KERNEL_ACCOUNT);
	if (!src_info)
		return -ENOMEM;

	src_info->srcID = srcID;
	src_info->tsk = current;
	init_waitqueue_head(&src_info->recv_wqh);

	/*
	 * Reserve an FD plus anon-inode file; the FD only becomes
	 * visible to user space at fd_publish() below.
	 */
	FD_PREPARE(fdf, O_RDONLY | O_CLOEXEC,
		anon_inode_getfile("[papr-hvpipe]", &papr_hvpipe_handle_ops,
		(void *)src_info, O_RDWR));
	if (fdf.err)
		return fdf.err;

	/*
	 * Ownership of src_info moved to the file; disarm __free(kfree).
	 * NOTE(review): if retain_and_null_ptr() NULLs the local pointer,
	 * the list_add() below would dereference NULL — confirm the
	 * macro's semantics, or take the reference before nulling.
	 */
	retain_and_null_ptr(src_info);
	spin_lock(&hvpipe_src_list_lock);
	/*
	 * If two processes are executing ioctl() for the same
	 * source ID concurrently, prevent the second process to
	 * acquire FD.
	 */
	if (hvpipe_find_source(srcID)) {
		spin_unlock(&hvpipe_src_list_lock);
		return -EALREADY;
	}
	list_add(&src_info->list, &hvpipe_src_list);
	spin_unlock(&hvpipe_src_list_lock);
	return fd_publish(fdf);
}
527
528 /*
529 * Top-level ioctl handler for /dev/papr_hvpipe
530 *
531 * Use separate FD for each source (exa :HMC). So ioctl is called
532 * with source ID which returns FD.
533 */
papr_hvpipe_dev_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)534 static long papr_hvpipe_dev_ioctl(struct file *filp, unsigned int ioctl,
535 unsigned long arg)
536 {
537 u32 __user *argp = (void __user *)arg;
538 u32 srcID;
539 long ret;
540
541 /*
542 * Return -ENXIO during migration
543 */
544 if (!hvpipe_feature)
545 return -ENXIO;
546
547 if (get_user(srcID, argp))
548 return -EFAULT;
549
550 /*
551 * Support only HMC source right now
552 */
553 if (!(srcID & HVPIPE_HMC_ID_MASK))
554 return -EINVAL;
555
556 switch (ioctl) {
557 case PAPR_HVPIPE_IOC_CREATE_HANDLE:
558 ret = papr_hvpipe_dev_create_handle(srcID);
559 break;
560 default:
561 ret = -ENOIOCTLCMD;
562 break;
563 }
564
565 return ret;
566 }
567
568 /*
569 * papr_hvpipe_work_fn - called to issue recv HVPIPE RTAS for
570 * sources that are not monitored by user space so that pipe
571 * will not be blocked.
572 */
papr_hvpipe_work_fn(struct work_struct * work)573 static void papr_hvpipe_work_fn(struct work_struct *work)
574 {
575 hvpipe_rtas_recv_msg(NULL, 0);
576 }
577
578 /*
579 * HVPIPE event message IRQ handler.
580 * The hypervisor sends event IRQ if the partition has payload
581 * and generates another event only after payload is read with
582 * recv HVPIPE RTAS.
583 */
/*
 * HVPIPE event message IRQ handler.
 * The hypervisor sends event IRQ if the partition has payload
 * and generates another event only after payload is read with
 * recv HVPIPE RTAS.
 */
static irqreturn_t hvpipe_event_interrupt(int irq, void *dev_id)
{
	struct hvpipe_event_buf *hvpipe_event;
	struct pseries_errorlog *pseries_log;
	struct hvpipe_source_info *src_info;
	struct rtas_error_log *elog;
	int rc;

	/* Pull the event error log describing this interrupt from RTAS. */
	rc = rtas_call(hvpipe_check_exception_token, 6, 1, NULL,
			RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq),
			RTAS_HVPIPE_MSG_EVENTS, 1, __pa(&hvpipe_ras_buf),
			rtas_get_error_log_max());

	if (rc != 0) {
		pr_err_ratelimited("unexpected hvpipe-event-notification failed %d\n", rc);
		return IRQ_HANDLED;
	}

	elog = (struct rtas_error_log *)hvpipe_ras_buf;
	if (unlikely(rtas_error_type(elog) != RTAS_TYPE_HVPIPE)) {
		pr_warn_ratelimited("Unexpected event type %d\n",
				rtas_error_type(elog));
		return IRQ_HANDLED;
	}

	/*
	 * NOTE(review): get_pseries_errorlog() can return NULL when the
	 * section is absent — confirm the hypervisor always supplies the
	 * HVPIPE event section, else this dereference needs a check.
	 */
	pseries_log = get_pseries_errorlog(elog,
			PSERIES_ELOG_SECT_ID_HVPIPE_EVENT);
	hvpipe_event = (struct hvpipe_event_buf *)pseries_log->data;

	/*
	 * The hypervisor notifies partition when the payload is
	 * available to read with recv HVPIPE RTAS and it will not
	 * notify another event for any source until the previous
	 * payload is read. Means the pipe is blocked in the
	 * hypervisor until the payload is read.
	 *
	 * If the source is ready to accept payload and wakeup the
	 * corresponding FD. Hold lock and update hvpipe_status
	 * and this lock is needed in case the user space process
	 * is in release FD instead of poll() so that release()
	 * reads the payload to unblock pipe before closing FD.
	 *
	 * otherwise (means no other user process waiting for the
	 * payload, issue recv HVPIPE RTAS (papr_hvpipe_work_fn())
	 * to unblock pipe.
	 */
	spin_lock(&hvpipe_src_list_lock);
	src_info = hvpipe_find_source(be32_to_cpu(hvpipe_event->srcID));
	if (src_info) {
		u32 flags = 0;

		/* LOST_CONNECTION takes precedence over MSG_AVAILABLE. */
		if (hvpipe_event->event_type & HVPIPE_LOST_CONNECTION)
			flags = HVPIPE_LOST_CONNECTION;
		else if (hvpipe_event->event_type & HVPIPE_MSG_AVAILABLE)
			flags = HVPIPE_MSG_AVAILABLE;

		src_info->hvpipe_status |= flags;
		/* Wake any poll()/read() waiter for this source. */
		wake_up(&src_info->recv_wqh);
		spin_unlock(&hvpipe_src_list_lock);
	} else {
		spin_unlock(&hvpipe_src_list_lock);
		/*
		 * user space is not waiting on this source. So
		 * execute receive pipe RTAS so that pipe will not
		 * be blocked.
		 */
		if (hvpipe_event->event_type & HVPIPE_MSG_AVAILABLE)
			queue_work(papr_hvpipe_wq, papr_hvpipe_work);
	}

	return IRQ_HANDLED;
}
656
657 /*
658 * Enable hvpipe by system parameter set with parameter
659 * token = 64 and with 1 byte buffer data:
660 * 0 = hvpipe not in use/disable
661 * 1 = hvpipe in use/enable
662 */
/*
 * Enable or disable hvpipe via the system parameter (token 64)
 * with a 1 byte buffer:
 * 0 = hvpipe not in use/disable
 * 1 = hvpipe in use/enable
 *
 * Returns 0 on success or a negative errno.
 */
static int set_hvpipe_sys_param(u8 val)
{
	struct papr_sysparm_buf *buf;
	int ret;

	buf = papr_sysparm_buf_alloc();
	if (!buf)
		return -ENOMEM;

	buf->len = cpu_to_be16(1);
	buf->val[0] = val;
	ret = papr_sysparm_set(PAPR_SYSPARM_HVPIPE_ENABLE, buf);
	if (ret)
		/*
		 * This path also runs for val == 0 (disable), so do not
		 * claim "enable" failed; report the attempted value.
		 */
		pr_err("Can not set hvpipe system parameter to %u, error %d\n",
				val, ret);

	papr_sysparm_buf_free(buf);

	return ret;
}
682
enable_hvpipe_IRQ(void)683 static int __init enable_hvpipe_IRQ(void)
684 {
685 struct device_node *np;
686
687 hvpipe_check_exception_token = rtas_function_token(RTAS_FN_CHECK_EXCEPTION);
688 if (hvpipe_check_exception_token == RTAS_UNKNOWN_SERVICE)
689 return -ENODEV;
690
691 /* hvpipe events */
692 np = of_find_node_by_path("/event-sources/ibm,hvpipe-msg-events");
693 if (np != NULL) {
694 request_event_sources_irqs(np, hvpipe_event_interrupt,
695 "HPIPE_EVENT");
696 of_node_put(np);
697 } else {
698 pr_err("Can not enable hvpipe event IRQ\n");
699 return -ENODEV;
700 }
701
702 return 0;
703 }
704
hvpipe_migration_handler(int action)705 void hvpipe_migration_handler(int action)
706 {
707 pr_info("hvpipe migration event %d\n", action);
708
709 /*
710 * HVPIPE is not used (Failed to create /dev/papr-hvpipe).
711 * So nothing to do for migration.
712 */
713 if (!papr_hvpipe_work)
714 return;
715
716 switch (action) {
717 case HVPIPE_SUSPEND:
718 if (hvpipe_feature) {
719 /*
720 * Disable hvpipe_feature to the user space.
721 * It will be enabled with RESUME event.
722 */
723 hvpipe_feature = false;
724 /*
725 * set system parameter hvpipe 'disable'
726 */
727 set_hvpipe_sys_param(0);
728 }
729 break;
730 case HVPIPE_RESUME:
731 /*
732 * set system parameter hvpipe 'enable'
733 */
734 if (!set_hvpipe_sys_param(1))
735 hvpipe_feature = true;
736 else
737 pr_err("hvpipe is not enabled after migration\n");
738
739 break;
740 }
741 }
742
/* Top-level /dev/papr-hvpipe only implements the create-handle ioctl. */
static const struct file_operations papr_hvpipe_ops = {
	.unlocked_ioctl = papr_hvpipe_dev_ioctl,
};

/* Misc device node: /dev/papr-hvpipe with a dynamically assigned minor. */
static struct miscdevice papr_hvpipe_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "papr-hvpipe",
	.fops = &papr_hvpipe_ops,
};
752
papr_hvpipe_init(void)753 static int __init papr_hvpipe_init(void)
754 {
755 int ret;
756
757 if (!of_find_property(rtas.dev, "ibm,hypervisor-pipe-capable",
758 NULL))
759 return -ENODEV;
760
761 if (!rtas_function_implemented(RTAS_FN_IBM_SEND_HVPIPE_MSG) ||
762 !rtas_function_implemented(RTAS_FN_IBM_RECEIVE_HVPIPE_MSG))
763 return -ENODEV;
764
765 papr_hvpipe_work = kzalloc_obj(struct work_struct, GFP_ATOMIC);
766 if (!papr_hvpipe_work)
767 return -ENOMEM;
768
769 INIT_WORK(papr_hvpipe_work, papr_hvpipe_work_fn);
770
771 papr_hvpipe_wq = alloc_ordered_workqueue("papr hvpipe workqueue", 0);
772 if (!papr_hvpipe_wq) {
773 ret = -ENOMEM;
774 goto out;
775 }
776
777 ret = enable_hvpipe_IRQ();
778 if (!ret) {
779 ret = set_hvpipe_sys_param(1);
780 if (!ret)
781 ret = misc_register(&papr_hvpipe_dev);
782 }
783
784 if (!ret) {
785 pr_info("hvpipe feature is enabled\n");
786 hvpipe_feature = true;
787 return 0;
788 }
789
790 pr_err("hvpipe feature is not enabled %d\n", ret);
791 destroy_workqueue(papr_hvpipe_wq);
792 out:
793 kfree(papr_hvpipe_work);
794 papr_hvpipe_work = NULL;
795 return ret;
796 }
797 machine_device_initcall(pseries, papr_hvpipe_init);
798