// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * virtio_rtc driver core
 *
 * Copyright (C) 2022-2024 OpenSynergy GmbH
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>

#include <uapi/linux/virtio_rtc.h>

#include "virtio_rtc_internal.h"

#define VIORTC_ALARMQ_BUF_CAP sizeof(union virtio_rtc_notif_alarmq)

/* virtqueue order */
enum {
	VIORTC_REQUESTQ,
	VIORTC_ALARMQ,
	VIORTC_MAX_NR_QUEUES,
};

/**
 * struct viortc_vq - virtqueue abstraction
 * @vq: virtqueue
 * @lock: protects access to vq
 */
struct viortc_vq {
	struct virtqueue *vq;
	spinlock_t lock;
};

/**
 * struct viortc_dev - virtio_rtc device data
 * @vdev: virtio device
 * @viortc_class: RTC class wrapper for UTC-like clock, NULL if not available
 * @vqs: virtqueues
 * @clocks_to_unregister: Clock references, which are only used during device
 *                        removal.
 *			  For other uses, there would be a race between device
 *			  creation and setting the pointers here.
 * @alarmq_bufs: alarmq buffers list
 * @num_alarmq_bufs: # of alarmq buffers
 * @num_clocks: # of virtio_rtc clocks
 */
struct viortc_dev {
	struct virtio_device *vdev;
	struct viortc_class *viortc_class;
	struct viortc_vq vqs[VIORTC_MAX_NR_QUEUES];
	struct viortc_ptp_clock **clocks_to_unregister;
	void **alarmq_bufs;
	unsigned int num_alarmq_bufs;
	u16 num_clocks;
};

/**
 * struct viortc_msg - Message requested by driver, responded by device.
 * @viortc: device data
 * @req: request buffer
 * @resp: response buffer
 * @responded: virtqueue callback signals response reception
 * @refcnt: Message reference count, message and buffers will be deallocated
 *	    once 0. refcnt is decremented in the virtqueue callback and in the
 *	    thread waiting on the responded completion.
 *          If a message response wait function times out, the message will be
 *          freed upon late reception (refcnt will reach 0 in the callback), or
 *          device removal.
 * @req_size: size of request in bytes
 * @resp_cap: maximum size of response in bytes
 * @resp_actual_size: actual size of response
 */
struct viortc_msg {
	struct viortc_dev *viortc;
	void *req;
	void *resp;
	struct completion responded;
	refcount_t refcnt;
	unsigned int req_size;
	unsigned int resp_cap;
	unsigned int resp_actual_size;
};

/**
 * viortc_class_from_dev() - Get RTC class object from virtio device.
 * @dev: virtio device
 *
 * Context: Any context.
 * Return: RTC class object if available, ERR_PTR otherwise.
 */
struct viortc_class *viortc_class_from_dev(struct device *dev)
{
	struct virtio_device *vdev;
	struct viortc_dev *viortc;

	vdev = container_of(dev, typeof(*vdev), dev);
	viortc = vdev->priv;

	return viortc->viortc_class ?: ERR_PTR(-ENODEV);
}
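/*
 * Illustrative sketch, not part of this driver: a caller that holds the
 * virtio device's &struct device (e.g. RTC class glue code) would look up the
 * class wrapper roughly as follows; the error handling shown is only an
 * example.
 *
 *	struct viortc_class *viortc_class = viortc_class_from_dev(dev);
 *
 *	if (IS_ERR(viortc_class))
 *		return PTR_ERR(viortc_class);
 */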

/**
 * viortc_alarms_supported() - Whether device and driver support alarms.
 * @vdev: virtio device
 *
 * NB: Device and driver may not support alarms for the same clocks.
 *
 * Context: Any context.
 * Return: True if both device and driver can support alarms.
 */
static bool viortc_alarms_supported(struct virtio_device *vdev)
{
	return IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
	       virtio_has_feature(vdev, VIRTIO_RTC_F_ALARM);
}

/**
 * viortc_feed_vq() - Make a device write-only buffer available.
 * @viortc: device data
 * @vq: notification virtqueue
 * @buf: buffer
 * @buf_len: buffer capacity in bytes
 * @data: token, identifying buffer
 *
 * Context: Caller must prevent concurrent access to vq.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_feed_vq(struct viortc_dev *viortc, struct virtqueue *vq,
			  void *buf, unsigned int buf_len, void *data)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, buf_len);

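	/*
	 * GFP_ATOMIC: callers may hold the vq spinlock and may run in
	 * virtqueue callback (atomic) context, so sleeping allocations are
	 * not an option here.
	 */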
	return virtqueue_add_inbuf(vq, &sg, 1, data, GFP_ATOMIC);
}

/**
 * viortc_msg_init() - Allocate and initialize requestq message.
 * @viortc: device data
 * @msg_type: virtio_rtc message type
 * @req_size: size of request buffer to be allocated
 * @resp_cap: size of response buffer to be allocated
 *
 * Initializes the message refcnt to 2. The refcnt will be decremented once in
 * the virtqueue callback, and once in the thread waiting on the message (on
 * completion or timeout).
 *
 * Context: Process context.
 * Return: Allocated message on success, NULL on allocation failure.
 */
static struct viortc_msg *viortc_msg_init(struct viortc_dev *viortc,
					  u16 msg_type, unsigned int req_size,
					  unsigned int resp_cap)
{
	struct device *dev = &viortc->vdev->dev;
	struct virtio_rtc_req_head *req_head;
	struct viortc_msg *msg;

	msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return NULL;

	init_completion(&msg->responded);

	msg->req = devm_kzalloc(dev, req_size, GFP_KERNEL);
	if (!msg->req)
		goto err_free_msg;

	req_head = msg->req;

	msg->resp = devm_kzalloc(dev, resp_cap, GFP_KERNEL);
	if (!msg->resp)
		goto err_free_msg_req;

	msg->viortc = viortc;
	msg->req_size = req_size;
	msg->resp_cap = resp_cap;

	refcount_set(&msg->refcnt, 2);

	req_head->msg_type = virtio_cpu_to_le(msg_type, req_head->msg_type);

	return msg;

err_free_msg_req:
	devm_kfree(dev, msg->req);

err_free_msg:
	devm_kfree(dev, msg);

	return NULL;
}

/**
 * viortc_msg_release() - Decrement message refcnt, potentially free message.
 * @msg: message requested by driver
 *
 * Context: Any context.
 */
static void viortc_msg_release(struct viortc_msg *msg)
{
	struct device *dev;

	if (refcount_dec_and_test(&msg->refcnt)) {
		dev = &msg->viortc->vdev->dev;

		devm_kfree(dev, msg->req);
		devm_kfree(dev, msg->resp);
		devm_kfree(dev, msg);
	}
}

/**
 * viortc_do_cb() - generic virtqueue callback logic
 * @vq: virtqueue
 * @handle_buf: function to process a used buffer
 *
 * Context: virtqueue callback, typically interrupt. Takes and releases vq lock.
 */
static void viortc_do_cb(struct virtqueue *vq,
			 void (*handle_buf)(void *token, unsigned int len,
					    struct virtqueue *vq,
					    struct viortc_vq *viortc_vq,
					    struct viortc_dev *viortc))
{
	struct viortc_dev *viortc = vq->vdev->priv;
	struct viortc_vq *viortc_vq;
	bool cb_enabled = true;
	unsigned long flags;
	unsigned int len;
	void *token;

	viortc_vq = &viortc->vqs[vq->index];

	for (;;) {
		spin_lock_irqsave(&viortc_vq->lock, flags);

		if (cb_enabled) {
			virtqueue_disable_cb(vq);
			cb_enabled = false;
		}

		token = virtqueue_get_buf(vq, &len);
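		/*
		 * No used buffer: try to re-enable callbacks and stop once the
		 * queue is confirmed empty. If virtqueue_enable_cb() reports
		 * that more buffers may have been used in the meantime, loop
		 * again and poll the queue once more.
		 */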
		if (!token) {
			if (virtqueue_enable_cb(vq)) {
				spin_unlock_irqrestore(&viortc_vq->lock, flags);
				return;
			}
			cb_enabled = true;
		}

		spin_unlock_irqrestore(&viortc_vq->lock, flags);

		if (token)
			handle_buf(token, len, vq, viortc_vq, viortc);
	}
}

/**
 * viortc_requestq_hdlr() - process a requestq used buffer
 * @token: token identifying the buffer
 * @len: bytes written by device
 * @vq: virtqueue
 * @viortc_vq: device specific data for virtqueue
 * @viortc: device data
 *
 * Signals completion for each received message.
 *
 * Context: virtqueue callback
 */
static void viortc_requestq_hdlr(void *token, unsigned int len,
				 struct virtqueue *vq,
				 struct viortc_vq *viortc_vq,
				 struct viortc_dev *viortc)
{
	struct viortc_msg *msg = token;

	msg->resp_actual_size = len;

	complete(&msg->responded);
	viortc_msg_release(msg);
}

/**
 * viortc_cb_requestq() - callback for requestq
 * @vq: virtqueue
 *
 * Context: virtqueue callback
 */
static void viortc_cb_requestq(struct virtqueue *vq)
{
	viortc_do_cb(vq, viortc_requestq_hdlr);
}

/**
 * viortc_alarmq_hdlr() - process an alarmq used buffer
 * @token: token identifying the buffer
 * @len: bytes written by device
 * @vq: virtqueue
 * @viortc_vq: device specific data for virtqueue
 * @viortc: device data
 *
 * Processes a VIRTIO_RTC_NOTIF_ALARM notification by calling the RTC class
 * driver. Makes the buffer available again.
 *
 * Context: virtqueue callback
 */
static void viortc_alarmq_hdlr(void *token, unsigned int len,
			       struct virtqueue *vq,
			       struct viortc_vq *viortc_vq,
			       struct viortc_dev *viortc)
{
	struct virtio_rtc_notif_alarm *notif = token;
	struct virtio_rtc_notif_head *head = token;
	unsigned long flags;
	u16 clock_id;
	bool notify;

	if (len < sizeof(*head)) {
		dev_err_ratelimited(&viortc->vdev->dev,
				    "%s: ignoring notification with short header\n",
				    __func__);
		goto feed_vq;
	}

	if (virtio_le_to_cpu(head->msg_type) != VIRTIO_RTC_NOTIF_ALARM) {
		dev_err_ratelimited(&viortc->vdev->dev,
				    "%s: ignoring unknown notification type 0x%x\n",
				    __func__, virtio_le_to_cpu(head->msg_type));
		goto feed_vq;
	}

	if (len < sizeof(*notif)) {
		dev_err_ratelimited(&viortc->vdev->dev,
				    "%s: ignoring too small alarm notification\n",
				    __func__);
		goto feed_vq;
	}

	clock_id = virtio_le_to_cpu(notif->clock_id);

	if (!viortc->viortc_class)
		dev_warn_ratelimited(&viortc->vdev->dev,
				     "ignoring alarm, no RTC class device available\n");
	else
		viortc_class_alarm(viortc->viortc_class, clock_id);

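	/*
	 * Always hand the buffer back to the device, even if the notification
	 * was malformed or ignored, so that later notifications are not lost.
	 */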
feed_vq:
	spin_lock_irqsave(&viortc_vq->lock, flags);

	if (viortc_feed_vq(viortc, vq, notif, VIORTC_ALARMQ_BUF_CAP, token))
		dev_warn(&viortc->vdev->dev,
			 "%s: failed to re-expose input buffer\n", __func__);

	notify = virtqueue_kick_prepare(vq);

	spin_unlock_irqrestore(&viortc_vq->lock, flags);

	if (notify)
		virtqueue_notify(vq);
}

/**
 * viortc_cb_alarmq() - callback for alarmq
 * @vq: virtqueue
 *
 * Context: virtqueue callback
 */
static void viortc_cb_alarmq(struct virtqueue *vq)
{
	viortc_do_cb(vq, viortc_alarmq_hdlr);
}

/**
 * viortc_get_resp_errno() - converts virtio_rtc errnos to system errnos
 * @resp_head: message response header
 *
 * Return: negative system errno, or 0
 */
static int viortc_get_resp_errno(struct virtio_rtc_resp_head *resp_head)
{
	switch (virtio_le_to_cpu(resp_head->status)) {
	case VIRTIO_RTC_S_OK:
		return 0;
	case VIRTIO_RTC_S_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case VIRTIO_RTC_S_EINVAL:
		return -EINVAL;
	case VIRTIO_RTC_S_ENODEV:
		return -ENODEV;
	case VIRTIO_RTC_S_EIO:
	default:
		return -EIO;
	}
}

/**
 * viortc_msg_xfer() - send message request, wait until message response
 * @vq: virtqueue
 * @msg: message with driver request
 * @timeout_jiffies: message response timeout, 0 for no timeout
 *
 * Context: Process context. Takes and releases vq.lock. May sleep.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_msg_xfer(struct viortc_vq *vq, struct viortc_msg *msg,
			   unsigned long timeout_jiffies)
{
	struct scatterlist out_sg[1];
	struct scatterlist in_sg[1];
	struct scatterlist *sgs[2];
	unsigned long flags;
	long timeout_ret;
	bool notify;
	int ret;

	sgs[0] = out_sg;
	sgs[1] = in_sg;

	sg_init_one(out_sg, msg->req, msg->req_size);
	sg_init_one(in_sg, msg->resp, msg->resp_cap);

	spin_lock_irqsave(&vq->lock, flags);

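	/*
	 * Expose one device-readable descriptor for the request, followed by
	 * one device-writable descriptor for the response (out_sgs = 1,
	 * in_sgs = 1).
	 */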
	ret = virtqueue_add_sgs(vq->vq, sgs, 1, 1, msg, GFP_ATOMIC);
	if (ret) {
		spin_unlock_irqrestore(&vq->lock, flags);
		/*
		 * Release in place of the response callback, which will never
		 * come.
		 */
		viortc_msg_release(msg);
		return ret;
	}

	notify = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->lock, flags);

	if (notify)
		virtqueue_notify(vq->vq);

	if (timeout_jiffies) {
		timeout_ret = wait_for_completion_interruptible_timeout(
			&msg->responded, timeout_jiffies);

		if (!timeout_ret)
			return -ETIMEDOUT;
		else if (timeout_ret < 0)
			return (int)timeout_ret;
	} else {
		ret = wait_for_completion_interruptible(&msg->responded);
		if (ret)
			return ret;
	}

	if (msg->resp_actual_size < sizeof(struct virtio_rtc_resp_head))
		return -EINVAL;

	ret = viortc_get_resp_errno(msg->resp);
	if (ret)
		return ret;

	/*
	 * There is not yet a case where returning a short message would make
	 * sense, so consider any deviation an error.
	 */
	if (msg->resp_actual_size != msg->resp_cap)
		return -EINVAL;

	return 0;
}

/*
 * common message handle macros for messages of different types
 */

/**
 * VIORTC_DECLARE_MSG_HDL_ONSTACK() - declare message handle on stack
 * @hdl: message handle name
 * @msg_id: message type id
 * @msg_req: message request type
 * @msg_resp: message response type
 */
#define VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, msg_id, msg_req, msg_resp)         \
	struct {                                                               \
		struct viortc_msg *msg;                                        \
		msg_req *req;                                                  \
		msg_resp *resp;                                                \
		unsigned int req_size;                                         \
		unsigned int resp_cap;                                         \
		u16 msg_type;                                                  \
	} hdl = {                                                              \
		NULL, NULL, NULL, sizeof(msg_req), sizeof(msg_resp), (msg_id), \
	}

/**
 * VIORTC_MSG() - extract message from message handle
 * @hdl: message handle
 *
 * Return: struct viortc_msg
 */
#define VIORTC_MSG(hdl) ((hdl).msg)

/**
 * VIORTC_MSG_INIT() - initialize message handle
 * @hdl: message handle
 * @viortc: device data (struct viortc_dev *)
 *
 * Context: Process context.
 * Return: 0 on success, -ENOMEM otherwise.
 */
#define VIORTC_MSG_INIT(hdl, viortc)                                         \
	({                                                                   \
		typeof(hdl) *_hdl = &(hdl);                                  \
									     \
		_hdl->msg = viortc_msg_init((viortc), _hdl->msg_type,        \
					    _hdl->req_size, _hdl->resp_cap); \
		if (_hdl->msg) {                                             \
			_hdl->req = _hdl->msg->req;                          \
			_hdl->resp = _hdl->msg->resp;                        \
		}                                                            \
		_hdl->msg ? 0 : -ENOMEM;                                     \
	})

/**
 * VIORTC_MSG_WRITE() - write a request message field
 * @hdl: message handle
 * @dest_member: request message field name
 * @src_ptr: pointer to data of compatible type
 *
 * Writes the field in little-endian format.
 */
#define VIORTC_MSG_WRITE(hdl, dest_member, src_ptr)                         \
	do {                                                                \
		typeof(hdl) _hdl = (hdl);                                   \
		typeof(src_ptr) _src_ptr = (src_ptr);                       \
									    \
		/* Sanity check: must match the member's type */            \
		typecheck(typeof(virtio_le_to_cpu(_hdl.req->dest_member)),  \
			  *_src_ptr);                                       \
									    \
		_hdl.req->dest_member =                                     \
			virtio_cpu_to_le(*_src_ptr, _hdl.req->dest_member); \
	} while (0)

/**
 * VIORTC_MSG_READ() - read from a response message field
 * @hdl: message handle
 * @src_member: response message field name
 * @dest_ptr: pointer to data of compatible type
 *
 * Converts from little-endian format and writes to dest_ptr.
 */
#define VIORTC_MSG_READ(hdl, src_member, dest_ptr)                          \
	do {                                                                \
		typeof(dest_ptr) _dest_ptr = (dest_ptr);                    \
									    \
		/* Sanity check: must match the member's type */            \
		typecheck(typeof(virtio_le_to_cpu((hdl).resp->src_member)), \
			  *_dest_ptr);                                      \
									    \
		*_dest_ptr = virtio_le_to_cpu((hdl).resp->src_member);      \
	} while (0)

/*
 * read requests
 */

/** timeout for clock readings, where timeouts are considered non-fatal */
#define VIORTC_MSG_READ_TIMEOUT secs_to_jiffies(60)

/**
 * viortc_read() - VIRTIO_RTC_REQ_READ wrapper
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @reading: clock reading [ns]
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
int viortc_read(struct viortc_dev *viortc, u16 vio_clk_id, u64 *reading)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ,
				       struct virtio_rtc_req_read,
				       struct virtio_rtc_resp_read);
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      VIORTC_MSG_READ_TIMEOUT);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

	VIORTC_MSG_READ(hdl, clock_reading, reading);

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/**
 * viortc_read_cross() - VIRTIO_RTC_REQ_READ_CROSS wrapper
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @hw_counter: virtio_rtc HW counter type
 * @reading: clock reading [ns]
 * @cycles: HW counter cycles during clock reading
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
int viortc_read_cross(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
		      u64 *reading, u64 *cycles)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_CROSS,
				       struct virtio_rtc_req_read_cross,
				       struct virtio_rtc_resp_read_cross);
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
	VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      VIORTC_MSG_READ_TIMEOUT);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

	VIORTC_MSG_READ(hdl, clock_reading, reading);
	VIORTC_MSG_READ(hdl, counter_cycles, cycles);

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/*
 * control requests
 */

/**
 * viortc_cfg() - VIRTIO_RTC_REQ_CFG wrapper
 * @viortc: device data
 * @num_clocks: # of virtio_rtc clocks
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_cfg(struct viortc_dev *viortc, u16 *num_clocks)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CFG,
				       struct virtio_rtc_req_cfg,
				       struct virtio_rtc_resp_cfg);
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      0);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

	VIORTC_MSG_READ(hdl, num_clocks, num_clocks);

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/**
 * viortc_clock_cap() - VIRTIO_RTC_REQ_CLOCK_CAP wrapper
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @type: virtio_rtc clock type
 * @leap_second_smearing: virtio_rtc smearing variant
 * @flags: struct virtio_rtc_resp_clock_cap.flags
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_clock_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 *type,
			    u8 *leap_second_smearing, u8 *flags)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CLOCK_CAP,
				       struct virtio_rtc_req_clock_cap,
				       struct virtio_rtc_resp_clock_cap);
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      0);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

	VIORTC_MSG_READ(hdl, type, type);
	VIORTC_MSG_READ(hdl, leap_second_smearing, leap_second_smearing);
	VIORTC_MSG_READ(hdl, flags, flags);

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/**
 * viortc_cross_cap() - VIRTIO_RTC_REQ_CROSS_CAP wrapper
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @hw_counter: virtio_rtc HW counter type
 * @supported: xtstamping is supported for the vio_clk_id/hw_counter pair
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
int viortc_cross_cap(struct viortc_dev *viortc, u16 vio_clk_id, u8 hw_counter,
		     bool *supported)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_CROSS_CAP,
				       struct virtio_rtc_req_cross_cap,
				       struct virtio_rtc_resp_cross_cap);
	u8 flags;
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
	VIORTC_MSG_WRITE(hdl, hw_counter, &hw_counter);

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      0);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

	VIORTC_MSG_READ(hdl, flags, &flags);
	*supported = !!(flags & VIRTIO_RTC_FLAG_CROSS_CAP);

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/**
 * viortc_read_alarm() - VIRTIO_RTC_REQ_READ_ALARM wrapper
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @alarm_time: alarm time in ns
 * @enabled: whether alarm is enabled
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
int viortc_read_alarm(struct viortc_dev *viortc, u16 vio_clk_id,
		      u64 *alarm_time, bool *enabled)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_READ_ALARM,
				       struct virtio_rtc_req_read_alarm,
				       struct virtio_rtc_resp_read_alarm);
	u8 flags;
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      0);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

	VIORTC_MSG_READ(hdl, alarm_time, alarm_time);
	VIORTC_MSG_READ(hdl, flags, &flags);

	*enabled = !!(flags & VIRTIO_RTC_FLAG_ALARM_ENABLED);

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/**
 * viortc_set_alarm() - VIRTIO_RTC_REQ_SET_ALARM wrapper
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @alarm_time: alarm time in ns
 * @alarm_enable: enable or disable alarm
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
int viortc_set_alarm(struct viortc_dev *viortc, u16 vio_clk_id, u64 alarm_time,
		     bool alarm_enable)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM,
				       struct virtio_rtc_req_set_alarm,
				       struct virtio_rtc_resp_set_alarm);
	u8 flags = 0;
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	if (alarm_enable)
		flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;

	VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
	VIORTC_MSG_WRITE(hdl, alarm_time, &alarm_time);
	VIORTC_MSG_WRITE(hdl, flags, &flags);

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      0);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/**
 * viortc_set_alarm_enabled() - VIRTIO_RTC_REQ_SET_ALARM_ENABLED wrapper
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @alarm_enable: enable or disable alarm
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
int viortc_set_alarm_enabled(struct viortc_dev *viortc, u16 vio_clk_id,
			     bool alarm_enable)
{
	VIORTC_DECLARE_MSG_HDL_ONSTACK(hdl, VIRTIO_RTC_REQ_SET_ALARM_ENABLED,
				       struct virtio_rtc_req_set_alarm_enabled,
				       struct virtio_rtc_resp_set_alarm_enabled);
	u8 flags = 0;
	int ret;

	ret = VIORTC_MSG_INIT(hdl, viortc);
	if (ret)
		return ret;

	if (alarm_enable)
		flags |= VIRTIO_RTC_FLAG_ALARM_ENABLED;

	VIORTC_MSG_WRITE(hdl, clock_id, &vio_clk_id);
	VIORTC_MSG_WRITE(hdl, flags, &flags);

	ret = viortc_msg_xfer(&viortc->vqs[VIORTC_REQUESTQ], VIORTC_MSG(hdl),
			      0);
	if (ret) {
		dev_dbg(&viortc->vdev->dev, "%s: xfer returned %d\n", __func__,
			ret);
		goto out_release;
	}

out_release:
	viortc_msg_release(VIORTC_MSG(hdl));

	return ret;
}

/*
 * init, deinit
 */

/**
 * viortc_init_rtc_class_clock() - init and register a RTC class device
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @clock_type: virtio_rtc clock type
 * @flags: struct virtio_rtc_resp_clock_cap.flags
 *
 * The clock must be a UTC-like clock.
 *
 * Context: Process context.
 * Return: Positive if registered, zero if not supported by configuration,
 *         negative error code otherwise.
 */
static int viortc_init_rtc_class_clock(struct viortc_dev *viortc,
				       u16 vio_clk_id, u8 clock_type, u8 flags)
{
	struct virtio_device *vdev = viortc->vdev;
	struct viortc_class *viortc_class;
	struct device *dev = &vdev->dev;
	bool have_alarm;

	if (clock_type != VIRTIO_RTC_CLOCK_UTC_SMEARED) {
		dev_info(dev,
			 "not creating RTC class device for clock %d, which may step on leap seconds\n",
			 vio_clk_id);
		return 0;
	}

	if (viortc->viortc_class) {
		dev_warn_once(dev,
			      "multiple UTC-like clocks are present, but creating only one RTC class device\n");
		return 0;
	}

	have_alarm = viortc_alarms_supported(vdev) &&
		     !!(flags & VIRTIO_RTC_FLAG_ALARM_CAP);

	viortc_class = viortc_class_init(viortc, vio_clk_id, have_alarm, dev);
	if (IS_ERR(viortc_class))
		return PTR_ERR(viortc_class);

	viortc->viortc_class = viortc_class;

	if (have_alarm)
		devm_device_init_wakeup(dev);

	return viortc_class_register(viortc_class) ?: 1;
}

/**
 * viortc_init_ptp_clock() - init and register PTP clock
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 * @clock_type: virtio_rtc clock type
 * @leap_second_smearing: virtio_rtc leap second smearing
 *
 * Context: Process context.
 * Return: Positive if registered, zero if not supported by configuration,
 *         negative error code otherwise.
 */
static int viortc_init_ptp_clock(struct viortc_dev *viortc, u16 vio_clk_id,
				 u8 clock_type, u8 leap_second_smearing)
{
	struct device *dev = &viortc->vdev->dev;
	char ptp_clock_name[PTP_CLOCK_NAME_LEN];
	struct viortc_ptp_clock *vio_ptp;

	snprintf(ptp_clock_name, PTP_CLOCK_NAME_LEN,
		 "Virtio PTP type %hhu/variant %hhu", clock_type,
		 leap_second_smearing);

	vio_ptp = viortc_ptp_register(viortc, dev, vio_clk_id, ptp_clock_name);
	if (IS_ERR(vio_ptp)) {
		dev_err(dev, "failed to register PTP clock '%s'\n",
			ptp_clock_name);
		return PTR_ERR(vio_ptp);
	}

	viortc->clocks_to_unregister[vio_clk_id] = vio_ptp;

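	/*
	 * viortc_ptp_register() may return NULL when PTP clock support is not
	 * available in this configuration; report that as "not registered"
	 * (zero) rather than as an error.
	 */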
	return !!vio_ptp;
}

/**
 * viortc_init_clock() - init local representation of virtio_rtc clock
 * @viortc: device data
 * @vio_clk_id: virtio_rtc clock id
 *
 * Initializes PHC and/or RTC class device to represent virtio_rtc clock.
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_init_clock(struct viortc_dev *viortc, u16 vio_clk_id)
{
	u8 clock_type, leap_second_smearing, flags;
	bool is_exposed = false;
	int ret;

	ret = viortc_clock_cap(viortc, vio_clk_id, &clock_type,
			       &leap_second_smearing, &flags);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS) &&
	    (clock_type == VIRTIO_RTC_CLOCK_UTC ||
	     clock_type == VIRTIO_RTC_CLOCK_UTC_SMEARED ||
	     clock_type == VIRTIO_RTC_CLOCK_UTC_MAYBE_SMEARED)) {
		ret = viortc_init_rtc_class_clock(viortc, vio_clk_id,
						  clock_type, flags);
		if (ret < 0)
			return ret;
		if (ret > 0)
			is_exposed = true;
	}

	if (IS_ENABLED(CONFIG_VIRTIO_RTC_PTP)) {
		ret = viortc_init_ptp_clock(viortc, vio_clk_id, clock_type,
					    leap_second_smearing);
		if (ret < 0)
			return ret;
		if (ret > 0)
			is_exposed = true;
	}

	if (!is_exposed)
		dev_warn(&viortc->vdev->dev,
			 "cannot expose clock %d (type %d, variant %d) to userspace\n",
			 vio_clk_id, clock_type, leap_second_smearing);

	return 0;
}

/**
 * viortc_clocks_deinit() - unregister PHCs, stop RTC ops
 * @viortc: device data
 */
static void viortc_clocks_deinit(struct viortc_dev *viortc)
{
	struct viortc_ptp_clock *vio_ptp;
	unsigned int i;

	for (i = 0; i < viortc->num_clocks; i++) {
		vio_ptp = viortc->clocks_to_unregister[i];

		if (!vio_ptp)
			continue;

		viortc->clocks_to_unregister[i] = NULL;

		WARN_ON(viortc_ptp_unregister(vio_ptp, &viortc->vdev->dev));
	}

	if (viortc->viortc_class)
		viortc_class_stop(viortc->viortc_class);
}

/**
 * viortc_clocks_init() - init local representations of virtio_rtc clocks
 * @viortc: device data
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_clocks_init(struct viortc_dev *viortc)
{
	u16 num_clocks;
	unsigned int i;
	int ret;

	ret = viortc_cfg(viortc, &num_clocks);
	if (ret)
		return ret;

	if (num_clocks < 1) {
		dev_err(&viortc->vdev->dev, "device reported 0 clocks\n");
		return -ENODEV;
	}

	viortc->num_clocks = num_clocks;

	viortc->clocks_to_unregister =
		devm_kcalloc(&viortc->vdev->dev, num_clocks,
			     sizeof(*viortc->clocks_to_unregister), GFP_KERNEL);
	if (!viortc->clocks_to_unregister)
		return -ENOMEM;

	for (i = 0; i < num_clocks; i++) {
		ret = viortc_init_clock(viortc, i);
		if (ret)
			goto err_deinit_clocks;
	}

	return 0;

err_deinit_clocks:
	viortc_clocks_deinit(viortc);

	return ret;
}

/**
 * viortc_populate_vq() - populate alarmq with device-writable buffers
 * @viortc: device data
 * @viortc_vq: device specific data for virtqueue
 * @buf_cap: device-writable buffer size in bytes
 * @lock: lock queue during accesses
 *
 * Populates the alarmq with pre-allocated buffers.
 *
 * The caller is responsible for kicking the device.
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_populate_vq(struct viortc_dev *viortc,
			      struct viortc_vq *viortc_vq, u32 buf_cap,
			      bool lock)
{
	unsigned int num_elems, i;
	struct virtqueue *vq;
	unsigned long flags;
	void *buf;
	int ret;

	num_elems = viortc->num_alarmq_bufs;
	vq = viortc_vq->vq;

	for (i = 0; i < num_elems; i++) {
		buf = viortc->alarmq_bufs[i];

		if (lock) {
			spin_lock_irqsave(&viortc_vq->lock, flags);

			ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);

			spin_unlock_irqrestore(&viortc_vq->lock, flags);
		} else {
			ret = viortc_feed_vq(viortc, vq, buf, buf_cap, buf);
		}

		if (ret)
			return ret;
	}

	return 0;
}

/**
 * viortc_alloc_vq_bufs() - allocate alarmq buffers
 * @viortc: device data
 * @num_elems: # of buffers
 * @buf_cap: per-buffer device-writable bytes
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_alloc_vq_bufs(struct viortc_dev *viortc,
				unsigned int num_elems, u32 buf_cap)
{
	struct device *dev = &viortc->vdev->dev;
	void **buf_list;
	unsigned int i;
	void *buf;

	buf_list = devm_kcalloc(dev, num_elems, sizeof(*buf_list), GFP_KERNEL);
	if (!buf_list)
		return -ENOMEM;

	viortc->alarmq_bufs = buf_list;
	viortc->num_alarmq_bufs = num_elems;

	for (i = 0; i < num_elems; i++) {
		buf = devm_kzalloc(dev, buf_cap, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		buf_list[i] = buf;
	}

	return 0;
}

/**
 * viortc_init_vqs() - init virtqueues
 * @viortc: device data
 *
 * Inits virtqueues and associated data.
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_init_vqs(struct viortc_dev *viortc)
{
	struct virtqueue *vqs[VIORTC_MAX_NR_QUEUES];
	struct virtqueue_info vqs_info[] = {
		{ "requestq", viortc_cb_requestq },
		{ "alarmq", viortc_cb_alarmq },
	};
	struct virtio_device *vdev = viortc->vdev;
	unsigned int num_elems;
	int nr_queues, ret;
	bool have_alarms;

	have_alarms = viortc_alarms_supported(vdev);

	if (have_alarms)
		nr_queues = VIORTC_ALARMQ + 1;
	else
		nr_queues = VIORTC_REQUESTQ + 1;

	ret = virtio_find_vqs(vdev, nr_queues, vqs, vqs_info, NULL);
	if (ret)
		return ret;

	viortc->vqs[VIORTC_REQUESTQ].vq = vqs[VIORTC_REQUESTQ];
	spin_lock_init(&viortc->vqs[VIORTC_REQUESTQ].lock);

	if (have_alarms) {
		viortc->vqs[VIORTC_ALARMQ].vq = vqs[VIORTC_ALARMQ];
		spin_lock_init(&viortc->vqs[VIORTC_ALARMQ].lock);

		num_elems = virtqueue_get_vring_size(vqs[VIORTC_ALARMQ]);
		if (num_elems == 0)
			return -ENOSPC;

		if (!viortc->alarmq_bufs) {
			ret = viortc_alloc_vq_bufs(viortc, num_elems,
						   VIORTC_ALARMQ_BUF_CAP);
			if (ret)
				return ret;
		} else {
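			/*
			 * Buffers were already allocated, e.g. when restoring
			 * after freeze; the new vring may be smaller, so only
			 * expose as many buffers as it can hold.
			 */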
			viortc->num_alarmq_bufs =
				min(num_elems, viortc->num_alarmq_bufs);
		}
	}

	return 0;
}

/**
 * viortc_probe() - probe a virtio_rtc virtio device
 * @vdev: virtio device
 *
 * Context: Process context.
 * Return: Zero on success, negative error code otherwise.
 */
static int viortc_probe(struct virtio_device *vdev)
{
	struct viortc_vq *alarm_viortc_vq;
	struct virtqueue *alarm_vq;
	struct viortc_dev *viortc;
	unsigned long flags;
	bool notify;
	int ret;

	viortc = devm_kzalloc(&vdev->dev, sizeof(*viortc), GFP_KERNEL);
	if (!viortc)
		return -ENOMEM;

	vdev->priv = viortc;
	viortc->vdev = vdev;

	ret = viortc_init_vqs(viortc);
	if (ret)
		return ret;

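	/*
	 * The device must be marked ready before viortc_clocks_init() can
	 * exchange messages with it over the requestq.
	 */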
	virtio_device_ready(vdev);

	ret = viortc_clocks_init(viortc);
	if (ret)
		goto err_reset_vdev;

	if (viortc_alarms_supported(vdev)) {
		alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
		alarm_vq = alarm_viortc_vq->vq;

		ret = viortc_populate_vq(viortc, alarm_viortc_vq,
					 VIORTC_ALARMQ_BUF_CAP, true);
		if (ret)
			goto err_deinit_clocks;

		spin_lock_irqsave(&alarm_viortc_vq->lock, flags);
		notify = virtqueue_kick_prepare(alarm_vq);
		spin_unlock_irqrestore(&alarm_viortc_vq->lock, flags);

		if (notify && !virtqueue_notify(alarm_vq)) {
			ret = -EIO;
			goto err_deinit_clocks;
		}
	}

	return 0;

err_deinit_clocks:
	viortc_clocks_deinit(viortc);

err_reset_vdev:
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	return ret;
}

/**
 * viortc_remove() - remove a virtio_rtc virtio device
 * @vdev: virtio device
 */
static void viortc_remove(struct virtio_device *vdev)
{
	struct viortc_dev *viortc = vdev->priv;

	viortc_clocks_deinit(viortc);

	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}

static int viortc_freeze(struct virtio_device *dev)
{
	/*
	 * Do not reset the device, so that the device may still wake up the
	 * system through an alarmq notification.
	 */

	return 0;
}

static int viortc_restore(struct virtio_device *dev)
{
	struct viortc_dev *viortc = dev->priv;
	struct viortc_vq *alarm_viortc_vq;
	struct virtqueue *alarm_vq;
	bool notify = false;
	int ret;

	ret = viortc_init_vqs(viortc);
	if (ret)
		return ret;

	alarm_viortc_vq = &viortc->vqs[VIORTC_ALARMQ];
	alarm_vq = alarm_viortc_vq->vq;

	if (viortc_alarms_supported(dev)) {
		ret = viortc_populate_vq(viortc, alarm_viortc_vq,
					 VIORTC_ALARMQ_BUF_CAP, false);
		if (ret)
			return ret;

		notify = virtqueue_kick_prepare(alarm_vq);
	}

	virtio_device_ready(dev);

	if (notify && !virtqueue_notify(alarm_vq))
		ret = -EIO;

	return ret;
}

static unsigned int features[] = {
#if IS_ENABLED(CONFIG_VIRTIO_RTC_CLASS)
	VIRTIO_RTC_F_ALARM,
#endif
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_rtc_drv = {
	.driver.name = KBUILD_MODNAME,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = viortc_probe,
	.remove = viortc_remove,
	.freeze = pm_sleep_ptr(viortc_freeze),
	.restore = pm_sleep_ptr(viortc_restore),
};

module_virtio_driver(virtio_rtc_drv);

MODULE_DESCRIPTION("Virtio RTC driver");
MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
MODULE_LICENSE("GPL");