1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. */
3
4 #include <linux/io.h>
5 #include <linux/kernel.h>
6 #include <linux/math64.h>
7 #include <linux/mhi.h>
8 #include <linux/mod_devicetable.h>
9 #include <linux/module.h>
10 #include <linux/time64.h>
11 #include <linux/timer.h>
12
13 #include "qaic.h"
14 #include "qaic_timesync.h"
15
/* Offset of the device QTimer count register within the MHI BAR (qdev->bar_mhi) */
#define QTIMER_REG_OFFSET 0xa28
/* Magic value placed in qts_hdr.signature to mark a valid timesync message */
#define QAIC_TIMESYNC_SIGNATURE 0x55aa
/* Convert QTimer ticks to microseconds: ticks * 10 / 192, i.e. a 19.2 MHz tick rate */
#define QAIC_CONV_QTIMER_TO_US(qtimer) (mul_u64_u32_div(qtimer, 10, 192))

/* Period of the periodic timesync timer; writable at runtime via sysfs (0600) */
static unsigned int timesync_delay_ms = 1000; /* 1 sec default */
module_param(timesync_delay_ms, uint, 0600);
MODULE_PARM_DESC(timesync_delay_ms, "Delay in ms between two consecutive timesync operations");
23
/**
 * enum qts_msg_type - Sub-types carried in qts_hdr.msg_type.
 * @QAIC_TS_CMD_TO_HOST: Device requests the host to send its current time.
 * @QAIC_TS_SYNC_REQ: Host sends time information to the device.
 * @QAIC_TS_ACK_TO_HOST: Device acknowledges a previously sent sync request.
 * @QAIC_TS_MSG_TYPE_MAX: Number of valid message types; not itself a message.
 */
enum qts_msg_type {
	QAIC_TS_CMD_TO_HOST,
	QAIC_TS_SYNC_REQ,
	QAIC_TS_ACK_TO_HOST,
	QAIC_TS_MSG_TYPE_MAX
};
30
/**
 * struct qts_hdr - Timesync message header structure.
 * @signature: Unique signature to identify the timesync message
 *             (QAIC_TIMESYNC_SIGNATURE).
 * @reserved_1: Reserved for future use.
 * @reserved_2: Reserved for future use.
 * @msg_type: Sub-type of the timesync message, one of enum qts_msg_type.
 * @reserved_3: Reserved for future use.
 */
struct qts_hdr {
	__le16 signature;
	__le16 reserved_1;
	u8 reserved_2;
	u8 msg_type;
	__le16 reserved_3;
} __packed;
46
/**
 * struct qts_timeval - Structure to carry time information.
 * @tv_sec: Seconds part of the time.
 * @tv_usec: uS (microseconds) part of the time; always less than USEC_PER_SEC.
 *
 * Both fields are little-endian as this structure goes over the wire to the
 * device.
 */
struct qts_timeval {
	__le64 tv_sec;
	__le64 tv_usec;
} __packed;
56
/**
 * struct qts_host_time_sync_msg_data - Structure to denote the timesync message.
 * @header: Header of the timesync message (signature and msg_type).
 * @data: Time information sent from host to device.
 */
struct qts_host_time_sync_msg_data {
	struct qts_hdr header;
	struct qts_timeval data;
} __packed;
66
/**
 * struct mqts_dev - MHI QAIC Timesync Control device.
 * @qdev: Pointer to the root device struct driven by QAIC driver.
 * @mhi_dev: Pointer to associated MHI device.
 * @timer: Timer handle used for periodic timesync.
 * @qtimer_addr: Device QTimer register pointer (bar_mhi + QTIMER_REG_OFFSET).
 * @buff_in_use: Atomic variable to track if the sync_msg buffer is in use,
 *               i.e. queued to MHI and not yet completed.
 * @dev: Device pointer to qdev->pdev->dev stored for easy access.
 * @sync_msg: Buffer used to send timesync message over MHI; reused every
 *            timer cycle, guarded by @buff_in_use.
 */
struct mqts_dev {
	struct qaic_device *qdev;
	struct mhi_device *mhi_dev;
	struct timer_list timer;
	void __iomem *qtimer_addr;
	atomic_t buff_in_use;
	struct device *dev;
	struct qts_host_time_sync_msg_data *sync_msg;
};
86
/**
 * struct qts_resp_msg - Device-to-host timesync message; header only.
 * @hdr: Common timesync header; hdr.msg_type selects the handling in the
 *       boot timesync worker.
 */
struct qts_resp_msg {
	struct qts_hdr hdr;
} __packed;

/**
 * struct qts_resp - Tracks one queued response buffer and its deferred work.
 * @data: DMA buffer handed to MHI to receive the device's message.
 * @work: Work item that processes @data outside the MHI callback context.
 * @qdev: Root QAIC device this response belongs to.
 */
struct qts_resp {
	struct qts_resp_msg data;
	struct work_struct work;
	struct qaic_device *qdev;
};
96
97 #ifdef readq
read_qtimer(const volatile void __iomem * addr)98 static u64 read_qtimer(const volatile void __iomem *addr)
99 {
100 return readq(addr);
101 }
102 #else
read_qtimer(const volatile void __iomem * addr)103 static u64 read_qtimer(const volatile void __iomem *addr)
104 {
105 u64 low, high;
106
107 low = readl(addr);
108 high = readl(addr + sizeof(u32));
109 return low | (high << 32);
110 }
111 #endif
112
qaic_timesync_ul_xfer_cb(struct mhi_device * mhi_dev,struct mhi_result * mhi_result)113 static void qaic_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
114 {
115 struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
116
117 dev_dbg(mqtsdev->dev, "%s status: %d xfer_len: %zu\n", __func__,
118 mhi_result->transaction_status, mhi_result->bytes_xferd);
119
120 atomic_set(&mqtsdev->buff_in_use, 0);
121 }
122
qaic_timesync_dl_xfer_cb(struct mhi_device * mhi_dev,struct mhi_result * mhi_result)123 static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
124 {
125 struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
126
127 dev_err(mqtsdev->dev, "%s no data expected on dl channel\n", __func__);
128 }
129
/*
 * Periodic timer callback: compute the offset between host UTC time and the
 * device QTimer, and send it to the device over MHI. The single sync_msg
 * buffer is guarded by buff_in_use; if the previous transfer has not
 * completed yet, this cycle is skipped and the timer is simply re-armed.
 */
static void qaic_timesync_timer(struct timer_list *t)
{
	struct mqts_dev *mqtsdev = timer_container_of(mqtsdev, t, timer);
	struct qts_host_time_sync_msg_data *sync_msg;
	u64 device_qtimer_us;
	u64 device_qtimer;
	u64 host_time_us;
	u64 offset_us;
	u64 host_sec;
	int ret;

	/* Previous message still queued to MHI; try again next cycle */
	if (atomic_read(&mqtsdev->buff_in_use)) {
		dev_dbg(mqtsdev->dev, "%s buffer not free, schedule next cycle\n", __func__);
		goto mod_timer;
	}
	atomic_set(&mqtsdev->buff_in_use, 1);

	sync_msg = mqtsdev->sync_msg;
	sync_msg->header.signature = cpu_to_le16(QAIC_TIMESYNC_SIGNATURE);
	sync_msg->header.msg_type = QAIC_TS_SYNC_REQ;
	/* Read host UTC time and convert to uS*/
	host_time_us = div_u64(ktime_get_real_ns(), NSEC_PER_USEC);
	device_qtimer = read_qtimer(mqtsdev->qtimer_addr);
	device_qtimer_us = QAIC_CONV_QTIMER_TO_US(device_qtimer);
	/* Offset between host UTC and device time */
	offset_us = host_time_us - device_qtimer_us;

	/* Split the offset into whole seconds and the microsecond remainder */
	host_sec = div_u64(offset_us, USEC_PER_SEC);
	sync_msg->data.tv_usec = cpu_to_le64(offset_us - host_sec * USEC_PER_SEC);
	sync_msg->data.tv_sec = cpu_to_le64(host_sec);
	ret = mhi_queue_buf(mqtsdev->mhi_dev, DMA_TO_DEVICE, sync_msg, sizeof(*sync_msg), MHI_EOT);
	if (ret && (ret != -EAGAIN)) {
		/* Fatal queue error: stop the periodic sync (timer not re-armed) */
		dev_err(mqtsdev->dev, "%s unable to queue to mhi:%d\n", __func__, ret);
		return;
	} else if (ret == -EAGAIN) {
		/* Ring temporarily full: release the buffer and retry next cycle */
		atomic_set(&mqtsdev->buff_in_use, 0);
	}

mod_timer:
	ret = mod_timer(t, jiffies + msecs_to_jiffies(timesync_delay_ms));
	if (ret)
		dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
}
173
qaic_mqts_ch_stop_timer(struct mhi_device * mhi_dev)174 void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev)
175 {
176 struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
177
178 timer_delete_sync(&mqtsdev->timer);
179 }
180
qaic_timesync_probe(struct mhi_device * mhi_dev,const struct mhi_device_id * id)181 static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
182 {
183 struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
184 struct mqts_dev *mqtsdev;
185 struct timer_list *timer;
186 int ret;
187
188 mqtsdev = kzalloc_obj(*mqtsdev);
189 if (!mqtsdev) {
190 ret = -ENOMEM;
191 goto out;
192 }
193
194 timer = &mqtsdev->timer;
195 mqtsdev->mhi_dev = mhi_dev;
196 mqtsdev->qdev = qdev;
197 mqtsdev->dev = &qdev->pdev->dev;
198
199 mqtsdev->sync_msg = kzalloc_obj(*mqtsdev->sync_msg);
200 if (!mqtsdev->sync_msg) {
201 ret = -ENOMEM;
202 goto free_mqts_dev;
203 }
204 atomic_set(&mqtsdev->buff_in_use, 0);
205
206 ret = mhi_prepare_for_transfer(mhi_dev);
207 if (ret)
208 goto free_sync_msg;
209
210 /* Qtimer register pointer */
211 mqtsdev->qtimer_addr = qdev->bar_mhi + QTIMER_REG_OFFSET;
212 timer_setup(timer, qaic_timesync_timer, 0);
213 timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
214 add_timer(timer);
215 dev_set_drvdata(&mhi_dev->dev, mqtsdev);
216 qdev->mqts_ch = mhi_dev;
217
218 return 0;
219
220 free_sync_msg:
221 kfree(mqtsdev->sync_msg);
222 free_mqts_dev:
223 kfree(mqtsdev);
224 out:
225 return ret;
226 };
227
/*
 * Teardown for the periodic timesync channel. Order matters: clear the
 * channel pointer first so no new users find it, stop the timer (waiting
 * for an in-flight callback), quiesce MHI so no transfer completions can
 * reference sync_msg, and only then free the buffers.
 */
static void qaic_timesync_remove(struct mhi_device *mhi_dev)
{
	struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);

	mqtsdev->qdev->mqts_ch = NULL;
	timer_delete_sync(&mqtsdev->timer);
	mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
	kfree(mqtsdev->sync_msg);
	kfree(mqtsdev);
}
238
/* MHI channel this driver binds to for periodic (post-boot) timesync */
static const struct mhi_device_id qaic_timesync_match_table[] = {
	{ .chan = "QAIC_TIMESYNC_PERIODIC"},
	{},
};

MODULE_DEVICE_TABLE(mhi, qaic_timesync_match_table);
245
/* MHI driver for the periodic timesync channel (timer-driven host->device) */
static struct mhi_driver qaic_timesync_driver = {
	.id_table = qaic_timesync_match_table,
	.remove = qaic_timesync_remove,
	.probe = qaic_timesync_probe,
	.ul_xfer_cb = qaic_timesync_ul_xfer_cb,
	.dl_xfer_cb = qaic_timesync_dl_xfer_cb,
	.driver = {
		.name = "qaic_timesync_periodic",
	},
};
256
/*
 * Work handler for a device message received on the boot timesync channel.
 * The received header is copied out of resp->data first so the same buffer
 * can be immediately re-queued for the next device message; if the device
 * asked for the time (QAIC_TS_CMD_TO_HOST), a freshly allocated request
 * carrying host UTC time is sent back. That request buffer is freed by the
 * uplink completion callback (qaic_boot_timesync_ul_xfer_cb) on success, or
 * here on queueing failure.
 */
static void qaic_boot_timesync_worker(struct work_struct *work)
{
	struct qts_resp *resp = container_of(work, struct qts_resp, work);
	struct qts_host_time_sync_msg_data *req;
	/* Copy by value before re-queueing resp->data below */
	struct qts_resp_msg data = resp->data;
	struct qaic_device *qdev = resp->qdev;
	struct mhi_device *mhi_dev;
	struct timespec64 ts;
	int ret;

	mhi_dev = qdev->qts_ch;
	/* Queue the response message beforehand to avoid race conditions */
	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
	if (ret) {
		kfree(resp);
		dev_warn(&mhi_dev->dev, "Failed to re-queue response buffer %d\n", ret);
		return;
	}

	switch (data.hdr.msg_type) {
	case QAIC_TS_CMD_TO_HOST:
		/* Device requested the time: reply with host UTC in sec + usec */
		req = kzalloc_obj(*req);
		if (!req)
			break;

		req->header = data.hdr;
		req->header.msg_type = QAIC_TS_SYNC_REQ;
		ktime_get_real_ts64(&ts);
		req->data.tv_sec = cpu_to_le64(ts.tv_sec);
		req->data.tv_usec = cpu_to_le64(div_u64(ts.tv_nsec, NSEC_PER_USEC));

		ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, req, sizeof(*req), MHI_EOT);
		if (ret) {
			kfree(req);
			dev_dbg(&mhi_dev->dev, "Failed to send request message. Error %d\n", ret);
		}
		break;
	case QAIC_TS_ACK_TO_HOST:
		dev_dbg(&mhi_dev->dev, "ACK received from device\n");
		break;
	default:
		dev_err(&mhi_dev->dev, "Invalid message type %u.\n", data.hdr.msg_type);
	}
}
301
qaic_boot_timesync_queue_resp(struct mhi_device * mhi_dev,struct qaic_device * qdev)302 static int qaic_boot_timesync_queue_resp(struct mhi_device *mhi_dev, struct qaic_device *qdev)
303 {
304 struct qts_resp *resp;
305 int ret;
306
307 resp = kzalloc_obj(*resp);
308 if (!resp)
309 return -ENOMEM;
310
311 resp->qdev = qdev;
312 INIT_WORK(&resp->work, qaic_boot_timesync_worker);
313
314 ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
315 if (ret) {
316 kfree(resp);
317 dev_warn(&mhi_dev->dev, "Failed to queue response buffer %d\n", ret);
318 return ret;
319 }
320
321 return 0;
322 }
323
qaic_boot_timesync_remove(struct mhi_device * mhi_dev)324 static void qaic_boot_timesync_remove(struct mhi_device *mhi_dev)
325 {
326 struct qaic_device *qdev;
327
328 qdev = dev_get_drvdata(&mhi_dev->dev);
329 mhi_unprepare_from_transfer(qdev->qts_ch);
330 qdev->qts_ch = NULL;
331 }
332
qaic_boot_timesync_probe(struct mhi_device * mhi_dev,const struct mhi_device_id * id)333 static int qaic_boot_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
334 {
335 struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
336 int ret;
337
338 ret = mhi_prepare_for_transfer(mhi_dev);
339 if (ret)
340 return ret;
341
342 qdev->qts_ch = mhi_dev;
343 dev_set_drvdata(&mhi_dev->dev, qdev);
344
345 ret = qaic_boot_timesync_queue_resp(mhi_dev, qdev);
346 if (ret) {
347 dev_set_drvdata(&mhi_dev->dev, NULL);
348 qdev->qts_ch = NULL;
349 mhi_unprepare_from_transfer(mhi_dev);
350 }
351
352 return ret;
353 }
354
qaic_boot_timesync_ul_xfer_cb(struct mhi_device * mhi_dev,struct mhi_result * mhi_result)355 static void qaic_boot_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
356 {
357 kfree(mhi_result->buf_addr);
358 }
359
qaic_boot_timesync_dl_xfer_cb(struct mhi_device * mhi_dev,struct mhi_result * mhi_result)360 static void qaic_boot_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
361 {
362 struct qts_resp *resp = container_of(mhi_result->buf_addr, struct qts_resp, data);
363
364 if (mhi_result->transaction_status || mhi_result->bytes_xferd != sizeof(resp->data)) {
365 kfree(resp);
366 return;
367 }
368
369 queue_work(resp->qdev->qts_wq, &resp->work);
370 }
371
/* MHI channel this driver binds to for boot-time (device-initiated) timesync */
static const struct mhi_device_id qaic_boot_timesync_match_table[] = {
	{ .chan = "QAIC_TIMESYNC"},
	{},
};
376
/* MHI driver for the boot timesync channel (device asks, host answers) */
static struct mhi_driver qaic_boot_timesync_driver = {
	.id_table = qaic_boot_timesync_match_table,
	.remove = qaic_boot_timesync_remove,
	.probe = qaic_boot_timesync_probe,
	.ul_xfer_cb = qaic_boot_timesync_ul_xfer_cb,
	.dl_xfer_cb = qaic_boot_timesync_dl_xfer_cb,
	.driver = {
		.name = "qaic_timesync",
	},
};
387
qaic_timesync_init(void)388 int qaic_timesync_init(void)
389 {
390 int ret;
391
392 ret = mhi_driver_register(&qaic_timesync_driver);
393 if (ret)
394 return ret;
395 ret = mhi_driver_register(&qaic_boot_timesync_driver);
396
397 return ret;
398 }
399
/* Unregister both timesync drivers, in reverse order of registration. */
void qaic_timesync_deinit(void)
{
	mhi_driver_unregister(&qaic_boot_timesync_driver);
	mhi_driver_unregister(&qaic_timesync_driver);
}
405