// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS	200
#define FSM_EVENT_POLL_INTERVAL_MS	20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS	10000
#define FSM_MD_EX_PASS_TIMEOUT_MS	45000
#define FSM_CMD_TIMEOUT_MS		2000

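/*
 * Poll the device stage in T7XX_PCIE_MISC_DEV_STATUS every 100 ms until the
 * device reports the LK or Linux stage, giving up after 20 seconds. Relies
 * on a local 'md' pointer being in scope at the call site.
 */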
#define wait_for_expected_dev_stage(status) \
	read_poll_timeout(ioread32, status, \
			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) || \
			  ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000, \
			  20000000, false, IREG_BASE(md->t7xx_dev) + \
			  T7XX_PCIE_MISC_DEV_STATUS)

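/**
 * t7xx_fsm_notifier_register() - Register a modem state notifier.
 * @md: Modem context.
 * @notifier: Notifier entry to add to the FSM notifier list.
 *
 * The notifier callback is invoked on every modem state change broadcast
 * by the FSM.
 */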
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

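/**
 * t7xx_fsm_notifier_unregister() - Remove a previously registered notifier.
 * @md: Modem context.
 * @notifier: Notifier entry to remove from the FSM notifier list.
 */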
void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

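/*
 * Invoke every registered notifier callback with the new state. The notifier
 * lock is dropped around each callback so the callback does not run in
 * atomic context.
 */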
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update to port first, otherwise sending message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

static void fsm_release_command(struct kref *ref)
{
	struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);

	kfree(cmd);
}

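/*
 * Complete a command: post the result to any waiter that requested
 * FSM_CMD_FLAG_WAIT_FOR_COMPLETION, then drop this reference to the command.
 */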
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->result = result;
		complete_all(&cmd->done);
	}

	kref_put(&cmd->refcnt, fsm_release_command);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

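/*
 * Drain both the command and event queues, failing every pending command
 * with -EINVAL. Called on FSM reset/teardown and when an unknown command
 * is received.
 */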
static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

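/*
 * Poll the head of the event queue until @event_expected arrives, consuming
 * and skipping any @event_ignore entries, sleeping FSM_EVENT_POLL_INTERVAL_MS
 * between empty polls for at most @retries iterations.
 */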
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

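/*
 * Handle a modem exception. For an exception event, broadcast the exception
 * state, run the exception handshake, then wait for the "recovery OK" and
 * "exception pass" events from the modem before finishing the command.
 */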
static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

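/*
 * Handle an LK (fastboot) stage event: for port-creation events, bring up
 * the AP CLDMA with the dedicated queue configuration, enable the early
 * port, and switch the device mode to fastboot download or dump accordingly.
 */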
static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
{
	struct t7xx_modem *md = ctl->md;
	struct cldma_ctrl *md_ctrl;
	enum lk_event_id lk_event;
	struct device *dev;
	struct t7xx_port *port;

	dev = &md->t7xx_dev->pdev->dev;
	lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
	switch (lk_event) {
	case LK_EVENT_NORMAL:
	case LK_EVENT_RESET:
		break;

	case LK_EVENT_CREATE_PD_PORT:
	case LK_EVENT_CREATE_POST_DL_PORT:
		md_ctrl = md->md_ctrl[CLDMA_ID_AP];
		t7xx_cldma_hif_hw_init(md_ctrl);
		t7xx_cldma_stop(md_ctrl);
		t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);

		port = &ctl->md->port_prox->ports[0];
		port->port_conf->ops->enable_chl(port);

		t7xx_cldma_start(md_ctrl);

		if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
		else
			t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
		break;

	default:
		dev_err(dev, "Invalid LK event %d\n", lk_event);
		break;
	}
}

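/*
 * Enter the STOPPED state. In fastboot download/dump mode the device is
 * left as-is; otherwise broadcast MD_STATE_STOPPED and reset the modem.
 */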
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	enum t7xx_mode mode;

	ctl->curr_state = FSM_STATE_STOPPED;

	mode = READ_ONCE(ctl->md->t7xx_dev->mode);
	if (mode == T7XX_FASTBOOT_DOWNLOAD || mode == T7XX_FASTBOOT_DUMP)
		return 0;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

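/*
 * Stop the MD CLDMA and ask the device to disable DRM before completing the
 * stop sequence via fsm_stopped_handler().
 */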
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
	/* Wait for the DRM disable to take effect */
	msleep(FSM_DRM_DISABLE_DELAY_MS);

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_mode_update(md->t7xx_dev, T7XX_READY);
	t7xx_md_event_notify(md, FSM_READY);
}

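/*
 * Wait up to 60 seconds for both the MD and AP handshakes to complete. On
 * timeout, raise an EXCEPTION_HS_TIMEOUT exception; on success, enter the
 * READY state.
 */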
static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq,
					 (md->core_md.ready && md->core_ap.ready) ||
					 ctl->exp_flg, HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	} else if (!md->core_ap.ready) {
		dev_err(dev, "AP handshake timeout\n");
		if (md->core_ap.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

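/*
 * Drive the boot flow according to the device stage reported in
 * T7XX_PCIE_MISC_DEV_STATUS: re-queue the start command while the device is
 * still in BROM, set up the early ports in the LK (fastboot) stage, and
 * perform the full handshake once the Linux stage is reached.
 */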
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;
	u32 status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	dev = &md->t7xx_dev->pdev->dev;
	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = wait_for_expected_dev_stage(status);

	if (ret) {
		dev_err(dev, "read poll timeout %d\n", ret);
		goto finish_command;
	}

	if (status != ctl->status || cmd->flag != 0) {
		u32 stage = FIELD_GET(MISC_STAGE_MASK, status);

		switch (stage) {
		case T7XX_DEV_STAGE_INIT:
		case T7XX_DEV_STAGE_BROM_PRE:
		case T7XX_DEV_STAGE_BROM_POST:
			dev_dbg(dev, "BROM_STAGE Entered\n");
			ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
			break;

		case T7XX_DEV_STAGE_LK:
			dev_dbg(dev, "LK_STAGE Entered\n");
			t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
			t7xx_lk_stage_event_handling(ctl, status);

			break;

		case T7XX_DEV_STAGE_LINUX:
			dev_dbg(dev, "LINUX_STAGE Entered\n");
			t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
					     D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
			if (cmd->flag == 0)
				break;
			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
			t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
			t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
			ret = fsm_routine_starting(ctl);
			break;

		default:
			break;
		}
		ctl->status = status;
	}

finish_command:
	if (ret)
		t7xx_mode_update(md->t7xx_dev, T7XX_UNKNOWN);

	fsm_finish_command(ctl, cmd, ret);
}

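/* FSM worker thread: dequeue and dispatch commands until kthread_stop(). */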
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

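/**
 * t7xx_fsm_append_cmd() - Queue a command for the FSM thread.
 * @ctl: FSM control block.
 * @cmd_id: Command to queue.
 * @flag: Command flags; FSM_CMD_FLAG_IN_INTERRUPT selects atomic allocation,
 *        FSM_CMD_FLAG_WAIT_FOR_COMPLETION blocks until the command finishes.
 *
 * Return: 0 for a queued non-blocking command, the command result for a
 * blocking command, -ENOMEM on allocation failure, or -ETIMEDOUT if the
 * command does not complete within FSM_CMD_TIMEOUT_MS.
 */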
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	kref_init(&cmd->refcnt);
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		init_completion(&cmd->done);
		kref_get(&cmd->refcnt);
	}

	kref_get(&cmd->refcnt);
	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&cmd->done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));

		ret = wait_ret ? cmd->result : -ETIMEDOUT;
		kref_put(&cmd->refcnt, fsm_release_command);
		return ret;
	}

	return 0;
}

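/**
 * t7xx_fsm_append_event() - Queue an event for the FSM thread.
 * @ctl: FSM control block.
 * @event_id: Event to queue.
 * @data: Optional event payload, copied into the event.
 * @length: Payload length in bytes.
 *
 * Return: 0 on success, -EINVAL for an invalid event ID, -ENOMEM on
 * allocation failure.
 */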
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(struct_size(event, data, length),
			in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

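/*
 * Translate a modem interrupt into an FSM command: port enumeration re-runs
 * the start flow, while a CCIF exception raises an FSM_CMD_EXCEPTION.
 */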
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
	ctl->status = T7XX_DEV_STAGE_INIT;
}

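/*
 * Allocate the FSM control block, initialize its queues, wait queues and
 * locks, and spawn the "t7xx_fsm" kthread that services the command queue.
 */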
int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}