// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_imem.h"
#include "iosm_ipc_task_queue.h"

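/* The task queue lets IMEM code defer work: messages are placed into a
 * fixed-size ring of struct ipc_task_queue_args and executed one at a time
 * in tasklet (softirq) context; a sender may optionally block on a
 * completion until its task has run.
 */
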
/* Actual tasklet function, called whenever the tasklet is scheduled.
 * Invokes the event handler callback for each element in the message queue.
 */
static void ipc_task_queue_handler(unsigned long data)
{
	struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
	unsigned int q_rpos = ipc_task->q_rpos;

	/* Loop over the input queue contents. */
	while (q_rpos != ipc_task->q_wpos) {
		/* Get the current first queue element. */
		struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

		/* Process the input message. */
		if (args->func)
			args->response = args->func(args->ipc_imem, args->arg,
						    args->msg, args->size);

		/* Signal completion for synchronous calls */
		if (args->completion)
			complete(args->completion);

		/* Free message if copy was allocated. */
		if (args->is_copy)
			kfree(args->msg);

		/* Invalidate the queue element. Strictly speaking,
		 * spin_lock_irqsave() is not required here: the element
		 * has just been processed, and the writer is assumed not
		 * to wrap around to this same slot again within such a
		 * short time.
		 */
		args->completion = NULL;
		args->func = NULL;
		args->msg = NULL;
		args->size = 0;
		args->is_copy = false;

		/* Calculate the new read position and update the shared
		 * read pointer.
		 */
		q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
		ipc_task->q_rpos = q_rpos;
	}
}

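/* Illustrative sketch, not part of the driver: a task function with the
 * signature the queue expects. The handler above invokes it with the
 * queued arg/msg/size and stores its return value in args->response,
 * where a synchronous sender picks it up. Name and body are hypothetical.
 */
static int __maybe_unused ipc_task_example_func(struct iosm_imem *ipc_imem,
						int arg, void *msg,
						size_t size)
{
	/* A real task would act on msg here; it runs in tasklet context. */
	return 0;
}
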
/* Free any copied messages and fire the completions still pending in the
 * queue at teardown; waiting senders see the default response of -1.
 */
static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
{
	unsigned int q_rpos = ipc_task->q_rpos;

	while (q_rpos != ipc_task->q_wpos) {
		struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];

		if (args->completion)
			complete(args->completion);

		if (args->is_copy)
			kfree(args->msg);

		q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
		ipc_task->q_rpos = q_rpos;
	}
}

/* Add a message to the queue and trigger the ipc_task. */
static int
ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
			int arg, void *msg,
			int (*func)(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size),
			size_t size, bool is_copy, bool wait)
{
	struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
	struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
	struct completion completion;
	unsigned int pos, nextpos;
	unsigned long flags;
	int result = -EIO;

	init_completion(&completion);

	/* A task may be added from either interrupt or thread context,
	 * so protect the queue operations with a spinlock.
	 */
	spin_lock_irqsave(&ipc_task->q_lock, flags);

	pos = ipc_task->q_wpos;
	nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;

	/* Add the element only if there is still a free slot: one slot is
	 * always kept empty so that q_wpos == q_rpos unambiguously means
	 * "empty".
	 */
	if (nextpos != ipc_task->q_rpos) {
		/* Get the reference to the queue element and save the passed
		 * values.
		 */
		ipc_task->args[pos].arg = arg;
		ipc_task->args[pos].msg = msg;
		ipc_task->args[pos].func = func;
		ipc_task->args[pos].ipc_imem = ipc_imem;
		ipc_task->args[pos].size = size;
		ipc_task->args[pos].is_copy = is_copy;
		ipc_task->args[pos].completion = wait ? &completion : NULL;
		ipc_task->args[pos].response = -1;

		/* Apply a write barrier so that the args[] element above is
		 * completely written before ipc_task->q_wpos is updated.
		 */
		smp_wmb();

		/* Update the status of the free queue space. */
		ipc_task->q_wpos = nextpos;
		result = 0;
	}

	spin_unlock_irqrestore(&ipc_task->q_lock, flags);

	if (result == 0) {
		tasklet_schedule(ipc_tasklet);

		if (wait) {
			wait_for_completion(&completion);
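			/* The handler stores the response before calling
			 * complete(), so reading it after the wait is safe.
			 */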
			result = ipc_task->args[pos].response;
		}
	} else {
		dev_err(ipc_imem->ipc_task->dev, "queue is full\n");
	}

	return result;
}

int ipc_task_queue_send_task(struct iosm_imem *imem,
			     int (*func)(struct iosm_imem *ipc_imem, int arg,
					 void *msg, size_t size),
			     int arg, void *msg, size_t size, bool wait)
{
	bool is_copy = false;
	void *copy = msg;
	int ret = -ENOMEM;

	if (size > 0) {
		copy = kmemdup(msg, size, GFP_ATOMIC);
		if (!copy)
			goto out;

		is_copy = true;
	}

	ret = ipc_task_queue_add_task(imem, arg, copy, func,
				      size, is_copy, wait);
	if (ret < 0) {
		dev_err(imem->ipc_task->dev,
			"add task failed for %ps %d, %p, %zu, %d\n", func,
			arg, copy, size, is_copy);
		if (is_copy)
			kfree(copy);
		goto out;
	}

	ret = 0;
out:
	return ret;
}

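/* Illustrative usage sketch, not part of the driver: queue the hypothetical
 * ipc_task_example_func() above with a small payload. The payload is
 * duplicated with kmemdup(), so a stack variable is fine. With wait == true
 * the call sleeps until the tasklet has run the function and returns its
 * response, so it must not be used in atomic context.
 */
static int __maybe_unused ipc_task_example_send(struct iosm_imem *ipc_imem)
{
	u32 payload = 0xdeadbeef;

	return ipc_task_queue_send_task(ipc_imem, ipc_task_example_func, 0,
					&payload, sizeof(payload), true);
}
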
int ipc_task_init(struct ipc_task *ipc_task)
{
	struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;

	ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
					GFP_KERNEL);

	if (!ipc_task->ipc_tasklet)
		return -ENOMEM;

	/* Initialize the spinlock needed to protect the message queue of the
	 * ipc_task.
	 */
	spin_lock_init(&ipc_queue->q_lock);

	tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
		     (unsigned long)ipc_queue);
	return 0;
}

void ipc_task_deinit(struct ipc_task *ipc_task)
{
	tasklet_kill(ipc_task->ipc_tasklet);

	kfree(ipc_task->ipc_tasklet);
	/* This will free/complete any outstanding messages
	 * without calling the actual handler.
	 */
	ipc_task_queue_cleanup(&ipc_task->ipc_queue);
}
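
/* Illustrative lifecycle sketch, not part of the driver: ipc_task_init()
 * pairs with ipc_task_deinit(), and deinit may run with messages still
 * queued. The wrapper function itself is hypothetical.
 */
static int __maybe_unused ipc_task_example_lifecycle(struct ipc_task *ipc_task)
{
	int ret = ipc_task_init(ipc_task);

	if (ret)
		return ret;

	/* ... queue work via ipc_task_queue_send_task() ... */

	ipc_task_deinit(ipc_task);
	return 0;
}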