xref: /linux/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include "iosm_ipc_protocol.h"
#include "iosm_ipc_protocol_ops.h"

/* Get the next free message element. */
static union ipc_mem_msg_entry *
ipc_protocol_free_msg_get(struct iosm_protocol *ipc_protocol, int *index)
{
	u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
	u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
	union ipc_mem_msg_entry *msg;

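	/* One message slot is deliberately kept unused: head == tail means
	 * the ring is empty, so a head that would catch up with the tail
	 * signals a full ring.
	 */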
	if (new_head == le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)) {
		dev_err(ipc_protocol->dev, "message ring is full");
		return NULL;
	}

	/* Get the pointer to the next free message element,
	 * reset the fields and mark it as invalid.
	 */
	msg = &ipc_protocol->p_ap_shm->msg_ring[head];
	memset(msg, 0, sizeof(*msg));

	/* return index in message ring */
	*index = head;

	return msg;
}

/* Updates the message ring Head pointer */
void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem)
{
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
	u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
	u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;

	/* Update head pointer and fire doorbell. */
	ipc_protocol->p_ap_shm->msg_head = cpu_to_le32(new_head);
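	/* Snapshot the tail so that ipc_protocol_msg_process() knows at
	 * which entry to resume scanning for completed messages.
	 */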
	ipc_protocol->old_msg_tail =
		le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);

	ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, IPC_HP_MR, false);
}

/* Allocate and prepare an OPEN_PIPE message.
 * This also allocates the memory for the new TDR structure and
 * updates the pipe structure referenced in the preparation arguments.
 */
static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol,
					 union ipc_msg_prep_args *args)
{
	int index;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);
	struct ipc_pipe *pipe = args->pipe_open.pipe;
	struct ipc_protocol_td *tdr;
	struct sk_buff **skbr;

	if (!msg) {
		dev_err(ipc_protocol->dev, "failed to get free message");
		return -EIO;
	}

	/* Allocate the skb ring used to track the socket buffers that are
	 * in flight. The SKB ring is driver-internal memory, so there is
	 * no need to re-calculate the start and end addresses.
	 */
	skbr = kcalloc(pipe->nr_of_entries, sizeof(*skbr), GFP_ATOMIC);
	if (!skbr)
		return -ENOMEM;

	/* Allocate the transfer descriptors for the pipe. */
	tdr = dma_alloc_coherent(&ipc_protocol->pcie->pci->dev,
				 pipe->nr_of_entries * sizeof(*tdr),
				 &pipe->phy_tdr_start, GFP_ATOMIC);
	if (!tdr) {
		kfree(skbr);
		dev_err(ipc_protocol->dev, "tdr alloc error");
		return -ENOMEM;
	}

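	/* As in the message ring, one TD always stays unused so that a
	 * completely filled ring remains distinguishable from an empty one.
	 */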
	pipe->max_nr_of_queued_entries = pipe->nr_of_entries - 1;
	pipe->nr_of_queued_entries = 0;
	pipe->tdr_start = tdr;
	pipe->skbr_start = skbr;
	pipe->old_tail = 0;

	ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;

	msg->open_pipe.type_of_message = IPC_MEM_MSG_OPEN_PIPE;
	msg->open_pipe.pipe_nr = pipe->pipe_nr;
	msg->open_pipe.tdr_addr = cpu_to_le64(pipe->phy_tdr_start);
	msg->open_pipe.tdr_entries = cpu_to_le16(pipe->nr_of_entries);
	msg->open_pipe.accumulation_backoff =
				cpu_to_le32(pipe->accumulation_backoff);
	msg->open_pipe.irq_vector = cpu_to_le32(pipe->irq);

	return index;
}

static int ipc_protocol_msg_prepipe_close(struct iosm_protocol *ipc_protocol,
					  union ipc_msg_prep_args *args)
{
	int index = -1;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);
	struct ipc_pipe *pipe = args->pipe_close.pipe;

	if (!msg)
		return -EIO;

	msg->close_pipe.type_of_message = IPC_MEM_MSG_CLOSE_PIPE;
	msg->close_pipe.pipe_nr = pipe->pipe_nr;

	dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_CLOSE_PIPE(pipe_nr=%d)",
		msg->close_pipe.pipe_nr);

	return index;
}

static int ipc_protocol_msg_prep_sleep(struct iosm_protocol *ipc_protocol,
				       union ipc_msg_prep_args *args)
{
	int index = -1;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);

	if (!msg) {
		dev_err(ipc_protocol->dev, "failed to get free message");
		return -EIO;
	}

	/* Prepare and send the host sleep message to CP to enter or exit D3. */
	msg->host_sleep.type_of_message = IPC_MEM_MSG_SLEEP;
	msg->host_sleep.target = args->sleep.target; /* 0=host, 1=device */
	/* state; 0=enter, 1=exit, 2=enter w/o protocol */
	msg->host_sleep.state = args->sleep.state;

	dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_SLEEP(target=%d; state=%d)",
		msg->host_sleep.target, msg->host_sleep.state);

	return index;
}

static int ipc_protocol_msg_prep_feature_set(struct iosm_protocol *ipc_protocol,
					     union ipc_msg_prep_args *args)
{
	int index = -1;
	union ipc_mem_msg_entry *msg =
		ipc_protocol_free_msg_get(ipc_protocol, &index);

	if (!msg) {
		dev_err(ipc_protocol->dev, "failed to get free message");
		return -EIO;
	}

	msg->feature_set.type_of_message = IPC_MEM_MSG_FEATURE_SET;
	msg->feature_set.reset_enable = args->feature_set.reset_enable <<
					RESET_BIT;

	dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_FEATURE_SET(reset_enable=%d)",
		msg->feature_set.reset_enable >> RESET_BIT);

	return index;
}

/* Processes the messages consumed by CP. */
bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq)
{
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
	struct ipc_rsp **rsp_ring = ipc_protocol->rsp_ring;
	bool msg_processed = false;
	u32 i;

	if (le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail) >=
			IPC_MEM_MSG_ENTRIES) {
		dev_err(ipc_protocol->dev, "msg_tail out of range: %d",
			le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail));
		return msg_processed;
	}

	if (irq != IMEM_IRQ_DONT_CARE &&
	    irq != ipc_protocol->p_ap_shm->ci.msg_irq_vector)
		return msg_processed;

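	/* Walk every entry CP has consumed since the last pass: from the
	 * previously recorded tail up to, but not including, the tail CP
	 * currently reports in shared memory.
	 */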
	for (i = ipc_protocol->old_msg_tail;
	     i != le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
	     i = (i + 1) % IPC_MEM_MSG_ENTRIES) {
		union ipc_mem_msg_entry *msg =
			&ipc_protocol->p_ap_shm->msg_ring[i];

		dev_dbg(ipc_protocol->dev, "msg[%d]: type=%u status=%d", i,
			msg->common.type_of_message,
			msg->common.completion_status);

		/* Update response with status and wake up waiting requestor */
		if (rsp_ring[i]) {
			rsp_ring[i]->status =
				le32_to_cpu(msg->common.completion_status);
			complete(&rsp_ring[i]->completion);
			rsp_ring[i] = NULL;
		}
		msg_processed = true;
	}

	ipc_protocol->old_msg_tail = i;
	return msg_processed;
}

/* Sends data from the UL list to CP for the provided pipe by updating the
 * Head pointer of the given pipe.
 */
bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
			     struct ipc_pipe *pipe,
			     struct sk_buff_head *p_ul_list)
{
	struct ipc_protocol_td *td;
	bool hpda_pending = false;
	struct sk_buff *skb;
	s32 free_elements;
	u32 head;
	u32 tail;

	if (!ipc_protocol->p_ap_shm) {
		dev_err(ipc_protocol->dev, "driver is not initialized");
		return false;
	}

	/* Get head and tail of the td list and calculate
	 * the number of free elements.
	 */
	head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
	tail = pipe->old_tail;

	while (!skb_queue_empty(p_ul_list)) {
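		/* Both branches subtract one because a single TD is kept
		 * unused; otherwise head == tail could mean either an empty
		 * or a completely full ring.
		 */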
		if (head < tail)
			free_elements = tail - head - 1;
		else
			free_elements =
				pipe->nr_of_entries - head + ((s32)tail - 1);

		if (free_elements <= 0) {
			dev_dbg(ipc_protocol->dev,
				"no free td elements for UL pipe %d",
				pipe->pipe_nr);
			break;
		}

		/* Get the td address. */
		td = &pipe->tdr_start[head];

		/* Take the first element of the uplink list and add it
		 * to the td list.
		 */
		skb = skb_dequeue(p_ul_list);
		if (WARN_ON(!skb))
			break;

		/* Save the reference to the uplink skbuf. */
		pipe->skbr_start[head] = skb;

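		/* The scs word carries the buffer size in its SIZE_MASK
		 * bits; the completion status bits above it are left zero
		 * here (cf. the COMPLETION_STATUS check in
		 * ipc_protocol_dl_td_process()).
		 */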
		td->buffer.address = IPC_CB(skb)->mapping;
		td->scs = cpu_to_le32(skb->len) & cpu_to_le32(SIZE_MASK);
		td->next = 0;

		pipe->nr_of_queued_entries++;

		/* Calculate the new head and save it. */
		head++;
		if (head >= pipe->nr_of_entries)
			head = 0;

		ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
			cpu_to_le32(head);
	}

	if (pipe->old_head != head) {
		dev_dbg(ipc_protocol->dev, "New UL TDs Pipe:%d", pipe->pipe_nr);

		pipe->old_head = head;
		/* Trigger doorbell because of pending UL packets. */
		hpda_pending = true;
	}

	return hpda_pending;
}

/* Checks for a Tail pointer update from CP and returns the data as an SKB. */
struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
					   struct ipc_pipe *pipe)
{
	struct ipc_protocol_td *p_td = &pipe->tdr_start[pipe->old_tail];
	struct sk_buff *skb = pipe->skbr_start[pipe->old_tail];

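	/* CP has consumed this TD; retire it from the local bookkeeping
	 * before validating the buffer address below.
	 */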
	pipe->nr_of_queued_entries--;
	pipe->old_tail++;
	if (pipe->old_tail >= pipe->nr_of_entries)
		pipe->old_tail = 0;

	if (!p_td->buffer.address) {
		dev_err(ipc_protocol->dev, "Td buffer address is NULL");
		return NULL;
	}

	if (p_td->buffer.address != IPC_CB(skb)->mapping) {
		dev_err(ipc_protocol->dev,
			"pipe %d: invalid buf_addr or skb_data",
			pipe->pipe_nr);
		return NULL;
	}

	return skb;
}

/* Allocates an SKB for CP to send data and updates the Head Pointer
 * of the given pipe.
 */
bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
				struct ipc_pipe *pipe)
{
	struct ipc_protocol_td *td;
	dma_addr_t mapping = 0;
	u32 head, new_head;
	struct sk_buff *skb;
	u32 tail;

	/* Get head and tail of the td list and calculate
	 * the number of free elements.
	 */
	head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
	tail = le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);

	new_head = head + 1;
	if (new_head >= pipe->nr_of_entries)
		new_head = 0;

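	/* Ring full: the slot before the tail stays unused, so stop one
	 * entry early rather than letting head catch up with tail.
	 */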
	if (new_head == tail)
		return false;

	/* Get the td address. */
	td = &pipe->tdr_start[head];

	/* Allocate the skbuf for the descriptor. */
	skb = ipc_pcie_alloc_skb(ipc_protocol->pcie, pipe->buf_size, GFP_ATOMIC,
				 &mapping, DMA_FROM_DEVICE,
				 IPC_MEM_DL_ETH_OFFSET);
	if (!skb)
		return false;

	td->buffer.address = mapping;
	td->scs = cpu_to_le32(pipe->buf_size) & cpu_to_le32(SIZE_MASK);
	td->next = 0;

	/* Store the new head value. */
	ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
		cpu_to_le32(new_head);

	/* Save the reference to the skbuf. */
	pipe->skbr_start[head] = skb;

	pipe->nr_of_queued_entries++;

	return true;
}

/* Processes DL TDs. */
struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
					   struct ipc_pipe *pipe)
{
	u32 tail =
		le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
	struct ipc_protocol_td *p_td;
	struct sk_buff *skb;

	if (!pipe->tdr_start)
		return NULL;

	/* Copy the reference to the downlink buffer. */
	p_td = &pipe->tdr_start[pipe->old_tail];
	skb = pipe->skbr_start[pipe->old_tail];

	/* Reset the ring elements. */
	pipe->skbr_start[pipe->old_tail] = NULL;

	pipe->nr_of_queued_entries--;

	pipe->old_tail++;
	if (pipe->old_tail >= pipe->nr_of_entries)
		pipe->old_tail = 0;

	if (!skb) {
		dev_err(ipc_protocol->dev, "skb is null");
		goto ret;
	} else if (!p_td->buffer.address) {
		dev_err(ipc_protocol->dev, "td/buffer address is null");
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	}

	if (!IPC_CB(skb)) {
		dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL",
			pipe->pipe_nr, tail);
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	}

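	/* CP reports the received length and the completion status in the
	 * scs word: the size lives in the SIZE_MASK bits, the status in the
	 * bits from COMPLETION_STATUS upwards; both are validated below.
	 */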
	if (p_td->buffer.address != IPC_CB(skb)->mapping) {
		dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
			(unsigned long long)p_td->buffer.address, skb->data);
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	} else if ((le32_to_cpu(p_td->scs) & SIZE_MASK) > pipe->buf_size) {
		dev_err(ipc_protocol->dev, "invalid buffer size %d > %d",
			le32_to_cpu(p_td->scs) & SIZE_MASK,
			pipe->buf_size);
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	} else if (le32_to_cpu(p_td->scs) >> COMPLETION_STATUS ==
		  IPC_MEM_TD_CS_ABORT) {
		/* Discard aborted buffers. */
		dev_dbg(ipc_protocol->dev, "discard 'aborted' buffers");
		ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
		skb = NULL;
		goto ret;
	}

	/* Set the length field in skbuf. */
	skb_put(skb, le32_to_cpu(p_td->scs) & SIZE_MASK);

ret:
	return skb;
}

void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
				      struct ipc_pipe *pipe, u32 *head,
				      u32 *tail)
{
	struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;

	if (head)
		*head = le32_to_cpu(ipc_ap_shm->head_array[pipe->pipe_nr]);

	if (tail)
		*tail = le32_to_cpu(ipc_ap_shm->tail_array[pipe->pipe_nr]);
}

/* Frees the TDs given to CP. */
void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
			       struct ipc_pipe *pipe)
{
	struct sk_buff *skb;
	u32 head;
	u32 tail;

	/* Get the start and the end of the buffer list. */
	head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
	tail = pipe->old_tail;

	/* Reset tail and head to 0. */
	ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr] = 0;
	ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;

	/* Free pending uplink and downlink buffers. */
	if (pipe->skbr_start) {
		while (head != tail) {
			/* Get the reference to the skbuf,
			 * which is on the way and free it.
			 */
			skb = pipe->skbr_start[tail];
			if (skb)
				ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);

			tail++;
			if (tail >= pipe->nr_of_entries)
				tail = 0;
		}

		kfree(pipe->skbr_start);
		pipe->skbr_start = NULL;
	}

	pipe->old_tail = 0;

	/* Free and reset the td and skbuf circular buffers. kfree is safe! */
	if (pipe->tdr_start) {
		dma_free_coherent(&ipc_protocol->pcie->pci->dev,
				  sizeof(*pipe->tdr_start) * pipe->nr_of_entries,
				  pipe->tdr_start, pipe->phy_tdr_start);

		pipe->tdr_start = NULL;
	}
}

enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
							  *ipc_protocol)
{
	return (enum ipc_mem_device_ipc_state)
		le32_to_cpu(ipc_protocol->p_ap_shm->device_info.ipc_status);
}

enum ipc_mem_exec_stage
ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol)
{
	return le32_to_cpu(ipc_protocol->p_ap_shm->device_info.execution_stage);
}

int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
			  enum ipc_msg_prep_type msg_type,
			  union ipc_msg_prep_args *args)
{
	struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;

	switch (msg_type) {
	case IPC_MSG_PREP_SLEEP:
		return ipc_protocol_msg_prep_sleep(ipc_protocol, args);

	case IPC_MSG_PREP_PIPE_OPEN:
		return ipc_protocol_msg_prepipe_open(ipc_protocol, args);

	case IPC_MSG_PREP_PIPE_CLOSE:
		return ipc_protocol_msg_prepipe_close(ipc_protocol, args);

	case IPC_MSG_PREP_FEATURE_SET:
		return ipc_protocol_msg_prep_feature_set(ipc_protocol, args);

		/* Unsupported messages in protocol */
	case IPC_MSG_PREP_MAP:
	case IPC_MSG_PREP_UNMAP:
	default:
		dev_err(ipc_protocol->dev,
			"unsupported message type: %d in protocol", msg_type);
		return -EINVAL;
	}
}

u32
ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol *ipc_protocol)
{
	struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;

	return le32_to_cpu(ipc_ap_shm->device_info.device_sleep_notification);
}