// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
 * and in others the FA2/FA+ hardware is used with this PDC driver.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver defers
 * processing of response messages to a tasklet so that they are handled
 * outside of hard interrupt context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN  15

#define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)
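/*
 * Note: PDC_RING_SIZE is 512 entries * 16 bytes = 8192 bytes, so an
 * 8 KiB-aligned ring is also size-aligned. That is what allows the
 * hardware's current-descriptor byte offset to be extracted with a
 * simple mask (see CRYPTO_D64_RS0_CD_MASK below).
 */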

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN	    BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
#define XXD(x, max_mask)              ((x) & (max_mask))
#define TXD(x, max_mask)              XXD((x), (max_mask))
#define RXD(x, max_mask)              XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)          TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)          TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)          RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)          RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)    TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)    RXD((t) - (h), (max_mask))
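/*
 * Worked example of the index arithmetic above: with PDC_RING_ENTRIES = 512,
 * the max_mask passed in is ntxpost/nrxpost = 511 (0x1ff), so indexes wrap
 * modulo the ring size in unsigned arithmetic. For instance,
 * NTXDACTIVE(510, 2, 511) = (2 - 510) & 511 = 4, i.e. descriptors
 * 510, 511, 0 and 1 are in flight.
 */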

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0

/*
 * Interrupt mask and status definitions. Enable the receive interrupt for
 * ring 0.
 */
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK	     (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
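/*
 * PDC_LAZY_INT packs the lazy receive interrupt register: the low 24 bits
 * hold the timeout and bits 31:24 the frame count, so the value is
 * 10000 | (1 << 24) = 0x01002710, i.e. interrupt after a single frame.
 */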
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET   0x100

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before frame
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

#define PDC_SPUM_RESP_HDR_LEN  32

/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL		0x000C0800
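/*
 * I.e. PDC_TX_CTL = (3 << 18) | BIT(11) = 0xC0000 | 0x800 = 0x000C0800,
 * matching the bit descriptions above.
 */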

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE		0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *                          that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *                         remaining bytes in current frame, report error
 *                         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL		0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE		0x1

#define CRYPTO_D64_RS0_CD_MASK   ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
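/*
 * The current-descriptor field of the rx status0 register is a byte offset
 * into the 8192-byte ring, so this mask evaluates to 0x1fff; dividing the
 * masked value by RING_ENTRY_SIZE (16) yields a descriptor index
 * (see pdc_receive()).
 */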

/* descriptor flags */
#define D64_CTRL1_EOT   BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC   BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF   BIT(30)	/* end of frame */
#define D64_CTRL1_SOF   BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW       0x00800000
#define RX_STATUS_LEN            0x0000FFFF

#define PDC_TXREGS_OFFSET  0x200
#define PDC_RXREGS_OFFSET  0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384

enum pdc_hw {
	FA_HW,		/* FA2/FA+ hardware (i.e. Northstar Plus) */
	PDC_HW		/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};

struct pdc_dma_map {
	void *ctx;          /* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;      /* misc control bits */
	u32 ctrl2;      /* buffer count and address extension */
	u32 addrlow;    /* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;   /* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
	u32  control;   /* enable, et al */
	u32  ptr;       /* last descriptor posted to chip */
	u32  addrlow;   /* descriptor ring base address low 32-bits */
	u32  addrhigh;  /* descriptor ring base address bits 63:32 */
	u32  status0;   /* last rx descriptor written by hw */
	u32  status1;   /* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)  pad ## line
#define _XSTR(line)     _PADLINE(line)
#define PAD             _XSTR(__LINE__)
#endif  /* PAD */
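/*
 * For example, a "u32 PAD[2];" member expands to "u32 padNNN[2];", where NNN
 * is the source line number, giving each padding field a unique name.
 */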

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;  /* dma tx */
	u32          PAD[2];
	struct dma64_regs dmarcv;  /* dma rx */
	u32          PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32  devcontrol;             /* 0x000 */
	u32  devstatus;              /* 0x004 */
	u32  PAD;
	u32  biststatus;             /* 0x00c */
	u32  PAD[4];
	u32  intstatus;              /* 0x020 */
	u32  intmask;                /* 0x024 */
	u32  gptimer;                /* 0x028 */

	u32  PAD;
	u32  intrcvlazy_0;           /* 0x030 (Only in PDC, not FA2) */
	u32  intrcvlazy_1;           /* 0x034 (Only in PDC, not FA2) */
	u32  intrcvlazy_2;           /* 0x038 (Only in PDC, not FA2) */
	u32  intrcvlazy_3;           /* 0x03c (Only in PDC, not FA2) */

	u32  PAD[48];
	u32  fa_intrecvlazy;         /* 0x100 (Only in FA2, not PDC) */
	u32  flowctlthresh;          /* 0x104 */
	u32  wrrthresh;              /* 0x108 */
	u32  gmac_idle_cnt_thresh;   /* 0x10c */

	u32  PAD[4];
	u32  ifioaccessaddr;         /* 0x120 */
	u32  ifioaccessbyte;         /* 0x124 */
	u32  ifioaccessdata;         /* 0x128 */

	u32  PAD[21];
	u32  phyaccess;              /* 0x180 */
	u32  PAD;
	u32  phycontrol;             /* 0x188 */
	u32  txqctl;                 /* 0x18c */
	u32  rxqctl;                 /* 0x190 */
	u32  gpioselect;             /* 0x194 */
	u32  gpio_output_en;         /* 0x198 */
	u32  PAD;                    /* 0x19c */
	u32  txq_rxq_mem_ctl;        /* 0x1a0 */
	u32  memory_ecc_status;      /* 0x1a4 */
	u32  serdes_ctl;             /* 0x1a8 */
	u32  serdes_status0;         /* 0x1ac */
	u32  serdes_status1;         /* 0x1b0 */
	u32  PAD[11];                /* 0x1b4-1dc */
	u32  clk_ctl_st;             /* 0x1e0 */
	u32  hw_war;                 /* 0x1e4 (Only in PDC, not FA2) */
	u32  pwrctl;                 /* 0x1e8 */
	u32  PAD[5];

#define PDC_NUM_DMA_RINGS   4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];  /* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t  dmabase; /* DMA address of start of ring */
	void	   *vbase;   /* base kernel virtual address of ring */
	u32	    size;    /* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32  rxin_numd;
	void *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;    /* start of PDC registers */

	struct dma64_regs *txregs_64; /* dma tx engine registers */
	struct dma64_regs *rxregs_64; /* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd   *txd_64;  /* tx descriptor ring */
	struct dma64dd   *rxd_64;  /* rx descriptor ring */

	/* descriptor ring sizes */
	u32      ntxd;       /* # tx descriptors */
	u32      nrxd;       /* # rx descriptors */
	u32      nrxpost;    /* # rx buffers to keep posted */
	u32      ntxpost;    /* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32  txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32  tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32  txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32      txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32  rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32  rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32  last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32  rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	/* counters */
	u32  pdc_requests;     /* number of request messages submitted */
	u32  pdc_replies;      /* number of reply messages received */
	u32  last_tx_not_done; /* too few tx descriptors to indicate done */
	u32  tx_ring_full;     /* unable to accept msg because tx ring full */
	u32  rx_ring_full;     /* unable to accept msg because rx ring full */
	u32  txnobuf;          /* unable to create tx descriptor */
	u32  rxnobuf;          /* unable to create rx descriptor */
	u32  rx_oflow;         /* count of rx overflows */

	/* hardware type - FA2 or PDC/MDE */
	enum pdc_hw hw_type;
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests....................%u\n",
			       pdcs->pdc_requests);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...................%u\n",
			       pdcs->pdc_replies);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "Tx not done.....................%u\n",
			       pdcs->last_tx_not_done);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "Tx ring full....................%u\n",
			       pdcs->tx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "Rx ring full....................%u\n",
			       pdcs->rx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "Tx desc write fail. Ring full...%u\n",
			       pdcs->txnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "Rx desc write fail. Ring full...%u\n",
			       pdcs->rxnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow................%u\n",
			       pdcs->rx_oflow);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
			       "Num frags in rx ring............%u\n",
			       NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					  pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs,
			    &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:        PDC state for the SPU that will process this request
 * @dma_addr:    DMA address of packet to be transmitted
 * @buf_len:     Length of tx buffer, in bytes
 * @flags:       Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:    PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return:  PDC_SUCCESS if one or more receive descriptors was processed
 *          -EAGAIN indicates that no response message is available
 *          -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;    /* virtual addr of start of resp message DMA header */
	u32 frags_rdy;   /* number of fragments ready to read */
	u32 rx_idx;      /* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rx_ctx[rxin].rxin_numd is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called from the rx tasklet.
 *
 * Return: 0 always
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:      PDC state for the SPU that will process this request
 * @sg:        Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
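	/*
	 * The ptr registers take a byte offset into the ring, so the
	 * descriptor indexes are scaled by sizeof(struct dma64dd) == 16,
	 * hence the shift by 4.
	 */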
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:   PDC state for SPU handling request
 * @dst_sg: scatterlist providing rx buffers for response to be returned to
 *	    mailbox client
 * @ctx:    Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return:  PDC_SUCCESS if successful
 *          < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:       PDC state for the SPU that will process this request
 * @sg:         Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:      Interrupt number that has fired
 * @data:     device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. So disable
 * further interrupts, clear the pending status, and schedule a tasklet
 * to process the response messages outside of interrupt context.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Schedule the rx tasklet */
	tasklet_schedule(&pdcs->rx_tasklet);
	return IRQ_HANDLED;
}

/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @t: Pointer to the rx_tasklet embedded in the pdc_state structure
 */
static void pdc_tasklet_cb(struct tasklet_struct *t)
{
	struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:    PDC instance state
 * @ringset: index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:    Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}
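/*
 * For example, a two-entry scatterlist with lengths 20000 and 100 counts as
 * (20000 / 16384 + 1) + (100 / 16384 + 1) = 2 + 1 = 3 descriptors. The
 * estimate is conservative: a buffer that is an exact multiple of
 * PDC_DMA_BUF_MAX is counted as one descriptor more than it needs.
 */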

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:  PDC state
 * @tx_cnt: The number of descriptors required in the tx ring
 * @rx_cnt: The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:	The mailbox channel on which the data is sent. The channel
 *              corresponds to a DMA ringset.
 * @data:	The mailbox message to be sent. The message must be a
 *              brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *			support
 *         < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor to happen without another
	 * thread getting in. The channel spin lock in the mailbox framework
	 * ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure:                   %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs    %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC
 * and request its IRQ. Response processing is deferred to a tasklet so that
 * it runs outside of interrupt context.
 * @pdcs:   PDC state
 *
 * Set the interrupt mask to enable the receive-done interrupt on ring 0.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return:  PDC_SUCCESS
 *          <0 if the irq request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	if (pdcs->hw_type == FA_HW)
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  FA_RCVLAZY0_OFFSET);
	else
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};
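
/*
 * A minimal sketch of how a mailbox client might submit a request through
 * these ops, assuming valid src/dst scatterlists (pdc_send_data() does the
 * DMA mapping itself). The my_* names below are hypothetical client-side
 * variables, not part of this driver; mbox_client setup and full error
 * handling are omitted:
 *
 *	struct mbox_chan *chan = mbox_request_channel(&my_client, 0);
 *	struct brcm_message msg = { };
 *
 *	msg.type = BRCM_MESSAGE_SPU;
 *	msg.spu.src = my_src_sg;
 *	msg.spu.dst = my_dst_sg;
 *	msg.ctx = my_request_ctx;
 *	if (mbox_send_message(chan, &msg) < 0)
 *		goto fail;
 *
 * The response is later delivered through the client's rx_callback with the
 * same opaque ctx (see mbox_chan_received_data() in pdc_receive_one()).
 */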

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. PDC has no transmit
 * done interrupt, so the mailbox framework polls pdc_last_tx_done() to
 * determine when a mailbox message has successfully been transmitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = devm_mbox_controller_register(dev, mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
	{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
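
/*
 * A device tree node for this driver might look roughly like the following;
 * the unit address, reg size and interrupt number are board-specific
 * examples, not requirements:
 *
 *	pdc0: iproc-pdc0@612c0000 {
 *		compatible = "brcm,iproc-pdc-mbox";
 *		reg = <0 0x612c0000 0 0x445>;
 *		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *		brcm,rx-status-len = <32>;
 *		brcm,use-bcm-hdr;
 *	};
 */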

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmit and received frames should be preceded by an 8-byte
 * BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	const int *hw_type;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	pdcs->hw_type = PDC_HW;

	hw_type = device_get_match_data(dev);
	if (hw_type)
		pdcs->hw_type = *hw_type;

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:   PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static void pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
}

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove_new = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = pdc_mbox_of_match,
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");