// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
 * and in others the FA2/FA+ hardware is used with this PDC driver.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver defers
 * receive processing to a bottom-half workqueue so that response messages are
 * handled outside of hard interrupt context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */
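
/*
 * Editor's sketch of one message lifecycle, derived from the code below: a
 * mailbox client submits a brcm_message through the framework; pdc_send_data()
 * DMA-maps the src/dst scatterlists and posts rx then tx descriptors; writing
 * the ring ptr registers in pdc_tx_list_final() starts the transfer; the rx
 * interrupt fires and pdc_irq_handler() queues rx_work; pdc_work_cb() calls
 * pdc_receive(), which reclaims descriptors one response at a time and hands
 * each result back via mbox_chan_received_data().
 */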

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN  15

#define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)
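
/*
 * Note: with 512 entries of 16-byte dma64dd descriptors, PDC_RING_SIZE works
 * out to 512 * 16 = 8192 bytes, i.e. exactly one 8 KiB-aligned allocation
 * per ring.
 */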

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN        BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
#define XXD(x, max_mask)    ((x) & (max_mask))
#define TXD(x, max_mask)    XXD((x), (max_mask))
#define RXD(x, max_mask)    XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)   TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)   TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)   RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)   RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)   TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)   RXD((t) - (h), (max_mask))
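
/*
 * Worked example of the wrap-around arithmetic above (editor's note): with
 * PDC_RING_ENTRIES = 512, the mask used is ntxpost = 511, so
 *   NEXTTXD(511, 511)       = (511 + 1) & 511 = 0   (wraps to ring start)
 *   NTXDACTIVE(510, 2, 511) = (2 - 510) & 511 = 4   (4 descriptors in flight)
 */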

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK          (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
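
/*
 * For example, with PDC_LAZY_FRAMECOUNT = 1 and PDC_LAZY_TIMEOUT = 10000,
 * PDC_LAZY_INT evaluates to (1 << 24) | 10000 = 0x01002710: frame count in
 * the top byte, timeout in the low 24 bits.
 */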
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET   0x100

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before frame
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

#define PDC_SPUM_RESP_HDR_LEN  32

/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL		0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE		0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL		0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE		0x1

#define CRYPTO_D64_RS0_CD_MASK   ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)

/* descriptor flags */
#define D64_CTRL1_EOT   BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC   BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF   BIT(30)	/* end of frame */
#define D64_CTRL1_SOF   BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW       0x00800000
#define RX_STATUS_LEN            0x0000FFFF

#define PDC_TXREGS_OFFSET  0x200
#define PDC_RXREGS_OFFSET  0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384

enum pdc_hw {
	FA_HW,		/* FA2/FA+ hardware (i.e. Northstar Plus) */
	PDC_HW		/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;	/* misc control bits */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* descriptor ring base address low 32-bits */
	u32 addrhigh;	/* descriptor ring base address bits 63:32 */
	u32 status0;	/* last rx descriptor written by hw */
	u32 status1;	/* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)  pad ## line
#define _XSTR(line)     _PADLINE(line)
#define PAD             _XSTR(__LINE__)
#endif  /* PAD */
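
/*
 * For example, two "u32 PAD[2];" members in the same struct expand to
 * distinct fields ("u32 pad<line>[2];"), since PAD pastes in __LINE__ at the
 * point of use, avoiding duplicate member names.
 */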

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;	/* dma tx */
	u32 PAD[2];
	struct dma64_regs dmarcv;	/* dma rx */
	u32 PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32 devcontrol;			/* 0x000 */
	u32 devstatus;			/* 0x004 */
	u32 PAD;
	u32 biststatus;			/* 0x00c */
	u32 PAD[4];
	u32 intstatus;			/* 0x020 */
	u32 intmask;			/* 0x024 */
	u32 gptimer;			/* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;		/* 0x030 (Only in PDC, not FA2) */
	u32 intrcvlazy_1;		/* 0x034 (Only in PDC, not FA2) */
	u32 intrcvlazy_2;		/* 0x038 (Only in PDC, not FA2) */
	u32 intrcvlazy_3;		/* 0x03c (Only in PDC, not FA2) */

	u32 PAD[48];
	u32 fa_intrecvlazy;		/* 0x100 (Only in FA2, not PDC) */
	u32 flowctlthresh;		/* 0x104 */
	u32 wrrthresh;			/* 0x108 */
	u32 gmac_idle_cnt_thresh;	/* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;		/* 0x120 */
	u32 ifioaccessbyte;		/* 0x124 */
	u32 ifioaccessdata;		/* 0x128 */

	u32 PAD[21];
	u32 phyaccess;			/* 0x180 */
	u32 PAD;
	u32 phycontrol;			/* 0x188 */
	u32 txqctl;			/* 0x18c */
	u32 rxqctl;			/* 0x190 */
	u32 gpioselect;			/* 0x194 */
	u32 gpio_output_en;		/* 0x198 */
	u32 PAD;			/* 0x19c */
	u32 txq_rxq_mem_ctl;		/* 0x1a0 */
	u32 memory_ecc_status;		/* 0x1a4 */
	u32 serdes_ctl;			/* 0x1a8 */
	u32 serdes_status0;		/* 0x1ac */
	u32 serdes_status1;		/* 0x1b0 */
	u32 PAD[11];			/* 0x1b4-1dc */
	u32 clk_ctl_st;			/* 0x1e0 */
	u32 hw_war;			/* 0x1e4 (Only in PDC, not FA2) */
	u32 pwrctl;			/* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS   4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];	/* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t  dmabase;	/* DMA address of start of ring */
	void	   *vbase;	/* base kernel virtual address of ring */
	u32	    size;	/* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32  rxin_numd;
	void *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* work for deferred processing after DMA rx interrupt */
	struct work_struct rx_work;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;		/* start of PDC registers */

	struct dma64_regs *txregs_64;	/* dma tx engine registers */
	struct dma64_regs *rxregs_64;	/* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd *txd_64;		/* tx descriptor ring */
	struct dma64dd *rxd_64;		/* rx descriptor ring */

	/* descriptor ring sizes */
	u32 ntxd;	/* # tx descriptors */
	u32 nrxd;	/* # rx descriptors */
	u32 nrxpost;	/* # rx buffers to keep posted */
	u32 ntxpost;	/* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32 txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32 tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32 txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32 txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32 rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32 rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32 last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32 rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	/* counters */
	u32 pdc_requests;	/* number of request messages submitted */
	u32 pdc_replies;	/* number of reply messages received */
	u32 last_tx_not_done;	/* too few tx descriptors to indicate done */
	u32 tx_ring_full;	/* unable to accept msg because tx ring full */
	u32 rx_ring_full;	/* unable to accept msg because rx ring full */
	u32 txnobuf;		/* unable to create tx descriptor */
	u32 rxnobuf;		/* unable to create rx descriptor */
	u32 rx_oflow;		/* count of rx overflows */

	/* hardware type - FA2 or PDC/MDE */
	enum pdc_hw hw_type;
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"PDC requests....................%u\n",
				pdcs->pdc_requests);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"PDC responses...................%u\n",
				pdcs->pdc_replies);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx not done.....................%u\n",
				pdcs->last_tx_not_done);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx ring full....................%u\n",
				pdcs->tx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Rx ring full....................%u\n",
				pdcs->rx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx desc write fail. Ring full...%u\n",
				pdcs->txnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Rx desc write fail. Ring full...%u\n",
				pdcs->rxnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Receive overflow................%u\n",
				pdcs->rx_oflow);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Num frags in rx ring............%u\n",
				NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					   pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs,
			    &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:      PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return: PDC_SUCCESS if one or more receive descriptors were processed
 *         -EAGAIN indicates that no response message is available
 *         -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;	/* virtual addr of start of resp message DMA header */
	u32 frags_rdy;	/* number of fragments ready to read */
	u32 rx_idx;	/* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called from the deferred rx work, outside of hard interrupt context.
 * Return: 0
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}
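
/*
 * Editor's note on the status0 arithmetic above: the masked value is the byte
 * offset of the current descriptor within the ring, so dividing by
 * RING_ENTRY_SIZE (16 bytes) yields a descriptor index. E.g. a reported
 * offset of 0x0a0 corresponds to index 0x0a0 / 16 = 10.
 */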

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:  PDC state for the SPU that will process this request
 * @sg:    Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:    PDC state for SPU handling request
 * @dst_sg:  scatterlist providing rx buffers for response to be returned to
 *	     mailbox client
 * @ctx:     Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:  PDC state for the SPU that will process this request
 * @sg:    Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:   Interrupt number that has fired
 * @data:  device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. Other than that,
 * disable further interrupts and queue the deferred work that completes rx
 * processing outside of interrupt context.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Queue deferred rx processing */
	queue_work(system_bh_wq, &pdcs->rx_work);
	return IRQ_HANDLED;
}

/**
 * pdc_work_cb() - Work callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @t: Pointer to the rx_work work_struct embedded in the PDC state
 */
static void pdc_work_cb(struct work_struct *t)
{
	struct pdc_state *pdcs = from_work(pdcs, t, rx_work);

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:     PDC instance state
 * @ringset:  index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:  Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}
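
/*
 * For example, a single 40960-byte entry needs (40960 / 16384) + 1 = 3
 * descriptors (16384 + 16384 + 8192 bytes). The estimate is conservative:
 * an entry of exactly PDC_DMA_BUF_MAX bytes is counted as 2 even though the
 * tx/rx list-add functions only split buffers strictly larger than the max.
 */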

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:    PDC state
 * @tx_cnt:  The number of descriptors required in the tx ring
 * @rx_cnt:  The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:  The mailbox channel on which the data is sent. The channel
 *         corresponds to a DMA ringset.
 * @data:  The mailbox message to be sent. The message must be a
 *         brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *	   support
 *	   < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}
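
/*
 * Minimal client-side sketch (editor's example, not part of the driver),
 * assuming a channel already obtained via mbox_request_channel() and
 * DMA-able scatterlists src/dst plus an opaque my_request_ctx:
 *
 *	struct brcm_message msg = { .type = BRCM_MESSAGE_SPU };
 *
 *	msg.spu.src = src;
 *	msg.spu.dst = dst;
 *	msg.ctx = my_request_ctx;
 *	err = mbox_send_message(chan, &msg);
 *
 * The ctx pointer is handed back with the response through
 * mbox_chan_received_data() in pdc_receive_one().
 */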

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure:                   %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs    %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 *
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
 * request the PDC interrupt. Receive processing is deferred from the hard IRQ
 * handler to a work item that runs outside of interrupt context.
 * @pdcs: PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return: PDC_SUCCESS
 *         <0 if the IRQ request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	if (pdcs->hw_type == FA_HW)
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  FA_RCVLAZY0_OFFSET);
	else
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs: PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. The
 * driver only uses one ringset and thus one mb channel. Since txdone_poll is
 * set, the mailbox framework polls pdc_last_tx_done() to determine when a
 * mailbox message has successfully been transmitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = devm_mbox_controller_register(dev, mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
	{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
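
/*
 * Illustrative device tree node (editor's sketch; the register address,
 * interrupt number, and property values are examples only -- see the
 * brcm,iproc-pdc-mbox binding document for the authoritative description):
 *
 *	pdc0: iproc-pdc0@612c0000 {
 *		compatible = "brcm,iproc-pdc-mbox";
 *		reg = <0 0x612c0000 0 0x445>;
 *		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *		brcm,rx-status-len = <32>;
 *		brcm,use-bcm-hdr;
 *	};
 */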

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmit and received frames should be preceded by an 8-byte
 * BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	const int *hw_type;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	pdcs->hw_type = PDC_HW;

	hw_type = device_get_match_data(dev);
	if (hw_type)
		pdcs->hw_type = *hw_type;

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:  PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init work for deferred DMA rx processing */
	INIT_WORK(&pdcs->rx_work, pdc_work_cb);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	cancel_work_sync(&pdcs->rx_work);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static void pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	cancel_work_sync(&pdcs->rx_work);

	pdc_hw_disable(pdcs);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
}

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove_new = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = pdc_mbox_of_match,
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");
