167a2003eSSinan Kaya /*
267a2003eSSinan Kaya * Qualcomm Technologies HIDMA DMA engine interface
367a2003eSSinan Kaya *
413058e33SSinan Kaya * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
567a2003eSSinan Kaya *
667a2003eSSinan Kaya * This program is free software; you can redistribute it and/or modify
767a2003eSSinan Kaya * it under the terms of the GNU General Public License version 2 and
867a2003eSSinan Kaya * only version 2 as published by the Free Software Foundation.
967a2003eSSinan Kaya *
1067a2003eSSinan Kaya * This program is distributed in the hope that it will be useful,
1167a2003eSSinan Kaya * but WITHOUT ANY WARRANTY; without even the implied warranty of
1267a2003eSSinan Kaya * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1367a2003eSSinan Kaya * GNU General Public License for more details.
1467a2003eSSinan Kaya */
1567a2003eSSinan Kaya
1667a2003eSSinan Kaya /*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
1867a2003eSSinan Kaya * Copyright (C) Semihalf 2009
1967a2003eSSinan Kaya * Copyright (C) Ilya Yanok, Emcraft Systems 2010
2067a2003eSSinan Kaya * Copyright (C) Alexander Popov, Promcontroller 2014
2167a2003eSSinan Kaya *
2267a2003eSSinan Kaya * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
2367a2003eSSinan Kaya * (defines, structures and comments) was taken from MPC5121 DMA driver
2467a2003eSSinan Kaya * written by Hongjun Chen <hong-jun.chen@freescale.com>.
2567a2003eSSinan Kaya *
2667a2003eSSinan Kaya * Approved as OSADL project by a majority of OSADL members and funded
2767a2003eSSinan Kaya * by OSADL membership fees in 2009; for details see www.osadl.org.
2867a2003eSSinan Kaya *
2967a2003eSSinan Kaya * This program is free software; you can redistribute it and/or modify it
3067a2003eSSinan Kaya * under the terms of the GNU General Public License as published by the Free
3167a2003eSSinan Kaya * Software Foundation; either version 2 of the License, or (at your option)
3267a2003eSSinan Kaya * any later version.
3367a2003eSSinan Kaya *
3467a2003eSSinan Kaya * This program is distributed in the hope that it will be useful, but WITHOUT
3567a2003eSSinan Kaya * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
3667a2003eSSinan Kaya * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
3767a2003eSSinan Kaya * more details.
3867a2003eSSinan Kaya *
3967a2003eSSinan Kaya * The full GNU General Public License is included in this distribution in the
4067a2003eSSinan Kaya * file called COPYING.
4167a2003eSSinan Kaya */
4267a2003eSSinan Kaya
4367a2003eSSinan Kaya /* Linux Foundation elects GPLv2 license only. */
4467a2003eSSinan Kaya
4567a2003eSSinan Kaya #include <linux/dmaengine.h>
4667a2003eSSinan Kaya #include <linux/dma-mapping.h>
4767a2003eSSinan Kaya #include <linux/list.h>
48897500c7SRob Herring #include <linux/mod_devicetable.h>
4967a2003eSSinan Kaya #include <linux/module.h>
5067a2003eSSinan Kaya #include <linux/platform_device.h>
5167a2003eSSinan Kaya #include <linux/slab.h>
5267a2003eSSinan Kaya #include <linux/spinlock.h>
5367a2003eSSinan Kaya #include <linux/property.h>
5467a2003eSSinan Kaya #include <linux/delay.h>
5567a2003eSSinan Kaya #include <linux/acpi.h>
5667a2003eSSinan Kaya #include <linux/irq.h>
5767a2003eSSinan Kaya #include <linux/atomic.h>
5867a2003eSSinan Kaya #include <linux/pm_runtime.h>
591c0e3e82SSinan Kaya #include <linux/msi.h>
6067a2003eSSinan Kaya
6167a2003eSSinan Kaya #include "../dmaengine.h"
6267a2003eSSinan Kaya #include "hidma.h"
6367a2003eSSinan Kaya
6467a2003eSSinan Kaya /*
6567a2003eSSinan Kaya * Default idle time is 2 seconds. This parameter can
6667a2003eSSinan Kaya * be overridden by changing the following
6767a2003eSSinan Kaya * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
6867a2003eSSinan Kaya * during kernel boot.
6967a2003eSSinan Kaya */
7067a2003eSSinan Kaya #define HIDMA_AUTOSUSPEND_TIMEOUT 2000
7167a2003eSSinan Kaya #define HIDMA_ERR_INFO_SW 0xFF
7267a2003eSSinan Kaya #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
7367a2003eSSinan Kaya #define HIDMA_NR_DEFAULT_DESC 10
741c0e3e82SSinan Kaya #define HIDMA_MSI_INTS 11
7567a2003eSSinan Kaya
/* Map a generic dma_device back to its containing hidma_dev. */
static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}
8067a2003eSSinan Kaya
/*
 * Map a pointer to the 'lldev' member (not the lldev itself) back to the
 * containing hidma_dev.  The MSI interrupt handler is registered with
 * &dmadev->lldev as its argument, which is what makes this recovery work.
 */
static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}
8667a2003eSSinan Kaya
/* Map a generic dma_chan back to its containing hidma_chan. */
static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}
9167a2003eSSinan Kaya
/*
 * Detach all channels from the DMA device.  The hidma_chan objects are
 * devm-allocated (see hidma_chan_init()), so resetting the list head is
 * all the cleanup needed here.
 */
static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}
9667a2003eSSinan Kaya
/*
 * Descriptor-count override, settable at module load or via
 * /sys/module/.../parameters/nr_desc_prm (mode 0644).  0 means "use the
 * driver default" — presumably HIDMA_NR_DEFAULT_DESC; confirm in probe.
 */
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
10067a2003eSSinan Kaya
/*
 * Hardware capability identifiers.  NOTE(review): presumably carried as
 * ACPI/OF match-table driver_data — confirm against the match tables,
 * which are outside this chunk.
 */
enum hidma_cap {
	HIDMA_MSI_CAP = 1,
	HIDMA_IDENTITY_CAP,
};
10567a2003eSSinan Kaya
10667a2003eSSinan Kaya /* process completed descriptors */
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		/* Ask the low-level driver for the final hardware status. */
		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			/* last_success feeds hidma_txn_is_success() */
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		/* Snapshot the callback before the descriptor is recycled. */
		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		/* Return the descriptor to the free pool. */
		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		/* Invoke the client callback without holding the lock. */
		dmaengine_desc_callback_invoke(&cb, &result);
	}
}
15867a2003eSSinan Kaya
15967a2003eSSinan Kaya /*
16067a2003eSSinan Kaya * Called once for each submitted descriptor.
16167a2003eSSinan Kaya * PM is locked once for each descriptor that is currently
16267a2003eSSinan Kaya * in execution.
16367a2003eSSinan Kaya */
hidma_callback(void * data)16467a2003eSSinan Kaya static void hidma_callback(void *data)
16567a2003eSSinan Kaya {
16667a2003eSSinan Kaya struct hidma_desc *mdesc = data;
16767a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
16867a2003eSSinan Kaya struct dma_device *ddev = mchan->chan.device;
16967a2003eSSinan Kaya struct hidma_dev *dmadev = to_hidma_dev(ddev);
17067a2003eSSinan Kaya unsigned long irqflags;
17167a2003eSSinan Kaya bool queued = false;
17267a2003eSSinan Kaya
17367a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags);
17467a2003eSSinan Kaya if (mdesc->node.next) {
17567a2003eSSinan Kaya /* Delete from the active list, add to completed list */
17667a2003eSSinan Kaya list_move_tail(&mdesc->node, &mchan->completed);
17767a2003eSSinan Kaya queued = true;
17867a2003eSSinan Kaya
17967a2003eSSinan Kaya /* calculate the next running descriptor */
18067a2003eSSinan Kaya mchan->running = list_first_entry(&mchan->active,
18167a2003eSSinan Kaya struct hidma_desc, node);
18267a2003eSSinan Kaya }
18367a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags);
18467a2003eSSinan Kaya
18567a2003eSSinan Kaya hidma_process_completed(mchan);
18667a2003eSSinan Kaya
18767a2003eSSinan Kaya if (queued) {
18867a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev);
18967a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev);
19067a2003eSSinan Kaya }
19167a2003eSSinan Kaya }
19267a2003eSSinan Kaya
/*
 * Allocate and register one hidma_chan on the DMA device.
 * The channel object is devm-managed; returns 0 or -ENOMEM.
 */
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct dma_device *ddev = &dmadev->ddev;
	struct hidma_chan *chan;

	chan = devm_kzalloc(ddev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dma_sig = dma_sig;
	chan->dmadev = dmadev;
	chan->chan.device = ddev;
	dma_cookie_init(&chan->chan);
	spin_lock_init(&chan->lock);

	/* Descriptor lifecycle lists: free -> prepared -> queued -> active -> completed */
	INIT_LIST_HEAD(&chan->free);
	INIT_LIST_HEAD(&chan->prepared);
	INIT_LIST_HEAD(&chan->queued);
	INIT_LIST_HEAD(&chan->active);
	INIT_LIST_HEAD(&chan->completed);

	list_add_tail(&chan->chan.device_node, &ddev->channels);
	return 0;
}
21867a2003eSSinan Kaya
/*
 * Deferred channel start, scheduled from hidma_issue_pending() when the
 * non-blocking pm_runtime_get() there fails.
 * NOTE(review): pm_runtime_get_sync() in tasklet (atomic) context assumes
 * runtime PM was marked irq-safe for this device in probe — confirm.
 */
static void hidma_issue_task(struct tasklet_struct *t)
{
	struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}
22667a2003eSSinan Kaya
hidma_issue_pending(struct dma_chan * dmach)22767a2003eSSinan Kaya static void hidma_issue_pending(struct dma_chan *dmach)
22867a2003eSSinan Kaya {
22967a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach);
23067a2003eSSinan Kaya struct hidma_dev *dmadev = mchan->dmadev;
23167a2003eSSinan Kaya unsigned long flags;
23299efdb3eSSinan Kaya struct hidma_desc *qdesc, *next;
23367a2003eSSinan Kaya int status;
23467a2003eSSinan Kaya
23567a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, flags);
23699efdb3eSSinan Kaya list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
23799efdb3eSSinan Kaya hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
23899efdb3eSSinan Kaya list_move_tail(&qdesc->node, &mchan->active);
23999efdb3eSSinan Kaya }
24099efdb3eSSinan Kaya
24167a2003eSSinan Kaya if (!mchan->running) {
24267a2003eSSinan Kaya struct hidma_desc *desc = list_first_entry(&mchan->active,
24367a2003eSSinan Kaya struct hidma_desc,
24467a2003eSSinan Kaya node);
24567a2003eSSinan Kaya mchan->running = desc;
24667a2003eSSinan Kaya }
24767a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, flags);
24867a2003eSSinan Kaya
24967a2003eSSinan Kaya /* PM will be released in hidma_callback function. */
25067a2003eSSinan Kaya status = pm_runtime_get(dmadev->ddev.dev);
25167a2003eSSinan Kaya if (status < 0)
25267a2003eSSinan Kaya tasklet_schedule(&dmadev->task);
25367a2003eSSinan Kaya else
25467a2003eSSinan Kaya hidma_ll_start(dmadev->lldev);
25567a2003eSSinan Kaya }
25667a2003eSSinan Kaya
hidma_txn_is_success(dma_cookie_t cookie,dma_cookie_t last_success,dma_cookie_t last_used)257793ae66cSSinan Kaya static inline bool hidma_txn_is_success(dma_cookie_t cookie,
258793ae66cSSinan Kaya dma_cookie_t last_success, dma_cookie_t last_used)
259793ae66cSSinan Kaya {
260793ae66cSSinan Kaya if (last_success <= last_used) {
261793ae66cSSinan Kaya if ((cookie <= last_success) || (cookie > last_used))
262793ae66cSSinan Kaya return true;
263793ae66cSSinan Kaya } else {
264793ae66cSSinan Kaya if ((cookie <= last_success) && (cookie > last_used))
265793ae66cSSinan Kaya return true;
266793ae66cSSinan Kaya }
267793ae66cSSinan Kaya return false;
268793ae66cSSinan Kaya }
269793ae66cSSinan Kaya
/*
 * dmaengine device_tx_status hook.  Refines the generic cookie status:
 * a cookie the core considers complete may still be an aborted transfer,
 * and an in-progress cookie on a paused channel reports DMA_PAUSED.
 */
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		/*
		 * The cookie bookkeeping only proves the descriptor was
		 * consumed; compare against last_success (maintained in
		 * hidma_process_completed()) to flag aborted transfers.
		 */
		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		/* Only the descriptor on the wire reports DMA_PAUSED. */
		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}
30467a2003eSSinan Kaya
30567a2003eSSinan Kaya /*
30667a2003eSSinan Kaya * Submit descriptor to hardware.
30767a2003eSSinan Kaya * Lock the PM for each descriptor we are sending.
30867a2003eSSinan Kaya */
/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	/* Power up briefly just to verify the channel is enabled. */
	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		/* negative value doubles as an error cookie for the caller */
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}
33967a2003eSSinan Kaya
/*
 * dmaengine device_alloc_chan_resources hook: populate the channel's
 * free pool with nr_descriptors descriptors, each bound to a low-level
 * TRE channel.  Returns a positive count convention value (1) on
 * success, 0 if already allocated, or a negative errno.
 */
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		/* GFP_NOWAIT: this hook may be called from atomic context */
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		/* Reserve a low-level TRE channel for this descriptor. */
		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	/* Publish the new descriptors on the free list atomically. */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}
39067a2003eSSinan Kaya
39167a2003eSSinan Kaya static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan * dmach,dma_addr_t dest,dma_addr_t src,size_t len,unsigned long flags)39267a2003eSSinan Kaya hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
39367a2003eSSinan Kaya size_t len, unsigned long flags)
39467a2003eSSinan Kaya {
39567a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach);
39667a2003eSSinan Kaya struct hidma_desc *mdesc = NULL;
39767a2003eSSinan Kaya struct hidma_dev *mdma = mchan->dmadev;
39867a2003eSSinan Kaya unsigned long irqflags;
39967a2003eSSinan Kaya
40067a2003eSSinan Kaya /* Get free descriptor */
40167a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags);
40267a2003eSSinan Kaya if (!list_empty(&mchan->free)) {
40367a2003eSSinan Kaya mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
40467a2003eSSinan Kaya list_del(&mdesc->node);
40567a2003eSSinan Kaya }
40667a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags);
40767a2003eSSinan Kaya
40867a2003eSSinan Kaya if (!mdesc)
40967a2003eSSinan Kaya return NULL;
41067a2003eSSinan Kaya
411875aac8aSShunyong Yang mdesc->desc.flags = flags;
41267a2003eSSinan Kaya hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
4135e2db086SSinan Kaya src, dest, len, flags,
4145e2db086SSinan Kaya HIDMA_TRE_MEMCPY);
4155e2db086SSinan Kaya
4165e2db086SSinan Kaya /* Place descriptor in prepared list */
4175e2db086SSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags);
4185e2db086SSinan Kaya list_add_tail(&mdesc->node, &mchan->prepared);
4195e2db086SSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags);
4205e2db086SSinan Kaya
4215e2db086SSinan Kaya return &mdesc->desc;
4225e2db086SSinan Kaya }
4235e2db086SSinan Kaya
4245e2db086SSinan Kaya static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan * dmach,dma_addr_t dest,int value,size_t len,unsigned long flags)4255e2db086SSinan Kaya hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
4265e2db086SSinan Kaya size_t len, unsigned long flags)
4275e2db086SSinan Kaya {
4285e2db086SSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach);
4295e2db086SSinan Kaya struct hidma_desc *mdesc = NULL;
4305e2db086SSinan Kaya struct hidma_dev *mdma = mchan->dmadev;
4315e2db086SSinan Kaya unsigned long irqflags;
432643a4a85SBen Walker u64 byte_pattern, fill_pattern;
4335e2db086SSinan Kaya
4345e2db086SSinan Kaya /* Get free descriptor */
4355e2db086SSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags);
4365e2db086SSinan Kaya if (!list_empty(&mchan->free)) {
4375e2db086SSinan Kaya mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
4385e2db086SSinan Kaya list_del(&mdesc->node);
4395e2db086SSinan Kaya }
4405e2db086SSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags);
4415e2db086SSinan Kaya
4425e2db086SSinan Kaya if (!mdesc)
4435e2db086SSinan Kaya return NULL;
4445e2db086SSinan Kaya
445643a4a85SBen Walker byte_pattern = (char)value;
446643a4a85SBen Walker fill_pattern = (byte_pattern << 56) |
447643a4a85SBen Walker (byte_pattern << 48) |
448643a4a85SBen Walker (byte_pattern << 40) |
449643a4a85SBen Walker (byte_pattern << 32) |
450643a4a85SBen Walker (byte_pattern << 24) |
451643a4a85SBen Walker (byte_pattern << 16) |
452643a4a85SBen Walker (byte_pattern << 8) |
453643a4a85SBen Walker byte_pattern;
454643a4a85SBen Walker
455875aac8aSShunyong Yang mdesc->desc.flags = flags;
4565e2db086SSinan Kaya hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
457643a4a85SBen Walker fill_pattern, dest, len, flags,
4585e2db086SSinan Kaya HIDMA_TRE_MEMSET);
45967a2003eSSinan Kaya
46067a2003eSSinan Kaya /* Place descriptor in prepared list */
46167a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags);
46267a2003eSSinan Kaya list_add_tail(&mdesc->node, &mchan->prepared);
46367a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags);
46467a2003eSSinan Kaya
46567a2003eSSinan Kaya return &mdesc->desc;
46667a2003eSSinan Kaya }
46767a2003eSSinan Kaya
/*
 * Stop the channel and fail every outstanding descriptor.  Completed
 * work is flushed first so clients see real results where possible;
 * everything else is called back with a NULL result and recycled to
 * the free pool.  The channel is re-enabled on the way out.
 */
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	/* Collect descriptors from every lifecycle list under the lock. */
	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		/* NULL result: client callback runs without a status. */
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	/* re-enable so the channel can accept new work */
	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}
51467a2003eSSinan Kaya
hidma_terminate_all(struct dma_chan * chan)51567a2003eSSinan Kaya static int hidma_terminate_all(struct dma_chan *chan)
51667a2003eSSinan Kaya {
51767a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(chan);
51867a2003eSSinan Kaya struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
51967a2003eSSinan Kaya int rc;
52067a2003eSSinan Kaya
52167a2003eSSinan Kaya rc = hidma_terminate_channel(chan);
52267a2003eSSinan Kaya if (rc)
52367a2003eSSinan Kaya return rc;
52467a2003eSSinan Kaya
52567a2003eSSinan Kaya /* reinitialize the hardware */
52667a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev);
52767a2003eSSinan Kaya rc = hidma_ll_setup(dmadev->lldev);
52867a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev);
52967a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev);
53067a2003eSSinan Kaya return rc;
53167a2003eSSinan Kaya }
53267a2003eSSinan Kaya
/*
 * dmaengine device_free_chan_resources hook: terminate in-flight work
 * (which pushes every descriptor back to the free list), then release
 * the TRE channels and descriptor memory.
 */
static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}
55967a2003eSSinan Kaya
hidma_pause(struct dma_chan * chan)56067a2003eSSinan Kaya static int hidma_pause(struct dma_chan *chan)
56167a2003eSSinan Kaya {
56267a2003eSSinan Kaya struct hidma_chan *mchan;
56367a2003eSSinan Kaya struct hidma_dev *dmadev;
56467a2003eSSinan Kaya
56567a2003eSSinan Kaya mchan = to_hidma_chan(chan);
56667a2003eSSinan Kaya dmadev = to_hidma_dev(mchan->chan.device);
56767a2003eSSinan Kaya if (!mchan->paused) {
56867a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev);
569d1615ca2SSinan Kaya if (hidma_ll_disable(dmadev->lldev))
57067a2003eSSinan Kaya dev_warn(dmadev->ddev.dev, "channel did not stop\n");
57167a2003eSSinan Kaya mchan->paused = true;
57267a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev);
57367a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev);
57467a2003eSSinan Kaya }
57567a2003eSSinan Kaya return 0;
57667a2003eSSinan Kaya }
57767a2003eSSinan Kaya
hidma_resume(struct dma_chan * chan)57867a2003eSSinan Kaya static int hidma_resume(struct dma_chan *chan)
57967a2003eSSinan Kaya {
58067a2003eSSinan Kaya struct hidma_chan *mchan;
58167a2003eSSinan Kaya struct hidma_dev *dmadev;
58267a2003eSSinan Kaya int rc = 0;
58367a2003eSSinan Kaya
58467a2003eSSinan Kaya mchan = to_hidma_chan(chan);
58567a2003eSSinan Kaya dmadev = to_hidma_dev(mchan->chan.device);
58667a2003eSSinan Kaya if (mchan->paused) {
58767a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev);
588d1615ca2SSinan Kaya rc = hidma_ll_enable(dmadev->lldev);
58967a2003eSSinan Kaya if (!rc)
59067a2003eSSinan Kaya mchan->paused = false;
59167a2003eSSinan Kaya else
59267a2003eSSinan Kaya dev_err(dmadev->ddev.dev,
59367a2003eSSinan Kaya "failed to resume the channel");
59467a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev);
59567a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev);
59667a2003eSSinan Kaya }
59767a2003eSSinan Kaya return rc;
59867a2003eSSinan Kaya }
59967a2003eSSinan Kaya
/* Wire-IRQ entry point: delegates entirely to the low-level handler. */
static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}
61067a2003eSSinan Kaya
61113e7accbSThomas Gleixner #ifdef CONFIG_GENERIC_MSI_IRQ
/*
 * MSI entry point.  'arg' is &dmadev->lldev, recovered to the hidma_dev
 * so the MSI index (virq offset from msi_virqbase) can be turned into a
 * single-bit cause mask for the low-level handler.
 */
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
6208cc12b26SArnd Bergmann #endif
6211c0e3e82SSinan Kaya
hidma_show_values(struct device * dev,struct device_attribute * attr,char * buf)62242d236f8SSinan Kaya static ssize_t hidma_show_values(struct device *dev,
62342d236f8SSinan Kaya struct device_attribute *attr, char *buf)
62442d236f8SSinan Kaya {
6256af6c371SWolfram Sang struct hidma_dev *mdev = dev_get_drvdata(dev);
62642d236f8SSinan Kaya
62742d236f8SSinan Kaya buf[0] = 0;
62842d236f8SSinan Kaya
62942d236f8SSinan Kaya if (strcmp(attr->attr.name, "chid") == 0)
63042d236f8SSinan Kaya sprintf(buf, "%d\n", mdev->chidx);
63142d236f8SSinan Kaya
63242d236f8SSinan Kaya return strlen(buf);
63342d236f8SSinan Kaya }
63442d236f8SSinan Kaya
/*
 * Remove the "chid" sysfs attribute created by hidma_sysfs_init().
 * The attribute memory itself is devm-managed and freed with the device.
 */
static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}
639c6e4584dSSinan Kaya
640c6e4584dSSinan Kaya static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev * dev,char * name,int mode)641c6e4584dSSinan Kaya hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
64242d236f8SSinan Kaya {
64342d236f8SSinan Kaya struct device_attribute *attrs;
64442d236f8SSinan Kaya char *name_copy;
64542d236f8SSinan Kaya
64642d236f8SSinan Kaya attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
64742d236f8SSinan Kaya GFP_KERNEL);
64842d236f8SSinan Kaya if (!attrs)
649c6e4584dSSinan Kaya return NULL;
65042d236f8SSinan Kaya
65142d236f8SSinan Kaya name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
65242d236f8SSinan Kaya if (!name_copy)
653c6e4584dSSinan Kaya return NULL;
65442d236f8SSinan Kaya
65542d236f8SSinan Kaya attrs->attr.name = name_copy;
65642d236f8SSinan Kaya attrs->attr.mode = mode;
65742d236f8SSinan Kaya attrs->show = hidma_show_values;
65842d236f8SSinan Kaya sysfs_attr_init(&attrs->attr);
65942d236f8SSinan Kaya
660c6e4584dSSinan Kaya return attrs;
661c6e4584dSSinan Kaya }
662c6e4584dSSinan Kaya
hidma_sysfs_init(struct hidma_dev * dev)663c6e4584dSSinan Kaya static int hidma_sysfs_init(struct hidma_dev *dev)
664c6e4584dSSinan Kaya {
665c6e4584dSSinan Kaya dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
666c6e4584dSSinan Kaya if (!dev->chid_attrs)
667c6e4584dSSinan Kaya return -ENOMEM;
668c6e4584dSSinan Kaya
669c6e4584dSSinan Kaya return device_create_file(dev->ddev.dev, dev->chid_attrs);
67042d236f8SSinan Kaya }
67142d236f8SSinan Kaya
#ifdef CONFIG_GENERIC_MSI_IRQ
/*
 * MSI write callback invoked by the platform MSI core: program the MSI
 * address/data of the first vector (msi_index 0) into the event channel
 * (EVCA) register block. Only index 0 is written here; presumably the
 * hardware derives the remaining vectors from this base entry.
 *
 * NOTE(review): 0x118/0x11C/0x120 look like the EVCA MSI address-low,
 * address-high and data registers — confirm against the HIDMA hardware
 * register map.
 */
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif
6851c0e3e82SSinan Kaya
/*
 * Release every MSI vector this channel requested and then free the
 * MSI descriptors themselves. Vectors that were never mapped yield a
 * zero virq from msi_get_virq() and are skipped. No-op when the kernel
 * lacks generic MSI support.
 */
static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	struct device *dev = dmadev->ddev.dev;
	int idx;

	for (idx = 0; idx < HIDMA_MSI_INTS; idx++) {
		int virq = msi_get_virq(dev, idx);

		if (virq)
			devm_free_irq(dev, virq, &dmadev->lldev);
	}

	platform_device_msi_free_irqs_all(dev);
#endif
}
7011c0e3e82SSinan Kaya
/*
 * Allocate the HIDMA_MSI_INTS MSI vectors for this channel and request
 * an IRQ handler for each. On success, switch the low-level driver into
 * MSI mode. On failure, release every vector requested so far and return
 * an error so the caller can fall back to the wired interrupt.
 * Returns -EINVAL when the kernel has no generic MSI support.
 */
static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ
	int rc, i, virq;

	/* Allocate MSI descriptors; hidma_write_msi_msg programs the HW */
	rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
						     hidma_write_msi_msg);
	if (rc)
		return rc;

	for (i = 0; i < HIDMA_MSI_INTS; i++) {
		virq = msi_get_virq(&pdev->dev, i);
		rc = devm_request_irq(&pdev->dev, virq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc)
			break;
		/* Record the base virq; the handler derives cause bits from it */
		if (!i)
			dmadev->msi_virqbase = virq;
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		/* i is the failed index, so unwind from i-1 down to 0 */
		for (--i; i >= 0; i--) {
			virq = msi_get_virq(&pdev->dev, i);
			devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
		}
		/*
		 * NOTE(review): the MSI descriptors allocated above are not
		 * freed on this path (no platform_device_msi_free_irqs_all)
		 * — verify whether that is intentional for the wired-IRQ
		 * fallback.
		 */
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	return rc;
#else
	return -EINVAL;
#endif
}
7421c0e3e82SSinan Kaya
hidma_test_capability(struct device * dev,enum hidma_cap test_cap)74395fbfb7aSSinan Kaya static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
7441c0e3e82SSinan Kaya {
74595fbfb7aSSinan Kaya enum hidma_cap cap;
7461c0e3e82SSinan Kaya
7479a2136b6SKrzysztof Kozlowski cap = (uintptr_t) device_get_match_data(dev);
74895fbfb7aSSinan Kaya return cap ? ((cap & test_cap) > 0) : 0;
7491c0e3e82SSinan Kaya }
7501c0e3e82SSinan Kaya
/*
 * Probe one HIDMA channel: map the transfer (TRCA) and event (EVCA)
 * register regions, populate the dmaengine device, bring up the
 * low-level driver, and install the channel interrupt (MSI when the
 * platform advertises it, wired IRQ otherwise).
 *
 * Error unwinding runs backwards through the goto labels: uninit
 * (low-level driver + MSIs), dmafree (dmaengine bookkeeping), bailout
 * (runtime PM).
 */
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	/* Enable runtime PM with autosuspend before touching the hardware */
	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Resource 0: transfer channel (TRCA) registers */
	trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
	if (IS_ERR(trca)) {
		rc = PTR_ERR(trca);
		goto bailout;
	}

	/* Resource 1: event channel (EVCA) registers */
	evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
	if (IS_ERR(evca)) {
		rc = PTR_ERR(evca);
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = chirq;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	/* Hold the device active for the rest of probe */
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	/* dmaengine operations implemented by this driver */
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	/* Module parameter (nr_desc_prm) overrides the firmware property */
	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	/* Newer parts expose the channel index at a different TRCA offset */
	if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
		dmadev->chidx = readl(dmadev->dev_trca + 0x40);
	else
		dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		goto dmafree;
	}

	/* Low-level init failure is treated as a probe deferral */
	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	/* drvdata must be set before MSI setup: hidma_write_msi_msg reads it */
	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	/* Fall back to the wired channel IRQ when MSI is absent or failed */
	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_setup(&dmadev->task, hidma_issue_task);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	/* Allow the device to autosuspend now that probe is done */
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}
90267a2003eSSinan Kaya
hidma_shutdown(struct platform_device * pdev)903dc7c733aSSinan Kaya static void hidma_shutdown(struct platform_device *pdev)
904dc7c733aSSinan Kaya {
905dc7c733aSSinan Kaya struct hidma_dev *dmadev = platform_get_drvdata(pdev);
906dc7c733aSSinan Kaya
907dc7c733aSSinan Kaya dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");
908dc7c733aSSinan Kaya
909dc7c733aSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev);
910dc7c733aSSinan Kaya if (hidma_ll_disable(dmadev->lldev))
911dc7c733aSSinan Kaya dev_warn(dmadev->ddev.dev, "channel did not stop\n");
912dc7c733aSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev);
913dc7c733aSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev);
914dc7c733aSSinan Kaya
915dc7c733aSSinan Kaya }
916dc7c733aSSinan Kaya
/*
 * Device removal: unregister from dmaengine, release whichever interrupt
 * flavor probe ended up using, then tear down the tasklet, sysfs/debugfs
 * entries and the low-level driver, in the reverse order of probe.
 */
static void hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	/* Keep the device powered during teardown */
	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	/* msi_support was recorded by hidma_ll_setup_irq() during probe */
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
93867a2003eSSinan Kaya
#if IS_ENABLED(CONFIG_ACPI)
/*
 * Supported ACPI device IDs. driver_data carries the hidma_cap flags
 * returned by device_get_match_data(): QCOM8062 adds MSI support, and
 * QCOM8063 additionally reads the channel index from the identity
 * register (TRCA + 0x40) instead of the legacy offset.
 */
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif
94867a2003eSSinan Kaya
/* Platform driver glue; channels are matched via ACPI ID only. */
static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove_new = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_DESCRIPTION("Qualcomm Technologies HIDMA Channel support");
MODULE_LICENSE("GPL v2");
962