167a2003eSSinan Kaya /* 267a2003eSSinan Kaya * Qualcomm Technologies HIDMA DMA engine interface 367a2003eSSinan Kaya * 413058e33SSinan Kaya * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 567a2003eSSinan Kaya * 667a2003eSSinan Kaya * This program is free software; you can redistribute it and/or modify 767a2003eSSinan Kaya * it under the terms of the GNU General Public License version 2 and 867a2003eSSinan Kaya * only version 2 as published by the Free Software Foundation. 967a2003eSSinan Kaya * 1067a2003eSSinan Kaya * This program is distributed in the hope that it will be useful, 1167a2003eSSinan Kaya * but WITHOUT ANY WARRANTY; without even the implied warranty of 1267a2003eSSinan Kaya * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 1367a2003eSSinan Kaya * GNU General Public License for more details. 1467a2003eSSinan Kaya */ 1567a2003eSSinan Kaya 1667a2003eSSinan Kaya /* 1767a2003eSSinan Kaya * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. 1867a2003eSSinan Kaya * Copyright (C) Semihalf 2009 1967a2003eSSinan Kaya * Copyright (C) Ilya Yanok, Emcraft Systems 2010 2067a2003eSSinan Kaya * Copyright (C) Alexander Popov, Promcontroller 2014 2167a2003eSSinan Kaya * 2267a2003eSSinan Kaya * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description 2367a2003eSSinan Kaya * (defines, structures and comments) was taken from MPC5121 DMA driver 2467a2003eSSinan Kaya * written by Hongjun Chen <hong-jun.chen@freescale.com>. 2567a2003eSSinan Kaya * 2667a2003eSSinan Kaya * Approved as OSADL project by a majority of OSADL members and funded 2767a2003eSSinan Kaya * by OSADL membership fees in 2009; for details see www.osadl.org. 
2867a2003eSSinan Kaya * 2967a2003eSSinan Kaya * This program is free software; you can redistribute it and/or modify it 3067a2003eSSinan Kaya * under the terms of the GNU General Public License as published by the Free 3167a2003eSSinan Kaya * Software Foundation; either version 2 of the License, or (at your option) 3267a2003eSSinan Kaya * any later version. 3367a2003eSSinan Kaya * 3467a2003eSSinan Kaya * This program is distributed in the hope that it will be useful, but WITHOUT 3567a2003eSSinan Kaya * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 3667a2003eSSinan Kaya * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 3767a2003eSSinan Kaya * more details. 3867a2003eSSinan Kaya * 3967a2003eSSinan Kaya * The full GNU General Public License is included in this distribution in the 4067a2003eSSinan Kaya * file called COPYING. 4167a2003eSSinan Kaya */ 4267a2003eSSinan Kaya 4367a2003eSSinan Kaya /* Linux Foundation elects GPLv2 license only. */ 4467a2003eSSinan Kaya 4567a2003eSSinan Kaya #include <linux/dmaengine.h> 4667a2003eSSinan Kaya #include <linux/dma-mapping.h> 4767a2003eSSinan Kaya #include <linux/list.h> 4867a2003eSSinan Kaya #include <linux/module.h> 4967a2003eSSinan Kaya #include <linux/platform_device.h> 5067a2003eSSinan Kaya #include <linux/slab.h> 5167a2003eSSinan Kaya #include <linux/spinlock.h> 5267a2003eSSinan Kaya #include <linux/of_dma.h> 5395fbfb7aSSinan Kaya #include <linux/of_device.h> 5467a2003eSSinan Kaya #include <linux/property.h> 5567a2003eSSinan Kaya #include <linux/delay.h> 5667a2003eSSinan Kaya #include <linux/acpi.h> 5767a2003eSSinan Kaya #include <linux/irq.h> 5867a2003eSSinan Kaya #include <linux/atomic.h> 5967a2003eSSinan Kaya #include <linux/pm_runtime.h> 601c0e3e82SSinan Kaya #include <linux/msi.h> 6167a2003eSSinan Kaya 6267a2003eSSinan Kaya #include "../dmaengine.h" 6367a2003eSSinan Kaya #include "hidma.h" 6467a2003eSSinan Kaya 6567a2003eSSinan Kaya /* 6667a2003eSSinan Kaya * Default 
idle time is 2 seconds. This parameter can 6767a2003eSSinan Kaya * be overridden by changing the following 6867a2003eSSinan Kaya * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms 6967a2003eSSinan Kaya * during kernel boot. 7067a2003eSSinan Kaya */ 7167a2003eSSinan Kaya #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 7267a2003eSSinan Kaya #define HIDMA_ERR_INFO_SW 0xFF 7367a2003eSSinan Kaya #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 7467a2003eSSinan Kaya #define HIDMA_NR_DEFAULT_DESC 10 751c0e3e82SSinan Kaya #define HIDMA_MSI_INTS 11 7667a2003eSSinan Kaya 7767a2003eSSinan Kaya static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) 7867a2003eSSinan Kaya { 7967a2003eSSinan Kaya return container_of(dmadev, struct hidma_dev, ddev); 8067a2003eSSinan Kaya } 8167a2003eSSinan Kaya 8267a2003eSSinan Kaya static inline 8367a2003eSSinan Kaya struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp) 8467a2003eSSinan Kaya { 8567a2003eSSinan Kaya return container_of(_lldevp, struct hidma_dev, lldev); 8667a2003eSSinan Kaya } 8767a2003eSSinan Kaya 8867a2003eSSinan Kaya static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach) 8967a2003eSSinan Kaya { 9067a2003eSSinan Kaya return container_of(dmach, struct hidma_chan, chan); 9167a2003eSSinan Kaya } 9267a2003eSSinan Kaya 9367a2003eSSinan Kaya static inline 9467a2003eSSinan Kaya struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t) 9567a2003eSSinan Kaya { 9667a2003eSSinan Kaya return container_of(t, struct hidma_desc, desc); 9767a2003eSSinan Kaya } 9867a2003eSSinan Kaya 9967a2003eSSinan Kaya static void hidma_free(struct hidma_dev *dmadev) 10067a2003eSSinan Kaya { 10167a2003eSSinan Kaya INIT_LIST_HEAD(&dmadev->ddev.channels); 10267a2003eSSinan Kaya } 10367a2003eSSinan Kaya 10467a2003eSSinan Kaya static unsigned int nr_desc_prm; 10567a2003eSSinan Kaya module_param(nr_desc_prm, uint, 0644); 10667a2003eSSinan Kaya MODULE_PARM_DESC(nr_desc_prm, "number of descriptors 
(default: 0)"); 10767a2003eSSinan Kaya 10895fbfb7aSSinan Kaya enum hidma_cap { 10995fbfb7aSSinan Kaya HIDMA_MSI_CAP = 1, 110b5419adcSSinan Kaya HIDMA_IDENTITY_CAP, 11195fbfb7aSSinan Kaya }; 11267a2003eSSinan Kaya 11367a2003eSSinan Kaya /* process completed descriptors */ 11467a2003eSSinan Kaya static void hidma_process_completed(struct hidma_chan *mchan) 11567a2003eSSinan Kaya { 11667a2003eSSinan Kaya struct dma_device *ddev = mchan->chan.device; 11767a2003eSSinan Kaya struct hidma_dev *mdma = to_hidma_dev(ddev); 11867a2003eSSinan Kaya struct dma_async_tx_descriptor *desc; 11967a2003eSSinan Kaya dma_cookie_t last_cookie; 12067a2003eSSinan Kaya struct hidma_desc *mdesc; 1218a31f8b5SSinan Kaya struct hidma_desc *next; 12267a2003eSSinan Kaya unsigned long irqflags; 12367a2003eSSinan Kaya struct list_head list; 12467a2003eSSinan Kaya 12567a2003eSSinan Kaya INIT_LIST_HEAD(&list); 12667a2003eSSinan Kaya 12767a2003eSSinan Kaya /* Get all completed descriptors */ 12867a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 12967a2003eSSinan Kaya list_splice_tail_init(&mchan->completed, &list); 13067a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 13167a2003eSSinan Kaya 13267a2003eSSinan Kaya /* Execute callbacks and run dependencies */ 1338a31f8b5SSinan Kaya list_for_each_entry_safe(mdesc, next, &list, node) { 13467a2003eSSinan Kaya enum dma_status llstat; 1358a31f8b5SSinan Kaya struct dmaengine_desc_callback cb; 13655c370e5SSinan Kaya struct dmaengine_result result; 13767a2003eSSinan Kaya 13867a2003eSSinan Kaya desc = &mdesc->desc; 139793ae66cSSinan Kaya last_cookie = desc->cookie; 14067a2003eSSinan Kaya 141546c0547SShunyong Yang llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); 142546c0547SShunyong Yang 14367a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 144546c0547SShunyong Yang if (llstat == DMA_COMPLETE) { 145546c0547SShunyong Yang mchan->last_success = last_cookie; 146546c0547SShunyong Yang result.result = DMA_TRANS_NOERROR; 
147546c0547SShunyong Yang } else { 148546c0547SShunyong Yang result.result = DMA_TRANS_ABORTED; 149546c0547SShunyong Yang } 150546c0547SShunyong Yang 15167a2003eSSinan Kaya dma_cookie_complete(desc); 15267a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 15367a2003eSSinan Kaya 1548a31f8b5SSinan Kaya dmaengine_desc_get_callback(desc, &cb); 15567a2003eSSinan Kaya 15667a2003eSSinan Kaya dma_run_dependencies(desc); 15767a2003eSSinan Kaya 15867a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 1598a31f8b5SSinan Kaya list_move(&mdesc->node, &mchan->free); 16055c370e5SSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 16155c370e5SSinan Kaya 16255c370e5SSinan Kaya dmaengine_desc_callback_invoke(&cb, &result); 1638a31f8b5SSinan Kaya } 16467a2003eSSinan Kaya } 16567a2003eSSinan Kaya 16667a2003eSSinan Kaya /* 16767a2003eSSinan Kaya * Called once for each submitted descriptor. 16867a2003eSSinan Kaya * PM is locked once for each descriptor that is currently 16967a2003eSSinan Kaya * in execution. 
17067a2003eSSinan Kaya */ 17167a2003eSSinan Kaya static void hidma_callback(void *data) 17267a2003eSSinan Kaya { 17367a2003eSSinan Kaya struct hidma_desc *mdesc = data; 17467a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan); 17567a2003eSSinan Kaya struct dma_device *ddev = mchan->chan.device; 17667a2003eSSinan Kaya struct hidma_dev *dmadev = to_hidma_dev(ddev); 17767a2003eSSinan Kaya unsigned long irqflags; 17867a2003eSSinan Kaya bool queued = false; 17967a2003eSSinan Kaya 18067a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 18167a2003eSSinan Kaya if (mdesc->node.next) { 18267a2003eSSinan Kaya /* Delete from the active list, add to completed list */ 18367a2003eSSinan Kaya list_move_tail(&mdesc->node, &mchan->completed); 18467a2003eSSinan Kaya queued = true; 18567a2003eSSinan Kaya 18667a2003eSSinan Kaya /* calculate the next running descriptor */ 18767a2003eSSinan Kaya mchan->running = list_first_entry(&mchan->active, 18867a2003eSSinan Kaya struct hidma_desc, node); 18967a2003eSSinan Kaya } 19067a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 19167a2003eSSinan Kaya 19267a2003eSSinan Kaya hidma_process_completed(mchan); 19367a2003eSSinan Kaya 19467a2003eSSinan Kaya if (queued) { 19567a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 19667a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 19767a2003eSSinan Kaya } 19867a2003eSSinan Kaya } 19967a2003eSSinan Kaya 20067a2003eSSinan Kaya static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig) 20167a2003eSSinan Kaya { 20267a2003eSSinan Kaya struct hidma_chan *mchan; 20367a2003eSSinan Kaya struct dma_device *ddev; 20467a2003eSSinan Kaya 20567a2003eSSinan Kaya mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL); 20667a2003eSSinan Kaya if (!mchan) 20767a2003eSSinan Kaya return -ENOMEM; 20867a2003eSSinan Kaya 20967a2003eSSinan Kaya ddev = &dmadev->ddev; 21067a2003eSSinan Kaya mchan->dma_sig = dma_sig; 21167a2003eSSinan 
Kaya mchan->dmadev = dmadev; 21267a2003eSSinan Kaya mchan->chan.device = ddev; 21367a2003eSSinan Kaya dma_cookie_init(&mchan->chan); 21467a2003eSSinan Kaya 21567a2003eSSinan Kaya INIT_LIST_HEAD(&mchan->free); 21667a2003eSSinan Kaya INIT_LIST_HEAD(&mchan->prepared); 21767a2003eSSinan Kaya INIT_LIST_HEAD(&mchan->active); 21867a2003eSSinan Kaya INIT_LIST_HEAD(&mchan->completed); 21999efdb3eSSinan Kaya INIT_LIST_HEAD(&mchan->queued); 22067a2003eSSinan Kaya 22167a2003eSSinan Kaya spin_lock_init(&mchan->lock); 22267a2003eSSinan Kaya list_add_tail(&mchan->chan.device_node, &ddev->channels); 22367a2003eSSinan Kaya dmadev->ddev.chancnt++; 22467a2003eSSinan Kaya return 0; 22567a2003eSSinan Kaya } 22667a2003eSSinan Kaya 227*00c4747aSAllen Pais static void hidma_issue_task(struct tasklet_struct *t) 22867a2003eSSinan Kaya { 229*00c4747aSAllen Pais struct hidma_dev *dmadev = from_tasklet(dmadev, t, task); 23067a2003eSSinan Kaya 23167a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 23267a2003eSSinan Kaya hidma_ll_start(dmadev->lldev); 23367a2003eSSinan Kaya } 23467a2003eSSinan Kaya 23567a2003eSSinan Kaya static void hidma_issue_pending(struct dma_chan *dmach) 23667a2003eSSinan Kaya { 23767a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach); 23867a2003eSSinan Kaya struct hidma_dev *dmadev = mchan->dmadev; 23967a2003eSSinan Kaya unsigned long flags; 24099efdb3eSSinan Kaya struct hidma_desc *qdesc, *next; 24167a2003eSSinan Kaya int status; 24267a2003eSSinan Kaya 24367a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, flags); 24499efdb3eSSinan Kaya list_for_each_entry_safe(qdesc, next, &mchan->queued, node) { 24599efdb3eSSinan Kaya hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch); 24699efdb3eSSinan Kaya list_move_tail(&qdesc->node, &mchan->active); 24799efdb3eSSinan Kaya } 24899efdb3eSSinan Kaya 24967a2003eSSinan Kaya if (!mchan->running) { 25067a2003eSSinan Kaya struct hidma_desc *desc = list_first_entry(&mchan->active, 25167a2003eSSinan Kaya struct 
hidma_desc, 25267a2003eSSinan Kaya node); 25367a2003eSSinan Kaya mchan->running = desc; 25467a2003eSSinan Kaya } 25567a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, flags); 25667a2003eSSinan Kaya 25767a2003eSSinan Kaya /* PM will be released in hidma_callback function. */ 25867a2003eSSinan Kaya status = pm_runtime_get(dmadev->ddev.dev); 25967a2003eSSinan Kaya if (status < 0) 26067a2003eSSinan Kaya tasklet_schedule(&dmadev->task); 26167a2003eSSinan Kaya else 26267a2003eSSinan Kaya hidma_ll_start(dmadev->lldev); 26367a2003eSSinan Kaya } 26467a2003eSSinan Kaya 265793ae66cSSinan Kaya static inline bool hidma_txn_is_success(dma_cookie_t cookie, 266793ae66cSSinan Kaya dma_cookie_t last_success, dma_cookie_t last_used) 267793ae66cSSinan Kaya { 268793ae66cSSinan Kaya if (last_success <= last_used) { 269793ae66cSSinan Kaya if ((cookie <= last_success) || (cookie > last_used)) 270793ae66cSSinan Kaya return true; 271793ae66cSSinan Kaya } else { 272793ae66cSSinan Kaya if ((cookie <= last_success) && (cookie > last_used)) 273793ae66cSSinan Kaya return true; 274793ae66cSSinan Kaya } 275793ae66cSSinan Kaya return false; 276793ae66cSSinan Kaya } 277793ae66cSSinan Kaya 27867a2003eSSinan Kaya static enum dma_status hidma_tx_status(struct dma_chan *dmach, 27967a2003eSSinan Kaya dma_cookie_t cookie, 28067a2003eSSinan Kaya struct dma_tx_state *txstate) 28167a2003eSSinan Kaya { 28267a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach); 28367a2003eSSinan Kaya enum dma_status ret; 28467a2003eSSinan Kaya 28567a2003eSSinan Kaya ret = dma_cookie_status(dmach, cookie, txstate); 286793ae66cSSinan Kaya if (ret == DMA_COMPLETE) { 287793ae66cSSinan Kaya bool is_success; 288793ae66cSSinan Kaya 289793ae66cSSinan Kaya is_success = hidma_txn_is_success(cookie, mchan->last_success, 290793ae66cSSinan Kaya dmach->cookie); 291793ae66cSSinan Kaya return is_success ? 
ret : DMA_ERROR; 292793ae66cSSinan Kaya } 29367a2003eSSinan Kaya 29467a2003eSSinan Kaya if (mchan->paused && (ret == DMA_IN_PROGRESS)) { 29567a2003eSSinan Kaya unsigned long flags; 29667a2003eSSinan Kaya dma_cookie_t runcookie; 29767a2003eSSinan Kaya 29867a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, flags); 29967a2003eSSinan Kaya if (mchan->running) 30067a2003eSSinan Kaya runcookie = mchan->running->desc.cookie; 30167a2003eSSinan Kaya else 30267a2003eSSinan Kaya runcookie = -EINVAL; 30367a2003eSSinan Kaya 30467a2003eSSinan Kaya if (runcookie == cookie) 30567a2003eSSinan Kaya ret = DMA_PAUSED; 30667a2003eSSinan Kaya 30767a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, flags); 30867a2003eSSinan Kaya } 30967a2003eSSinan Kaya 31067a2003eSSinan Kaya return ret; 31167a2003eSSinan Kaya } 31267a2003eSSinan Kaya 31367a2003eSSinan Kaya /* 31467a2003eSSinan Kaya * Submit descriptor to hardware. 31567a2003eSSinan Kaya * Lock the PM for each descriptor we are sending. 31667a2003eSSinan Kaya */ 31767a2003eSSinan Kaya static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd) 31867a2003eSSinan Kaya { 31967a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(txd->chan); 32067a2003eSSinan Kaya struct hidma_dev *dmadev = mchan->dmadev; 32167a2003eSSinan Kaya struct hidma_desc *mdesc; 32267a2003eSSinan Kaya unsigned long irqflags; 32367a2003eSSinan Kaya dma_cookie_t cookie; 32467a2003eSSinan Kaya 32567a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 32667a2003eSSinan Kaya if (!hidma_ll_isenabled(dmadev->lldev)) { 32767a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 32867a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 32967a2003eSSinan Kaya return -ENODEV; 33067a2003eSSinan Kaya } 33199efdb3eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 33299efdb3eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 33367a2003eSSinan Kaya 33467a2003eSSinan Kaya mdesc = container_of(txd, struct hidma_desc, desc); 
33567a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 33667a2003eSSinan Kaya 33799efdb3eSSinan Kaya /* Move descriptor to queued */ 33899efdb3eSSinan Kaya list_move_tail(&mdesc->node, &mchan->queued); 33967a2003eSSinan Kaya 34067a2003eSSinan Kaya /* Update cookie */ 34167a2003eSSinan Kaya cookie = dma_cookie_assign(txd); 34267a2003eSSinan Kaya 34367a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 34467a2003eSSinan Kaya 34567a2003eSSinan Kaya return cookie; 34667a2003eSSinan Kaya } 34767a2003eSSinan Kaya 34867a2003eSSinan Kaya static int hidma_alloc_chan_resources(struct dma_chan *dmach) 34967a2003eSSinan Kaya { 35067a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach); 35167a2003eSSinan Kaya struct hidma_dev *dmadev = mchan->dmadev; 35267a2003eSSinan Kaya struct hidma_desc *mdesc, *tmp; 35367a2003eSSinan Kaya unsigned long irqflags; 35467a2003eSSinan Kaya LIST_HEAD(descs); 35567a2003eSSinan Kaya unsigned int i; 35667a2003eSSinan Kaya int rc = 0; 35767a2003eSSinan Kaya 35867a2003eSSinan Kaya if (mchan->allocated) 35967a2003eSSinan Kaya return 0; 36067a2003eSSinan Kaya 36167a2003eSSinan Kaya /* Alloc descriptors for this channel */ 36267a2003eSSinan Kaya for (i = 0; i < dmadev->nr_descriptors; i++) { 36367a2003eSSinan Kaya mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT); 36467a2003eSSinan Kaya if (!mdesc) { 36567a2003eSSinan Kaya rc = -ENOMEM; 36667a2003eSSinan Kaya break; 36767a2003eSSinan Kaya } 36867a2003eSSinan Kaya dma_async_tx_descriptor_init(&mdesc->desc, dmach); 36967a2003eSSinan Kaya mdesc->desc.tx_submit = hidma_tx_submit; 37067a2003eSSinan Kaya 37167a2003eSSinan Kaya rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig, 37267a2003eSSinan Kaya "DMA engine", hidma_callback, mdesc, 37367a2003eSSinan Kaya &mdesc->tre_ch); 37467a2003eSSinan Kaya if (rc) { 37567a2003eSSinan Kaya dev_err(dmach->device->dev, 37667a2003eSSinan Kaya "channel alloc failed at %u\n", i); 37767a2003eSSinan Kaya kfree(mdesc); 
37867a2003eSSinan Kaya break; 37967a2003eSSinan Kaya } 38067a2003eSSinan Kaya list_add_tail(&mdesc->node, &descs); 38167a2003eSSinan Kaya } 38267a2003eSSinan Kaya 38367a2003eSSinan Kaya if (rc) { 38467a2003eSSinan Kaya /* return the allocated descriptors */ 38567a2003eSSinan Kaya list_for_each_entry_safe(mdesc, tmp, &descs, node) { 38667a2003eSSinan Kaya hidma_ll_free(dmadev->lldev, mdesc->tre_ch); 38767a2003eSSinan Kaya kfree(mdesc); 38867a2003eSSinan Kaya } 38967a2003eSSinan Kaya return rc; 39067a2003eSSinan Kaya } 39167a2003eSSinan Kaya 39267a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 39367a2003eSSinan Kaya list_splice_tail_init(&descs, &mchan->free); 39467a2003eSSinan Kaya mchan->allocated = true; 39567a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 39667a2003eSSinan Kaya return 1; 39767a2003eSSinan Kaya } 39867a2003eSSinan Kaya 39967a2003eSSinan Kaya static struct dma_async_tx_descriptor * 40067a2003eSSinan Kaya hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, 40167a2003eSSinan Kaya size_t len, unsigned long flags) 40267a2003eSSinan Kaya { 40367a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach); 40467a2003eSSinan Kaya struct hidma_desc *mdesc = NULL; 40567a2003eSSinan Kaya struct hidma_dev *mdma = mchan->dmadev; 40667a2003eSSinan Kaya unsigned long irqflags; 40767a2003eSSinan Kaya 40867a2003eSSinan Kaya /* Get free descriptor */ 40967a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 41067a2003eSSinan Kaya if (!list_empty(&mchan->free)) { 41167a2003eSSinan Kaya mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); 41267a2003eSSinan Kaya list_del(&mdesc->node); 41367a2003eSSinan Kaya } 41467a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 41567a2003eSSinan Kaya 41667a2003eSSinan Kaya if (!mdesc) 41767a2003eSSinan Kaya return NULL; 41867a2003eSSinan Kaya 419875aac8aSShunyong Yang mdesc->desc.flags = flags; 42067a2003eSSinan Kaya 
hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 4215e2db086SSinan Kaya src, dest, len, flags, 4225e2db086SSinan Kaya HIDMA_TRE_MEMCPY); 4235e2db086SSinan Kaya 4245e2db086SSinan Kaya /* Place descriptor in prepared list */ 4255e2db086SSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 4265e2db086SSinan Kaya list_add_tail(&mdesc->node, &mchan->prepared); 4275e2db086SSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 4285e2db086SSinan Kaya 4295e2db086SSinan Kaya return &mdesc->desc; 4305e2db086SSinan Kaya } 4315e2db086SSinan Kaya 4325e2db086SSinan Kaya static struct dma_async_tx_descriptor * 4335e2db086SSinan Kaya hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, 4345e2db086SSinan Kaya size_t len, unsigned long flags) 4355e2db086SSinan Kaya { 4365e2db086SSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach); 4375e2db086SSinan Kaya struct hidma_desc *mdesc = NULL; 4385e2db086SSinan Kaya struct hidma_dev *mdma = mchan->dmadev; 4395e2db086SSinan Kaya unsigned long irqflags; 4405e2db086SSinan Kaya 4415e2db086SSinan Kaya /* Get free descriptor */ 4425e2db086SSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 4435e2db086SSinan Kaya if (!list_empty(&mchan->free)) { 4445e2db086SSinan Kaya mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); 4455e2db086SSinan Kaya list_del(&mdesc->node); 4465e2db086SSinan Kaya } 4475e2db086SSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 4485e2db086SSinan Kaya 4495e2db086SSinan Kaya if (!mdesc) 4505e2db086SSinan Kaya return NULL; 4515e2db086SSinan Kaya 452875aac8aSShunyong Yang mdesc->desc.flags = flags; 4535e2db086SSinan Kaya hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 4545e2db086SSinan Kaya value, dest, len, flags, 4555e2db086SSinan Kaya HIDMA_TRE_MEMSET); 45667a2003eSSinan Kaya 45767a2003eSSinan Kaya /* Place descriptor in prepared list */ 45867a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 45967a2003eSSinan Kaya list_add_tail(&mdesc->node, 
&mchan->prepared); 46067a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 46167a2003eSSinan Kaya 46267a2003eSSinan Kaya return &mdesc->desc; 46367a2003eSSinan Kaya } 46467a2003eSSinan Kaya 46567a2003eSSinan Kaya static int hidma_terminate_channel(struct dma_chan *chan) 46667a2003eSSinan Kaya { 46767a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(chan); 46867a2003eSSinan Kaya struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); 46967a2003eSSinan Kaya struct hidma_desc *tmp, *mdesc; 47067a2003eSSinan Kaya unsigned long irqflags; 47167a2003eSSinan Kaya LIST_HEAD(list); 47267a2003eSSinan Kaya int rc; 47367a2003eSSinan Kaya 47467a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 47567a2003eSSinan Kaya /* give completed requests a chance to finish */ 47667a2003eSSinan Kaya hidma_process_completed(mchan); 47767a2003eSSinan Kaya 47867a2003eSSinan Kaya spin_lock_irqsave(&mchan->lock, irqflags); 479793ae66cSSinan Kaya mchan->last_success = 0; 48067a2003eSSinan Kaya list_splice_init(&mchan->active, &list); 48167a2003eSSinan Kaya list_splice_init(&mchan->prepared, &list); 48267a2003eSSinan Kaya list_splice_init(&mchan->completed, &list); 48399efdb3eSSinan Kaya list_splice_init(&mchan->queued, &list); 48467a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 48567a2003eSSinan Kaya 48667a2003eSSinan Kaya /* this suspends the existing transfer */ 487d1615ca2SSinan Kaya rc = hidma_ll_disable(dmadev->lldev); 48867a2003eSSinan Kaya if (rc) { 48967a2003eSSinan Kaya dev_err(dmadev->ddev.dev, "channel did not pause\n"); 49067a2003eSSinan Kaya goto out; 49167a2003eSSinan Kaya } 49267a2003eSSinan Kaya 49367a2003eSSinan Kaya /* return all user requests */ 49467a2003eSSinan Kaya list_for_each_entry_safe(mdesc, tmp, &list, node) { 49567a2003eSSinan Kaya struct dma_async_tx_descriptor *txd = &mdesc->desc; 49667a2003eSSinan Kaya 49767a2003eSSinan Kaya dma_descriptor_unmap(txd); 4985ade6683SDave Jiang dmaengine_desc_get_callback_invoke(txd, 
NULL); 49967a2003eSSinan Kaya dma_run_dependencies(txd); 50067a2003eSSinan Kaya 50167a2003eSSinan Kaya /* move myself to free_list */ 50267a2003eSSinan Kaya list_move(&mdesc->node, &mchan->free); 50367a2003eSSinan Kaya } 50467a2003eSSinan Kaya 505d1615ca2SSinan Kaya rc = hidma_ll_enable(dmadev->lldev); 50667a2003eSSinan Kaya out: 50767a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 50867a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 50967a2003eSSinan Kaya return rc; 51067a2003eSSinan Kaya } 51167a2003eSSinan Kaya 51267a2003eSSinan Kaya static int hidma_terminate_all(struct dma_chan *chan) 51367a2003eSSinan Kaya { 51467a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(chan); 51567a2003eSSinan Kaya struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); 51667a2003eSSinan Kaya int rc; 51767a2003eSSinan Kaya 51867a2003eSSinan Kaya rc = hidma_terminate_channel(chan); 51967a2003eSSinan Kaya if (rc) 52067a2003eSSinan Kaya return rc; 52167a2003eSSinan Kaya 52267a2003eSSinan Kaya /* reinitialize the hardware */ 52367a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 52467a2003eSSinan Kaya rc = hidma_ll_setup(dmadev->lldev); 52567a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 52667a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 52767a2003eSSinan Kaya return rc; 52867a2003eSSinan Kaya } 52967a2003eSSinan Kaya 53067a2003eSSinan Kaya static void hidma_free_chan_resources(struct dma_chan *dmach) 53167a2003eSSinan Kaya { 53267a2003eSSinan Kaya struct hidma_chan *mchan = to_hidma_chan(dmach); 53367a2003eSSinan Kaya struct hidma_dev *mdma = mchan->dmadev; 53467a2003eSSinan Kaya struct hidma_desc *mdesc, *tmp; 53567a2003eSSinan Kaya unsigned long irqflags; 53667a2003eSSinan Kaya LIST_HEAD(descs); 53767a2003eSSinan Kaya 53867a2003eSSinan Kaya /* terminate running transactions and free descriptors */ 53967a2003eSSinan Kaya hidma_terminate_channel(dmach); 54067a2003eSSinan Kaya 54167a2003eSSinan Kaya 
spin_lock_irqsave(&mchan->lock, irqflags); 54267a2003eSSinan Kaya 54367a2003eSSinan Kaya /* Move data */ 54467a2003eSSinan Kaya list_splice_tail_init(&mchan->free, &descs); 54567a2003eSSinan Kaya 54667a2003eSSinan Kaya /* Free descriptors */ 54767a2003eSSinan Kaya list_for_each_entry_safe(mdesc, tmp, &descs, node) { 54867a2003eSSinan Kaya hidma_ll_free(mdma->lldev, mdesc->tre_ch); 54967a2003eSSinan Kaya list_del(&mdesc->node); 55067a2003eSSinan Kaya kfree(mdesc); 55167a2003eSSinan Kaya } 55267a2003eSSinan Kaya 553d24224deSJason Yan mchan->allocated = false; 55467a2003eSSinan Kaya spin_unlock_irqrestore(&mchan->lock, irqflags); 55567a2003eSSinan Kaya } 55667a2003eSSinan Kaya 55767a2003eSSinan Kaya static int hidma_pause(struct dma_chan *chan) 55867a2003eSSinan Kaya { 55967a2003eSSinan Kaya struct hidma_chan *mchan; 56067a2003eSSinan Kaya struct hidma_dev *dmadev; 56167a2003eSSinan Kaya 56267a2003eSSinan Kaya mchan = to_hidma_chan(chan); 56367a2003eSSinan Kaya dmadev = to_hidma_dev(mchan->chan.device); 56467a2003eSSinan Kaya if (!mchan->paused) { 56567a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 566d1615ca2SSinan Kaya if (hidma_ll_disable(dmadev->lldev)) 56767a2003eSSinan Kaya dev_warn(dmadev->ddev.dev, "channel did not stop\n"); 56867a2003eSSinan Kaya mchan->paused = true; 56967a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 57067a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 57167a2003eSSinan Kaya } 57267a2003eSSinan Kaya return 0; 57367a2003eSSinan Kaya } 57467a2003eSSinan Kaya 57567a2003eSSinan Kaya static int hidma_resume(struct dma_chan *chan) 57667a2003eSSinan Kaya { 57767a2003eSSinan Kaya struct hidma_chan *mchan; 57867a2003eSSinan Kaya struct hidma_dev *dmadev; 57967a2003eSSinan Kaya int rc = 0; 58067a2003eSSinan Kaya 58167a2003eSSinan Kaya mchan = to_hidma_chan(chan); 58267a2003eSSinan Kaya dmadev = to_hidma_dev(mchan->chan.device); 58367a2003eSSinan Kaya if (mchan->paused) { 58467a2003eSSinan Kaya 
pm_runtime_get_sync(dmadev->ddev.dev); 585d1615ca2SSinan Kaya rc = hidma_ll_enable(dmadev->lldev); 58667a2003eSSinan Kaya if (!rc) 58767a2003eSSinan Kaya mchan->paused = false; 58867a2003eSSinan Kaya else 58967a2003eSSinan Kaya dev_err(dmadev->ddev.dev, 59067a2003eSSinan Kaya "failed to resume the channel"); 59167a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 59267a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 59367a2003eSSinan Kaya } 59467a2003eSSinan Kaya return rc; 59567a2003eSSinan Kaya } 59667a2003eSSinan Kaya 59767a2003eSSinan Kaya static irqreturn_t hidma_chirq_handler(int chirq, void *arg) 59867a2003eSSinan Kaya { 59967a2003eSSinan Kaya struct hidma_lldev *lldev = arg; 60067a2003eSSinan Kaya 60167a2003eSSinan Kaya /* 60267a2003eSSinan Kaya * All interrupts are request driven. 60367a2003eSSinan Kaya * HW doesn't send an interrupt by itself. 60467a2003eSSinan Kaya */ 60567a2003eSSinan Kaya return hidma_ll_inthandler(chirq, lldev); 60667a2003eSSinan Kaya } 60767a2003eSSinan Kaya 6088cc12b26SArnd Bergmann #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 6091c0e3e82SSinan Kaya static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg) 6101c0e3e82SSinan Kaya { 6111c0e3e82SSinan Kaya struct hidma_lldev **lldevp = arg; 6121c0e3e82SSinan Kaya struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp); 6131c0e3e82SSinan Kaya 6141c0e3e82SSinan Kaya return hidma_ll_inthandler_msi(chirq, *lldevp, 6151c0e3e82SSinan Kaya 1 << (chirq - dmadev->msi_virqbase)); 6161c0e3e82SSinan Kaya } 6178cc12b26SArnd Bergmann #endif 6181c0e3e82SSinan Kaya 61942d236f8SSinan Kaya static ssize_t hidma_show_values(struct device *dev, 62042d236f8SSinan Kaya struct device_attribute *attr, char *buf) 62142d236f8SSinan Kaya { 6226af6c371SWolfram Sang struct hidma_dev *mdev = dev_get_drvdata(dev); 62342d236f8SSinan Kaya 62442d236f8SSinan Kaya buf[0] = 0; 62542d236f8SSinan Kaya 62642d236f8SSinan Kaya if (strcmp(attr->attr.name, "chid") == 0) 62742d236f8SSinan Kaya 
sprintf(buf, "%d\n", mdev->chidx); 62842d236f8SSinan Kaya 62942d236f8SSinan Kaya return strlen(buf); 63042d236f8SSinan Kaya } 63142d236f8SSinan Kaya 632c6e4584dSSinan Kaya static inline void hidma_sysfs_uninit(struct hidma_dev *dev) 633c6e4584dSSinan Kaya { 634c6e4584dSSinan Kaya device_remove_file(dev->ddev.dev, dev->chid_attrs); 635c6e4584dSSinan Kaya } 636c6e4584dSSinan Kaya 637c6e4584dSSinan Kaya static struct device_attribute* 638c6e4584dSSinan Kaya hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode) 63942d236f8SSinan Kaya { 64042d236f8SSinan Kaya struct device_attribute *attrs; 64142d236f8SSinan Kaya char *name_copy; 64242d236f8SSinan Kaya 64342d236f8SSinan Kaya attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute), 64442d236f8SSinan Kaya GFP_KERNEL); 64542d236f8SSinan Kaya if (!attrs) 646c6e4584dSSinan Kaya return NULL; 64742d236f8SSinan Kaya 64842d236f8SSinan Kaya name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL); 64942d236f8SSinan Kaya if (!name_copy) 650c6e4584dSSinan Kaya return NULL; 65142d236f8SSinan Kaya 65242d236f8SSinan Kaya attrs->attr.name = name_copy; 65342d236f8SSinan Kaya attrs->attr.mode = mode; 65442d236f8SSinan Kaya attrs->show = hidma_show_values; 65542d236f8SSinan Kaya sysfs_attr_init(&attrs->attr); 65642d236f8SSinan Kaya 657c6e4584dSSinan Kaya return attrs; 658c6e4584dSSinan Kaya } 659c6e4584dSSinan Kaya 660c6e4584dSSinan Kaya static int hidma_sysfs_init(struct hidma_dev *dev) 661c6e4584dSSinan Kaya { 662c6e4584dSSinan Kaya dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO); 663c6e4584dSSinan Kaya if (!dev->chid_attrs) 664c6e4584dSSinan Kaya return -ENOMEM; 665c6e4584dSSinan Kaya 666c6e4584dSSinan Kaya return device_create_file(dev->ddev.dev, dev->chid_attrs); 66742d236f8SSinan Kaya } 66842d236f8SSinan Kaya 6691c0e3e82SSinan Kaya #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 6701c0e3e82SSinan Kaya static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) 6711c0e3e82SSinan Kaya 
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	/*
	 * The hardware takes a single MSI address/data pair; only the
	 * first vector (msi_index == 0) programs the EVCA registers.
	 */
	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

/*
 * Release every MSI vector requested by hidma_request_msi() and tear
 * down the platform MSI domain allocation.  No-op when the kernel is
 * built without CONFIG_GENERIC_MSI_IRQ_DOMAIN.
 */
static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

/*
 * Allocate HIDMA_MSI_INTS MSI vectors and attach the channel MSI
 * handler to each.  On partial failure, every vector requested so far
 * is released again and the (non-zero) errno is returned so the caller
 * can fall back to the wired IRQ.  Returns -EINVAL when MSI support is
 * not compiled in.
 */
static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		/* Remember the base virq; the LL layer derives the rest. */
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		/*
		 * NOTE(review): the MSI path hands &dmadev->lldev (pointer to
		 * pointer) as dev_id while the wired path in probe passes
		 * dmadev->lldev directly — confirm hidma_chirq_handler_msi
		 * expects the extra indirection.
		 */
		rc = devm_request_irq(&pdev->dev, desc->irq,
				       hidma_chirq_handler_msi,
				       0, "qcom-hidma-msi",
				       &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);

	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

/*
 * Check whether the matched ACPI/OF entry advertises @test_cap.  The
 * capability bits are stored directly in the match-table .data pointer
 * (see hidma_acpi_ids / hidma_match below).
 */
static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
	enum hidma_cap cap;

	cap = (enum hidma_cap) device_get_match_data(dev);
	return cap ?
((cap & test_cap) > 0) : 0; 7521c0e3e82SSinan Kaya } 7531c0e3e82SSinan Kaya 75467a2003eSSinan Kaya static int hidma_probe(struct platform_device *pdev) 75567a2003eSSinan Kaya { 75667a2003eSSinan Kaya struct hidma_dev *dmadev; 75767a2003eSSinan Kaya struct resource *trca_resource; 75867a2003eSSinan Kaya struct resource *evca_resource; 75967a2003eSSinan Kaya int chirq; 76067a2003eSSinan Kaya void __iomem *evca; 76167a2003eSSinan Kaya void __iomem *trca; 76267a2003eSSinan Kaya int rc; 7631c0e3e82SSinan Kaya bool msi; 76467a2003eSSinan Kaya 76567a2003eSSinan Kaya pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); 76667a2003eSSinan Kaya pm_runtime_use_autosuspend(&pdev->dev); 76767a2003eSSinan Kaya pm_runtime_set_active(&pdev->dev); 76867a2003eSSinan Kaya pm_runtime_enable(&pdev->dev); 76967a2003eSSinan Kaya 77067a2003eSSinan Kaya trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); 77167a2003eSSinan Kaya trca = devm_ioremap_resource(&pdev->dev, trca_resource); 77267a2003eSSinan Kaya if (IS_ERR(trca)) { 77367a2003eSSinan Kaya rc = -ENOMEM; 77467a2003eSSinan Kaya goto bailout; 77567a2003eSSinan Kaya } 77667a2003eSSinan Kaya 77767a2003eSSinan Kaya evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); 77867a2003eSSinan Kaya evca = devm_ioremap_resource(&pdev->dev, evca_resource); 77967a2003eSSinan Kaya if (IS_ERR(evca)) { 78067a2003eSSinan Kaya rc = -ENOMEM; 78167a2003eSSinan Kaya goto bailout; 78267a2003eSSinan Kaya } 78367a2003eSSinan Kaya 78467a2003eSSinan Kaya /* 78567a2003eSSinan Kaya * This driver only handles the channel IRQs. 78667a2003eSSinan Kaya * Common IRQ is handled by the management driver. 
78767a2003eSSinan Kaya */ 78867a2003eSSinan Kaya chirq = platform_get_irq(pdev, 0); 78967a2003eSSinan Kaya if (chirq < 0) { 79067a2003eSSinan Kaya rc = -ENODEV; 79167a2003eSSinan Kaya goto bailout; 79267a2003eSSinan Kaya } 79367a2003eSSinan Kaya 79467a2003eSSinan Kaya dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); 79567a2003eSSinan Kaya if (!dmadev) { 79667a2003eSSinan Kaya rc = -ENOMEM; 79767a2003eSSinan Kaya goto bailout; 79867a2003eSSinan Kaya } 79967a2003eSSinan Kaya 80067a2003eSSinan Kaya INIT_LIST_HEAD(&dmadev->ddev.channels); 80167a2003eSSinan Kaya spin_lock_init(&dmadev->lock); 80267a2003eSSinan Kaya dmadev->ddev.dev = &pdev->dev; 80367a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 80467a2003eSSinan Kaya 80567a2003eSSinan Kaya dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); 8065e2db086SSinan Kaya dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask); 80767a2003eSSinan Kaya if (WARN_ON(!pdev->dev.dma_mask)) { 80867a2003eSSinan Kaya rc = -ENXIO; 80967a2003eSSinan Kaya goto dmafree; 81067a2003eSSinan Kaya } 81167a2003eSSinan Kaya 81267a2003eSSinan Kaya dmadev->dev_evca = evca; 81367a2003eSSinan Kaya dmadev->evca_resource = evca_resource; 81467a2003eSSinan Kaya dmadev->dev_trca = trca; 81567a2003eSSinan Kaya dmadev->trca_resource = trca_resource; 81667a2003eSSinan Kaya dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; 8175e2db086SSinan Kaya dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset; 81867a2003eSSinan Kaya dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; 81967a2003eSSinan Kaya dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; 82067a2003eSSinan Kaya dmadev->ddev.device_tx_status = hidma_tx_status; 82167a2003eSSinan Kaya dmadev->ddev.device_issue_pending = hidma_issue_pending; 82267a2003eSSinan Kaya dmadev->ddev.device_pause = hidma_pause; 82367a2003eSSinan Kaya dmadev->ddev.device_resume = hidma_resume; 82467a2003eSSinan Kaya dmadev->ddev.device_terminate_all = 
hidma_terminate_all; 82567a2003eSSinan Kaya dmadev->ddev.copy_align = 8; 82667a2003eSSinan Kaya 8271c0e3e82SSinan Kaya /* 8281c0e3e82SSinan Kaya * Determine the MSI capability of the platform. Old HW doesn't 8291c0e3e82SSinan Kaya * support MSI. 8301c0e3e82SSinan Kaya */ 83195fbfb7aSSinan Kaya msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP); 83267a2003eSSinan Kaya device_property_read_u32(&pdev->dev, "desc-count", 83367a2003eSSinan Kaya &dmadev->nr_descriptors); 83467a2003eSSinan Kaya 83513058e33SSinan Kaya if (nr_desc_prm) { 83613058e33SSinan Kaya dev_info(&pdev->dev, "overriding number of descriptors as %d\n", 83713058e33SSinan Kaya nr_desc_prm); 83867a2003eSSinan Kaya dmadev->nr_descriptors = nr_desc_prm; 83913058e33SSinan Kaya } 84067a2003eSSinan Kaya 84167a2003eSSinan Kaya if (!dmadev->nr_descriptors) 84267a2003eSSinan Kaya dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; 84367a2003eSSinan Kaya 844b5419adcSSinan Kaya if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP)) 845b5419adcSSinan Kaya dmadev->chidx = readl(dmadev->dev_trca + 0x40); 846b5419adcSSinan Kaya else 84767a2003eSSinan Kaya dmadev->chidx = readl(dmadev->dev_trca + 0x28); 84867a2003eSSinan Kaya 84967a2003eSSinan Kaya /* Set DMA mask to 64 bits. 
*/ 85067a2003eSSinan Kaya rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 85167a2003eSSinan Kaya if (rc) { 85267a2003eSSinan Kaya dev_warn(&pdev->dev, "unable to set coherent mask to 64"); 85367a2003eSSinan Kaya rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 85467a2003eSSinan Kaya if (rc) 85567a2003eSSinan Kaya goto dmafree; 85667a2003eSSinan Kaya } 85767a2003eSSinan Kaya 85867a2003eSSinan Kaya dmadev->lldev = hidma_ll_init(dmadev->ddev.dev, 85967a2003eSSinan Kaya dmadev->nr_descriptors, dmadev->dev_trca, 86067a2003eSSinan Kaya dmadev->dev_evca, dmadev->chidx); 86167a2003eSSinan Kaya if (!dmadev->lldev) { 86267a2003eSSinan Kaya rc = -EPROBE_DEFER; 86367a2003eSSinan Kaya goto dmafree; 86467a2003eSSinan Kaya } 86567a2003eSSinan Kaya 8661c0e3e82SSinan Kaya platform_set_drvdata(pdev, dmadev); 8671c0e3e82SSinan Kaya if (msi) 8681c0e3e82SSinan Kaya rc = hidma_request_msi(dmadev, pdev); 8691c0e3e82SSinan Kaya 8701c0e3e82SSinan Kaya if (!msi || rc) { 8711c0e3e82SSinan Kaya hidma_ll_setup_irq(dmadev->lldev, false); 8721c0e3e82SSinan Kaya rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 8731c0e3e82SSinan Kaya 0, "qcom-hidma", dmadev->lldev); 87467a2003eSSinan Kaya if (rc) 87567a2003eSSinan Kaya goto uninit; 8761c0e3e82SSinan Kaya } 87767a2003eSSinan Kaya 87867a2003eSSinan Kaya INIT_LIST_HEAD(&dmadev->ddev.channels); 87967a2003eSSinan Kaya rc = hidma_chan_init(dmadev, 0); 88067a2003eSSinan Kaya if (rc) 88167a2003eSSinan Kaya goto uninit; 88267a2003eSSinan Kaya 88367a2003eSSinan Kaya rc = dma_async_device_register(&dmadev->ddev); 88467a2003eSSinan Kaya if (rc) 88567a2003eSSinan Kaya goto uninit; 88667a2003eSSinan Kaya 88767a2003eSSinan Kaya dmadev->irq = chirq; 888*00c4747aSAllen Pais tasklet_setup(&dmadev->task, hidma_issue_task); 889570d0176SSinan Kaya hidma_debug_init(dmadev); 890c6e4584dSSinan Kaya hidma_sysfs_init(dmadev); 89167a2003eSSinan Kaya dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); 
89267a2003eSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 89367a2003eSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 89467a2003eSSinan Kaya return 0; 89567a2003eSSinan Kaya 89667a2003eSSinan Kaya uninit: 8971c0e3e82SSinan Kaya if (msi) 8981c0e3e82SSinan Kaya hidma_free_msis(dmadev); 8991c0e3e82SSinan Kaya 90067a2003eSSinan Kaya hidma_ll_uninit(dmadev->lldev); 90167a2003eSSinan Kaya dmafree: 90267a2003eSSinan Kaya if (dmadev) 90367a2003eSSinan Kaya hidma_free(dmadev); 90467a2003eSSinan Kaya bailout: 90567a2003eSSinan Kaya pm_runtime_put_sync(&pdev->dev); 90667a2003eSSinan Kaya pm_runtime_disable(&pdev->dev); 90767a2003eSSinan Kaya return rc; 90867a2003eSSinan Kaya } 90967a2003eSSinan Kaya 910dc7c733aSSinan Kaya static void hidma_shutdown(struct platform_device *pdev) 911dc7c733aSSinan Kaya { 912dc7c733aSSinan Kaya struct hidma_dev *dmadev = platform_get_drvdata(pdev); 913dc7c733aSSinan Kaya 914dc7c733aSSinan Kaya dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n"); 915dc7c733aSSinan Kaya 916dc7c733aSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 917dc7c733aSSinan Kaya if (hidma_ll_disable(dmadev->lldev)) 918dc7c733aSSinan Kaya dev_warn(dmadev->ddev.dev, "channel did not stop\n"); 919dc7c733aSSinan Kaya pm_runtime_mark_last_busy(dmadev->ddev.dev); 920dc7c733aSSinan Kaya pm_runtime_put_autosuspend(dmadev->ddev.dev); 921dc7c733aSSinan Kaya 922dc7c733aSSinan Kaya } 923dc7c733aSSinan Kaya 92467a2003eSSinan Kaya static int hidma_remove(struct platform_device *pdev) 92567a2003eSSinan Kaya { 92667a2003eSSinan Kaya struct hidma_dev *dmadev = platform_get_drvdata(pdev); 92767a2003eSSinan Kaya 92867a2003eSSinan Kaya pm_runtime_get_sync(dmadev->ddev.dev); 92967a2003eSSinan Kaya dma_async_device_unregister(&dmadev->ddev); 9301c0e3e82SSinan Kaya if (!dmadev->lldev->msi_support) 93167a2003eSSinan Kaya devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); 9321c0e3e82SSinan Kaya else 9331c0e3e82SSinan Kaya hidma_free_msis(dmadev); 9341c0e3e82SSinan 
	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
/*
 * ACPI ids: the second field carries the hidma_cap bits consumed by
 * hidma_test_capability() via device_get_match_data().
 */
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062", HIDMA_MSI_CAP},
	{"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

/* OF ids: .data carries the same hidma_cap bits as the ACPI table. */
static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
	{.compatible = "qcom,hidma-1.2",
	 .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");