// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Based on drivers/misc/eeprom/sunxi_sid.c
 */

#include <linux/device.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>

#include <soc/tegra/fuse.h>

#include "fuse.h"

#define FUSE_BEGIN	0x100
#define FUSE_UID_LOW	0x08
#define FUSE_UID_HIGH	0x0c

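/*
 * Early boot reads bypass the DMA path and access the FUSE registers
 * directly through the already mapped MMIO region.
 */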
static u32 tegra20_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
	return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
}

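/* DMA completion callback: wake up the waiter in tegra20_fuse_read(). */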
static void apb_dma_complete(void *args)
{
	struct tegra_fuse *fuse = args;

	complete(&fuse->apbdma.wait);
}

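/*
 * Runtime reads are done through an APB DMA channel: the 32-bit fuse
 * register is transferred device-to-memory into a small coherent bounce
 * buffer and the caller waits up to 50 ms for completion. On timeout the
 * transfer is terminated and 0 is returned.
 */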
static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
{
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct dma_async_tx_descriptor *dma_desc;
	unsigned long time_left;
	u32 value = 0;
	int err;

	err = pm_runtime_resume_and_get(fuse->dev);
	if (err)
		return err;

	mutex_lock(&fuse->apbdma.lock);

	fuse->apbdma.config.src_addr = fuse->phys + FUSE_BEGIN + offset;

	err = dmaengine_slave_config(fuse->apbdma.chan, &fuse->apbdma.config);
	if (err)
		goto out;

	dma_desc = dmaengine_prep_slave_single(fuse->apbdma.chan,
					       fuse->apbdma.phys,
					       sizeof(u32), DMA_DEV_TO_MEM,
					       flags);
	if (!dma_desc)
		goto out;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = fuse;

	reinit_completion(&fuse->apbdma.wait);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(fuse->apbdma.chan);
	time_left = wait_for_completion_timeout(&fuse->apbdma.wait,
						msecs_to_jiffies(50));

	if (WARN(time_left == 0, "apb read dma timed out"))
		dmaengine_terminate_all(fuse->apbdma.chan);
	else
		value = *fuse->apbdma.virt;

out:
	mutex_unlock(&fuse->apbdma.lock);
	pm_runtime_put(fuse->dev);
	return value;
}

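/* Only accept channels provided by the Tegra20 APB DMA controller. */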
static bool dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct device_node *np = chan->device->dev->of_node;

	return of_device_is_compatible(np, "nvidia,tegra20-apbdma");
}

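/* devm action: give the APB DMA channel back when the device is torn down. */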
static void tegra20_fuse_release_channel(void *data)
{
	struct tegra_fuse *fuse = data;

	dma_release_channel(fuse->apbdma.chan);
	fuse->apbdma.chan = NULL;
}

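/* devm action: free the coherent bounce buffer used for DMA reads. */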
static void tegra20_fuse_free_coherent(void *data)
{
	struct tegra_fuse *fuse = data;

	dma_free_coherent(fuse->dev, sizeof(u32), fuse->apbdma.virt,
			  fuse->apbdma.phys);
	fuse->apbdma.virt = NULL;
	fuse->apbdma.phys = 0x0;
}

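/*
 * Request an APB DMA channel (deferring probe if none is available yet),
 * allocate a one-word coherent bounce buffer and pre-configure the slave
 * transfer parameters used by tegra20_fuse_read().
 */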
static int tegra20_fuse_probe(struct tegra_fuse *fuse)
{
	dma_cap_mask_t mask;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL);
	if (!fuse->apbdma.chan)
		return -EPROBE_DEFER;

	err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_release_channel,
				       fuse);
	if (err)
		return err;

	fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
					       &fuse->apbdma.phys,
					       GFP_KERNEL);
	if (!fuse->apbdma.virt)
		return -ENOMEM;

	err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_free_coherent,
				       fuse);
	if (err)
		return err;

	fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	fuse->apbdma.config.src_maxburst = 1;
	fuse->apbdma.config.dst_maxburst = 1;
	fuse->apbdma.config.direction = DMA_DEV_TO_MEM;
	fuse->apbdma.config.device_fc = false;

	init_completion(&fuse->apbdma.wait);
	mutex_init(&fuse->apbdma.lock);
	fuse->read = tegra20_fuse_read;

	return 0;
}

static const struct tegra_fuse_info tegra20_fuse_info = {
	.read = tegra20_fuse_read,
	.size = 0x1f8,
	.spare = 0x100,
};

/* Early boot code. This code is called before the devices are created */

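/*
 * Feed chip-specific identifiers (SKU, straps, chip ID, process/speedo IDs
 * and the unique ID fuses) into the entropy pool as device randomness.
 */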
static void __init tegra20_fuse_add_randomness(void)
{
	u32 randomness[7];

	randomness[0] = tegra_sku_info.sku_id;
	randomness[1] = tegra_read_straps();
	randomness[2] = tegra_read_chipid();
	randomness[3] = tegra_sku_info.cpu_process_id << 16;
	randomness[3] |= tegra_sku_info.soc_process_id;
	randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
	randomness[4] |= tegra_sku_info.soc_speedo_id;
	randomness[5] = tegra_fuse_read_early(FUSE_UID_LOW);
	randomness[6] = tegra_fuse_read_early(FUSE_UID_HIGH);

	add_device_randomness(randomness, sizeof(randomness));
}

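/* Hook up direct MMIO reads, derive revision and speedo data, and seed randomness. */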
static void __init tegra20_fuse_init(struct tegra_fuse *fuse)
{
	fuse->read_early = tegra20_fuse_read_early;

	tegra_init_revision();
	fuse->soc->speedo_init(&tegra_sku_info);
	tegra20_fuse_add_randomness();
}

const struct tegra_fuse_soc tegra20_fuse_soc = {
	.init = tegra20_fuse_init,
	.speedo_init = tegra20_init_speedo_data,
	.probe = tegra20_fuse_probe,
	.info = &tegra20_fuse_info,
	.soc_attr_group = &tegra_soc_attr_group,
	.clk_suspend_on = false,
};