// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "../habanalabs.h"
#include "../../include/hw_ip/pci/pci_general.h"

#include <linux/pci.h>

#include <trace/events/habanalabs.h>

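/*
 * hdev->pldm is set when the driver runs on a simulation platform (PLDM),
 * where everything is orders of magnitude slower, hence the extended timeout.
 */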
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC	(HL_PCI_ELBI_TIMEOUT_MSEC * 100)

#define IATU_REGION_CTRL_REGION_EN_MASK		BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK	BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK	BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK		GENMASK(10, 8)

/**
 * hl_pci_bars_map() - Map PCI BARs.
 * @hdev: Pointer to hl_device structure.
 * @name: Array of BAR names.
 * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
 *
 * Request PCI regions and map them to kernel virtual addresses.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
			bool is_wc[3])
{
	struct pci_dev *pdev = hdev->pdev;
	int rc, i, bar;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	for (i = 0 ; i < 3 ; i++) {
		bar = i * 2; /* 64-bit BARs */
		hdev->pcie_bar[bar] = is_wc[i] ?
				pci_ioremap_wc_bar(pdev, bar) :
				pci_ioremap_bar(pdev, bar);
		if (!hdev->pcie_bar[bar]) {
			dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
					is_wc[i] ? "_wc" : "", name[i]);
			rc = -ENODEV;
			goto err;
		}
	}

	return 0;

err:
	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		if (hdev->pcie_bar[bar])
			iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);

	return rc;
}
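
/*
 * Illustrative sketch (not part of this file): an ASIC-specific
 * pci_bars_map() callback would typically wrap the helper above like so,
 * where the BAR names and write-combine flags are per-ASIC assumptions:
 *
 *	static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
 *	bool is_wc[3] = {false, false, true};
 *
 *	rc = hl_pci_bars_map(hdev, name, is_wc);
 */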

/**
 * hl_pci_bars_unmap() - Unmap PCI BARs.
 * @hdev: Pointer to hl_device structure.
 *
 * Release all PCI BARs and unmap their virtual addresses.
 */
static void hl_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int i, bar;

	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);
}

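/**
 * hl_pci_elbi_read() - Read through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to read from.
 * @data: Pointer to store the read data in.
 *
 * Return: 0 on success, negative value for failure.
 */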
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, 0);

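	/* Poll the status register until HW reports completion or we time out */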
	timeout = ktime_add_ms(ktime_get(), msec);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);

		if (unlikely(trace_habanalabs_elbi_read_enabled()))
			trace_habanalabs_elbi_read(&hdev->pdev->dev, (u32) addr, val);

		return 0;
	}

	if (val & PCI_CONFIG_ELBI_STS_ERR) {
		dev_err(hdev->dev, "Error reading from ELBI\n");
		return -EIO;
	}

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI read didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI read has undefined bits in status\n");
	return -EIO;
}

/**
 * hl_pci_elbi_write() - Write through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to.
 * @data: Data to write.
 *
 * Return: 0 on success, negative value for failure.
 */
static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), msec);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
		if (unlikely(trace_habanalabs_elbi_write_enabled()))
			trace_habanalabs_elbi_write(&hdev->pdev->dev, (u32) addr, val);
		return 0;
	}

	if (val & PCI_CONFIG_ELBI_STS_ERR)
		return -EIO;

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}

/**
 * hl_pci_iatu_write() - Write to an iATU register through the DBI window.
 * @hdev: Pointer to hl_device structure.
 * @addr: iATU register offset to write to.
 * @data: Data to write.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

	/* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);

	rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
				data);

	if (rc)
		return -EIO;

	return 0;
}

/**
 * hl_pci_set_inbound_region() - Configure an inbound region.
 * @hdev: Pointer to hl_device structure.
 * @region: Inbound region number.
 * @pci_region: Inbound region parameters.
 *
 * Configure the iATU inbound region.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
		struct hl_inbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_phys_base, region_base, region_end_address;
	u32 offset, ctrl_reg_val;
	int rc = 0;

	/* Each iATU region occupies 0x200 bytes of DBI space; inbound
	 * regions start at offset 0x100 within their region
	 */
	offset = (0x200 * region) + 0x100;

	if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
		bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
		region_base = bar_phys_base + pci_region->offset_in_bar;
		region_end_address = region_base + pci_region->size - 1;

		rc |= hl_pci_iatu_write(hdev, offset + 0x8,
				lower_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0xC,
				upper_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0x10,
				lower_32_bits(region_end_address));
	}

	/* Point to the specified address */
	rc |= hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(pci_region->addr));

	/* Set bar type as memory */
	rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);

	/* Enable + bar/address match + match enable + bar number */
	ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, pci_region->mode);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);

	if (pci_region->mode == PCI_BAR_MATCH_MODE)
		ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, pci_region->bar);

	rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);

	/* Return the DBI window to the default location
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	if (rc)
		dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
				pci_region->bar, pci_region->addr);

	return rc;
}
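
/*
 * Illustrative sketch (not part of this file): an ASIC-specific init_iatu()
 * callback might set up a BAR-match inbound region for on-chip SRAM, where
 * SRAM_CFG_BAR_ID and SRAM_BASE_ADDR are per-ASIC assumptions:
 *
 *	struct hl_inbound_pci_region inbound_region;
 *
 *	inbound_region.mode = PCI_BAR_MATCH_MODE;
 *	inbound_region.bar = SRAM_CFG_BAR_ID;
 *	inbound_region.addr = SRAM_BASE_ADDR;
 *	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
 */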

/**
 * hl_pci_set_outbound_region() - Configure outbound region 0.
 * @hdev: Pointer to hl_device structure.
 * @pci_region: Outbound region parameters.
 *
 * Configure the iATU outbound region 0.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_outbound_region(struct hl_device *hdev,
		struct hl_outbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 outbound_region_end_address;
	int rc = 0;

	/* Outbound Region 0 */
	outbound_region_end_address =
			pci_region->addr + pci_region->size - 1;
	rc |= hl_pci_iatu_write(hdev, 0x008,
			lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x00C,
			upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x010,
			lower_32_bits(outbound_region_end_address));
	rc |= hl_pci_iatu_write(hdev, 0x014, 0);

	rc |= hl_pci_iatu_write(hdev, 0x018, 0);

	rc |= hl_pci_iatu_write(hdev, 0x020,
			upper_32_bits(outbound_region_end_address));
	/* Increase region size */
	rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	return rc;
}
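
/*
 * Illustrative sketch (not part of this file): the same init_iatu() callback
 * would typically map outbound region 0 to host memory, where HOST_PHYS_BASE
 * and HOST_PHYS_SIZE are per-ASIC assumptions:
 *
 *	struct hl_outbound_pci_region outbound_region;
 *
 *	outbound_region.addr = HOST_PHYS_BASE;
 *	outbound_region.size = HOST_PHYS_SIZE;
 *	rc = hl_pci_set_outbound_region(hdev, &outbound_region);
 */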

/**
 * hl_get_pci_memory_region() - Get the PCI region for a given address.
 * @hdev: Pointer to hl_device structure.
 * @addr: Device address.
 *
 * Return: Region index on success, otherwise PCI_REGION_NUMBER (invalid
 * region index).
 */
enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr)
{
	int i;

	for (i = 0 ; i < PCI_REGION_NUMBER ; i++) {
		struct pci_mem_region *region = &hdev->pci_mem_region[i];

		if (!region->used)
			continue;

		if ((addr >= region->region_base) &&
			(addr < region->region_base + region->region_size))
			return i;
	}

	return PCI_REGION_NUMBER;
}
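
/*
 * Illustrative sketch (not part of this file): callers can use the lookup
 * above to validate a device address before accessing it, e.g.:
 *
 *	if (hl_get_pci_memory_region(hdev, addr) == PCI_REGION_NUMBER)
 *		return -EINVAL;
 */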

/**
 * hl_pci_init() - PCI initialization code.
 * @hdev: Pointer to hl_device structure.
 *
 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_init(struct hl_device *hdev)
380 struct asic_fixed_properties *prop = &hdev->asic_prop;
381 struct pci_dev *pdev = hdev->pdev;
382 int rc;
383
384 rc = pci_enable_device_mem(pdev);
385 if (rc) {
386 dev_err(hdev->dev, "can't enable PCI device\n");
387 return rc;
388 }
389
390 pci_set_master(pdev);
391
392 rc = hdev->asic_funcs->pci_bars_map(hdev);
393 if (rc) {
394 dev_err(hdev->dev, "Failed to map PCI BAR addresses\n");
395 goto disable_device;
396 }
397
398 rc = hdev->asic_funcs->init_iatu(hdev);
399 if (rc) {
400 dev_err(hdev->dev, "PCI controller was not initialized successfully\n");
401 goto unmap_pci_bars;
402 }
403
404 /* Driver must sleep in order for FW to finish the iATU configuration */
405 if (hdev->asic_prop.iatu_done_by_fw)
406 usleep_range(2000, 3000);
407
408 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(prop->dma_mask));
409 if (rc) {
410 dev_err(hdev->dev,
411 "Failed to set dma mask to %d bits, error %d\n",
412 prop->dma_mask, rc);
413 goto unmap_pci_bars;
414 }
415
416 dma_set_max_seg_size(&pdev->dev, U32_MAX);
417
418 return 0;
419
420 unmap_pci_bars:
421 hl_pci_bars_unmap(hdev);
422 disable_device:
423 pci_disable_device(pdev);
424
425 return rc;
426 }
427
/**
 * hl_pci_fini() - PCI finalization code.
 * @hdev: Pointer to hl_device structure.
 *
 * Unmap PCI BARs and disable PCI device.
 */
void hl_pci_fini(struct hl_device *hdev)
{
	hl_pci_bars_unmap(hdev);

	pci_disable_device(hdev->pdev);
}