/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Miscellaneous functionality used in the other GenWQE driver parts.
 */

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/pgtable.h>

#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"

/**
 * __genwqe_writeq() - Write 64-bit register
 * @cd:	        genwqe device descriptor
 * @byte_offs:  byte offset within BAR
 * @val:        64-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
	return 0;
}

/**
 * __genwqe_readq() - Read 64-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  offset within BAR
 *
 * Return: value from register
 */
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffffffffffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x000000000000ffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x00000000ffff0000ull;

	if (cd->mmio == NULL)
		return 0xffffffffffffffffull;

	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}

/**
 * __genwqe_writel() - Write 32-bit register
 * @cd:	        genwqe device descriptor
 * @byte_offs:  byte offset within BAR
 * @val:        32-bit value
 *
 * Return: 0 if success; < 0 if error
 */
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
	return 0;
}

/**
 * __genwqe_readl() - Read 32-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  offset within BAR
 *
 * Return: Value from register
 */
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffff;

	if (cd->mmio == NULL)
		return 0xffffffff;

	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}

/**
 * genwqe_read_app_id() - Extract app_id
 * @cd:         genwqe device descriptor
 * @app_name:   buffer for the (up to 4 character) app name
 * @len:        length of the buffer
 *
 * cd->app_unitcfg needs to be filled with valid data first.
 *
 * Return: number of characters copied to @app_name
 */
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
	int i, j;
	u32 app_id = (u32)cd->app_unitcfg;

	memset(app_name, 0, len);
	for (i = 0, j = 0; j < min(len, 4); j++) {
		char ch = (char)((app_id >> (24 - j*8)) & 0xff);

		if (ch == ' ')
			continue;
		app_name[i++] = isprint(ch) ? ch : 'X';
	}
	return i;
}
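
/*
 * Example (hypothetical register value, for illustration only): if
 * the low 32 bits of APP_UNITCFG were 0x475a4950, the bytes decode
 * from the most significant byte down as 'G' 'Z' 'I' 'P', so
 * genwqe_read_app_id() would return 4 and fill app_name with "GZIP".
 */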

/**
 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
 *
 * The existing kernel CRC functions use a different polynomial,
 * therefore we could not use them here.
 *
 * GenWQE's polynomial = 0x20044009
 */
#define CRC32_POLYNOMIAL	0x20044009
static u32 crc32_tab[256];	/* crc32 lookup table */

void genwqe_init_crc32(void)
{
	int i, j;
	u32 crc;

	for (i = 0;  i < 256;  i++) {
		crc = i << 24;
		for (j = 0;  j < 8;  j++) {
			if (crc & 0x80000000)
				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
			else
				crc = (crc << 1);
		}
		crc32_tab[i] = crc;
	}
}

/**
 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
 * @buff:       pointer to data buffer
 * @len:        length of data for calculation
 * @init:       initial crc (0xffffffff at start)
 *
 * Polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
 *
 * Example: 4 bytes 0x01 0x02 0x03 0x04 with init=0xffffffff should
 * result in a crc32 of 0xf33cb7d3.
 *
 * The existing kernel crc functions did not cover this polynomial yet.
 *
 * Return: crc32 checksum.
 */
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
	int i;
	u32 crc;

	crc = init;
	while (len--) {
		i = ((crc >> 24) ^ *buff++) & 0xFF;
		crc = (crc << 8) ^ crc32_tab[i];
	}
	return crc;
}
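
/*
 * Usage sketch (hypothetical caller; genwqe_init_crc32() must have
 * been called once, e.g. at module load, before genwqe_crc32() is
 * used):
 *
 *	u8 buf[4] = { 0x01, 0x02, 0x03, 0x04 };
 *	u32 crc = genwqe_crc32(buf, sizeof(buf), 0xffffffff);
 *
 * Per the example in the kernel-doc above, crc is then 0xf33cb7d3.
 */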

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
			       dma_addr_t *dma_handle)
{
	if (get_order(size) > MAX_ORDER)
		return NULL;

	return pci_alloc_consistent(cd->pci_dev, size, dma_handle);
}

void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			     void *vaddr, dma_addr_t dma_handle)
{
	if (vaddr == NULL)
		return;

	pci_free_consistent(cd->pci_dev, size, vaddr, dma_handle);
}

static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
			      int num_pages)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
		pci_unmap_page(pci_dev, dma_list[i],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;
	}
}

static int genwqe_map_pages(struct genwqe_dev *cd,
			   struct page **page_list, int num_pages,
			   dma_addr_t *dma_list)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* establish DMA mapping for requested pages */
	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr;

		dma_list[i] = 0x0;
		daddr = pci_map_page(pci_dev, page_list[i],
				     0,	 /* map_offs */
				     PAGE_SIZE,
				     PCI_DMA_BIDIRECTIONAL);  /* FIXME rd/rw */

		if (pci_dma_mapping_error(pci_dev, daddr)) {
			dev_err(&pci_dev->dev,
				"[%s] err: no dma addr daddr=%016llx!\n",
				__func__, (long long)daddr);
			goto err;
		}

		dma_list[i] = daddr;
	}
	return 0;

 err:
	genwqe_unmap_pages(cd, dma_list, num_pages);
	return -EIO;
}

static int genwqe_sgl_size(int num_pages)
{
	int len, num_tlb = num_pages / 7;

	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
	return roundup(len, PAGE_SIZE);
}
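
/*
 * Sizing sketch: each block of eight sg_entries holds seven data
 * entries plus one chaining entry to the next block, hence the
 * num_pages / 7 extra entries; one more entry terminates the list.
 * With the 16-byte sg_entry used here and an assumed 4 KiB
 * PAGE_SIZE, e.g. num_pages = 100 yields 100 + 14 + 1 = 115
 * entries, i.e. 1840 bytes, rounded up to one page.
 */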

/**
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 * @cd:         genwqe device descriptor
 * @sgl:        scatter-gather list to be set up
 * @user_addr:  user virtual address
 * @user_size:  size of the user buffer
 *
 * Allocates memory for sgl and overlapping pages. Pages which might
 * overlap other user-space memory blocks are being cached for DMAs,
 * such that we do not run into synchronization issues. Data is copied
 * from user-space into the cached pages.
 */
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size)
{
	int rc = -ENOMEM;
	struct pci_dev *pci_dev = cd->pci_dev;

	sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
	sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
	sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
	sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

	dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
		__func__, user_addr, user_size, sgl->nr_pages,
		sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

	sgl->user_addr = user_addr;
	sgl->user_size = user_size;
	sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

	if (get_order(sgl->sgl_size) > MAX_ORDER) {
		dev_err(&pci_dev->dev,
			"[%s] err: too much memory requested!\n", __func__);
		return -ENOMEM;
	}

	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
					     &sgl->sgl_dma_addr);
	if (sgl->sgl == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: no memory available!\n", __func__);
		return -ENOMEM;
	}

	/* Only use buffering on incomplete pages */
	if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
		sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->fpage_dma_addr);
		if (sgl->fpage == NULL)
			goto err_out;

		/* Sync with user memory */
		if (copy_from_user(sgl->fpage + sgl->fpage_offs,
				   user_addr, sgl->fpage_size)) {
			rc = -EFAULT;
			goto err_out;
		}
	}
	if (sgl->lpage_size != 0) {
		sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->lpage_dma_addr);
		if (sgl->lpage == NULL)
			goto err_out1;

		/* Sync with user memory */
		if (copy_from_user(sgl->lpage, user_addr + user_size -
				   sgl->lpage_size, sgl->lpage_size)) {
			rc = -EFAULT;
			goto err_out1;
		}
	}
	return 0;

 err_out1:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
				 sgl->fpage_dma_addr);
 err_out:
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);
	return rc;	/* -ENOMEM, or -EFAULT from copy_from_user */
}

/**
 * genwqe_setup_sgl() - Setup the hardware scatter-gather list
 * @cd:         genwqe device descriptor
 * @sgl:        scatter-gather list as set up by genwqe_alloc_sync_sgl()
 * @dma_list:   dma addresses of the pinned user pages
 *
 * Each block of 8 sg_entries starts with a chaining entry to the
 * next block; entries for DMA-contiguous pages are merged.
 */
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list)
{
	int i = 0, j = 0, p;
	unsigned long dma_offs, map_offs;
	dma_addr_t prev_daddr = 0;
	struct sg_entry *s, *last_s = NULL;
	size_t size = sgl->user_size;

	dma_offs = 128;		/* next block if needed/dma_offset */
	map_offs = sgl->fpage_offs; /* offset in first page */

	s = &sgl->sgl[0];	/* first set of 8 entries */
	p = 0;			/* page */
	while (p < sgl->nr_pages) {
		dma_addr_t daddr;
		unsigned int size_to_map;

		/* always write the chaining entry, cleanup is done later */
		j = 0;
		s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
		s[j].len	 = cpu_to_be32(128);
		s[j].flags	 = cpu_to_be32(SG_CHAINED);
		j++;

		while (j < 8) {
			/* DMA mapping for requested page, offs, size */
			size_to_map = min(size, PAGE_SIZE - map_offs);

			if ((p == 0) && (sgl->fpage != NULL)) {
				daddr = sgl->fpage_dma_addr + map_offs;

			} else if ((p == sgl->nr_pages - 1) &&
				   (sgl->lpage != NULL)) {
				daddr = sgl->lpage_dma_addr;
			} else {
				daddr = dma_list[p] + map_offs;
			}

			size -= size_to_map;
			map_offs = 0;

			if (prev_daddr == daddr) {
				u32 prev_len = be32_to_cpu(last_s->len);

				/* pr_info("daddr combining: "
					"%016llx/%08x -> %016llx\n",
					prev_daddr, prev_len, daddr); */

				last_s->len = cpu_to_be32(prev_len +
							  size_to_map);

				p++; /* process next page */
				if (p == sgl->nr_pages)
					goto fixup;  /* nothing to do */

				prev_daddr = daddr + size_to_map;
				continue;
			}

			/* start new entry */
			s[j].target_addr = cpu_to_be64(daddr);
			s[j].len	 = cpu_to_be32(size_to_map);
			s[j].flags	 = cpu_to_be32(SG_DATA);
			prev_daddr = daddr + size_to_map;
			last_s = &s[j];
			j++;

			p++;	/* process next page */
			if (p == sgl->nr_pages)
				goto fixup;  /* nothing to do */
		}
		dma_offs += 128;
		s += 8;		/* continue 8 elements further */
	}
 fixup:
	if (j == 1) {		/* combining happened on last entry! */
		s -= 8;		/* full shift needed on previous sgl block */
		j =  7;		/* shift all elements */
	}

	for (i = 0; i < j; i++)	/* move elements 1 up */
		s[i] = s[i + 1];

	s[i].target_addr = cpu_to_be64(0);
	s[i].len	 = cpu_to_be32(0);
	s[i].flags	 = cpu_to_be32(SG_END_LIST);
	return 0;
}
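
/*
 * Resulting layout sketch (assuming more than seven pages and no
 * entry merging): every filled 128-byte block keeps its leading
 * chaining entry, except the last one, where the fixup code above
 * shifts the data entries up and terminates the list:
 *
 *	block 0: [chain -> block 1][data][data][data][data][data][data][data]
 *	block 1: [chain -> block 2][data]...
 *	...
 *	last:    [data]...[end-of-list]
 */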

/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 * @cd:         genwqe device descriptor
 * @sgl:        scatter-gather list to be freed
 *
 * After the DMA transfer has been completed we free the memory for
 * the sgl and the cached pages. Data is being transferred from cached
 * pages into user-space buffers.
 */
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
	int rc = 0;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (sgl->fpage) {
		if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
				 sgl->fpage_size)) {
			dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
				__func__);
			rc = -EFAULT;
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
					 sgl->fpage_dma_addr);
		sgl->fpage = NULL;
		sgl->fpage_dma_addr = 0;
	}
	if (sgl->lpage) {
		if (copy_to_user(sgl->user_addr + sgl->user_size -
				 sgl->lpage_size, sgl->lpage,
				 sgl->lpage_size)) {
			dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
				__func__);
			rc = -EFAULT;
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
					 sgl->lpage_dma_addr);
		sgl->lpage = NULL;
		sgl->lpage_dma_addr = 0;
	}
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);

	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0x0;
	sgl->sgl_size = 0;
	return rc;
}
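
/*
 * Usage sketch (hypothetical caller; error handling abbreviated,
 * m->dma_list assumed to come from genwqe_user_vmap() below):
 *
 *	struct genwqe_sgl sgl = { 0 };
 *
 *	rc = genwqe_alloc_sync_sgl(cd, &sgl, user_addr, user_size);
 *	if (rc == 0) {
 *		rc = genwqe_setup_sgl(cd, &sgl, m->dma_list);
 *		... submit DDCB referencing sgl.sgl_dma_addr ...
 *		rc = genwqe_free_sync_sgl(cd, &sgl);
 *	}
 */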

/**
 * free_user_pages() - Give pinned pages back
 * @page_list:  list of pointers to the pinned pages
 * @nr_pages:   number of pages
 * @dirty:      if set, mark the pages dirty before releasing them
 *
 * Documentation of get_user_pages is in mm/memory.c:
 *
 * If the page is written to, set_page_dirty (or set_page_dirty_lock,
 * as appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * FIXME Could be of use to others and might belong in the generic
 * code, if others agree. E.g.
 *    ll_free_user_pages in drivers/staging/lustre/lustre/llite/rw26.c
 *    ceph_put_page_vector in net/ceph/pagevec.c
 *    maybe more?
 */
static int free_user_pages(struct page **page_list, unsigned int nr_pages,
			   int dirty)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (page_list[i] != NULL) {
			if (dirty)
				set_page_dirty_lock(page_list[i]);
			put_page(page_list[i]);
		}
	}
	return 0;
}

/**
 * genwqe_user_vmap() - Map user-space memory to virtual kernel memory
 * @cd:         pointer to genwqe device
 * @m:          mapping params
 * @uaddr:      user virtual address
 * @size:       size of memory to be mapped
 * @req:        ddcb request this mapping belongs to (unused here)
 *
 * We need to think about how we could speed this up. Of course it is
 * not a good idea to do this over and over again, like we are
 * currently doing it. Nevertheless, I am curious where on the path
 * the performance is spent. Most probably within the memory
 * allocation functions, but maybe also in the DMA mapping code.
 *
 * Restrictions: The maximum size of the possible mapping currently depends
 *               on the amount of memory we can get using kzalloc() for the
 *               page_list and pci_alloc_consistent for the sg_list.
 *               The sg_list is currently itself not scattered, which could
 *               be fixed with some effort. The page_list must be split into
 *               PAGE_SIZE chunks too. All that will make the complicated
 *               code more complicated.
 *
 * Return: 0 if success
 */
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
		     unsigned long size, struct ddcb_requ *req)
{
	int rc = -EINVAL;
	unsigned long data, offs;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((uaddr == NULL) || (size == 0)) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->u_vaddr = uaddr;
	m->size    = size;

	/* determine space needed for page_list. */
	data = (unsigned long)uaddr;
	offs = offset_in_page(data);
	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);

	m->page_list = kcalloc(m->nr_pages,
			       sizeof(struct page *) + sizeof(dma_addr_t),
			       GFP_KERNEL);
	if (!m->page_list) {
		dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
		m->nr_pages = 0;
		m->u_vaddr = NULL;
		m->size = 0;	/* mark unused and not added */
		return -ENOMEM;
	}
	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);

	/* pin user pages in memory */
	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
				 m->nr_pages,
				 1,		/* write by caller */
				 m->page_list);	/* ptrs to pages */
	if (rc < 0)
		goto fail_get_user_pages;

	/* assumption: get_user_pages can be killed by signals. */
	if (rc < m->nr_pages) {
		free_user_pages(m->page_list, rc, 0);
		rc = -EFAULT;
		goto fail_get_user_pages;
	}

	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
	if (rc != 0)
		goto fail_free_user_pages;

	return 0;

 fail_free_user_pages:
	free_user_pages(m->page_list, m->nr_pages, 0);

 fail_get_user_pages:
	kfree(m->page_list);
	m->page_list = NULL;
	m->dma_list = NULL;
	m->nr_pages = 0;
	m->u_vaddr = NULL;
	m->size = 0;		/* mark unused and not added */
	return rc;
}

/**
 * genwqe_user_vunmap() - Undo mapping of user-space mem to virtual kernel
 *                        memory
 * @cd:         pointer to genwqe device
 * @m:          mapping params
 * @req:        ddcb request this mapping belongs to (unused here)
 */
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
		       struct ddcb_requ *req)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!dma_mapping_used(m)) {
		dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
			__func__, m);
		return -EINVAL;
	}

	if (m->dma_list)
		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);

	if (m->page_list) {
		free_user_pages(m->page_list, m->nr_pages, 1);

		kfree(m->page_list);
		m->page_list = NULL;
		m->dma_list = NULL;
		m->nr_pages = 0;
	}

	m->u_vaddr = NULL;
	m->size = 0;		/* mark as unused and not added */
	return 0;
}
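
/*
 * Usage sketch (hypothetical caller): genwqe_user_vmap() and
 * genwqe_user_vunmap() are meant to bracket a DMA operation on a
 * raw user buffer:
 *
 *	struct dma_mapping m;
 *
 *	rc = genwqe_user_vmap(cd, &m, uaddr, usize, req);
 *	if (rc == 0) {
 *		... use m.dma_list, e.g. via genwqe_setup_sgl() ...
 *		genwqe_user_vunmap(cd, &m, req);
 *	}
 */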

/**
 * genwqe_card_type() - Get chip type from SLU Configuration Register
 * @cd:         pointer to the genwqe device descriptor
 * Return: 0: Altera Stratix-IV 230
 *         1: Altera Stratix-IV 530
 *         2: Altera Stratix-V A4
 *         3: Altera Stratix-V A7
 */
u8 genwqe_card_type(struct genwqe_dev *cd)
{
	u64 card_type = cd->slu_unitcfg;

	return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}

/**
 * genwqe_card_reset() - Reset the card
 * @cd:         pointer to the genwqe device descriptor
 *
 * Return: 0 if success
 */
int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	/* new SL */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/*
	 * Read-modify-write to preserve the stealth bits
	 *
	 * For SL >= 039, the Stealth WE bit allows removing
	 * the read-modify-write.
	 * r-m-w may require a mask 0x3C to avoid hitting hard
	 * reset again for error reset (should be 0, chicken).
	 */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	/* give ERRORRESET some time to finish */
	msleep(50);

	if (genwqe_need_err_masking(cd)) {
		dev_info(&pci_dev->dev,
			 "[%s] masking errors for old bitstreams\n", __func__);
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
	}
	return 0;
}

/**
 * genwqe_read_softreset() - Determine the softreset value to be used
 * @cd:         pointer to the genwqe device descriptor
 *
 * The value depends on the lowest bit of the IO_SLU_BITSTREAM
 * register, i.e. on which bitstream is currently active.
 */
int genwqe_read_softreset(struct genwqe_dev *cd)
{
	u64 bitstream;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
	cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
	return 0;
}

/**
 * genwqe_set_interrupt_capability() - Configure MSI capability structure
 * @cd:         pointer to the device
 * @count:      number of interrupt vectors to allocate
 * Return: 0 if no error
 */
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	rc = pci_enable_msi_range(pci_dev, 1, count);
	if (rc < 0)
		return rc;

	cd->flags |= GENWQE_FLAG_MSI_ENABLED;
	return 0;
}

/**
 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
 * @cd:         pointer to the device
 */
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
		pci_disable_msi(pci_dev);
		cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
	}
}

/**
 * set_reg_idx() - Fill array with data. Ignore illegal offsets.
 * @cd:         card device
 * @r:          debug register array
 * @i:          pointer to the current entry index; incremented on success
 * @m:          maximum number of entries
 * @addr:       register address which was read
 * @idx:        distinguishing index stored alongside the value
 * @val:        read value
 */
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
		       unsigned int *i, unsigned int m, u32 addr, u32 idx,
		       u64 val)
{
	if (WARN_ON_ONCE(*i >= m))
		return -EFAULT;

	r[*i].addr = addr;
	r[*i].idx = idx;
	r[*i].val = val;
	++*i;
	return 0;
}

static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
		   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
	return set_reg_idx(cd, r, i, m, addr, 0, val);
}

/**
 * genwqe_read_ffdc_regs() - Read FFDC registers for dump/debug purposes
 * @cd:         genwqe device descriptor
 * @regs:       register array to fill
 * @max_regs:   size of the register array
 * @all:        if set, read the secondary registers unconditionally
 *
 * Return: number of entries written to @regs
 */
int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			 unsigned int max_regs, int all)
{
	unsigned int i, j, idx = 0;
	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

	/* Global FIR */
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

	/* UnitCfg for SLU */
	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG); /* 0x00000000 */
	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

	/* UnitCfg for APP */
	appid = __genwqe_readq(cd, IO_APP_UNITCFG); /* 0x02000000 */
	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

	/* Check all chip Units */
	for (i = 0; i < GENWQE_MAX_UNITS; i++) {

		/* Unit FIR */
		ufir_addr = (i << 24) | 0x008;
		ufir = __genwqe_readq(cd, ufir_addr);
		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

		/* Unit FEC */
		ufec_addr = (i << 24) | 0x018;
		ufec = __genwqe_readq(cd, ufec_addr);
		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

		for (j = 0; j < 64; j++) {
			/*
			 * Wherever a primary FIR bit is set, read
			 * the secondary FIR/FEC pair.
			 */
			if (!all && (!(ufir & (1ull << j))))
				continue;

			sfir_addr = (i << 24) | (0x100 + 8 * j);
			sfir = __genwqe_readq(cd, sfir_addr);
			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

			sfec_addr = (i << 24) | (0x300 + 8 * j);
			sfec = __genwqe_readq(cd, sfec_addr);
			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
		}
	}

	/* fill with invalid data until end */
	for (i = idx; i < max_regs; i++) {
		regs[i].addr = 0xffffffff;
		regs[i].val = 0xffffffffffffffffull;
	}
	return idx;
}

/**
 * genwqe_ffdc_buff_size() - Calculates the number of dump registers
 * @cd:         genwqe device descriptor
 * @uid:        unit ID
 *
 * Return: number of entries the FFDC buffer for this unit must hold
 */
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
	int entries = 0, ring, traps, traces, trace_entries;
	u32 eevptr_addr, l_addr, d_len, d_type;
	u64 eevptr, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != -1ull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;

		while (1) {
			val = __genwqe_readq(cd, l_addr);

			if ((val == 0x0) || (val == -1ull))
				break;

			/* 38:24 */
			d_len  = (val & 0x0000007fff000000ull) >> 24;

			/* 39 */
			d_type = (val & 0x0000008000000000ull) >> 36;

			if (d_type) {	/* repeat */
				entries += d_len;
			} else {	/* size in bytes! */
				entries += d_len >> 3;
			}

			l_addr += 8;
		}
	}

	for (ring = 0; ring < 8; ring++) {
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;
		traces = (val >> 16) & 0xff;
		trace_entries = val & 0xffff;

		entries += traps + (traces * trace_entries);
	}
	return entries;
}

/**
 * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
 * @cd:         genwqe device descriptor
 * @uid:        unit ID
 * @regs:       register array to fill
 * @max_regs:   size of the register array
 */
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
			  struct genwqe_reg *regs, unsigned int max_regs)
{
	int i, traps, traces, trace, trace_entries, trace_entry, ring;
	unsigned int idx = 0;
	u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
	u64 eevptr, e, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;
		while (1) {
			e = __genwqe_readq(cd, l_addr);
			if ((e == 0x0) || (e == 0xffffffffffffffffull))
				break;

			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
			d_addr |= GENWQE_UID_OFFS(uid);

			if (d_type) {
				for (i = 0; i < (int)d_len; i++) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, i, val);
				}
			} else {
				d_len >>= 3; /* size in bytes! */
				for (i = 0; i < (int)d_len; i++, d_addr += 8) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, 0, val);
				}
			}
			l_addr += 8;
		}
	}

	/*
	 * To save time, there are only 6 traces populated on Uid=2,
	 * Ring=1, each with iters=512.
	 */
	for (ring = 0; ring < 8; ring++) { /* 0 is fls, 1 is fds,
					      2...7 are ASI rings */
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;	/* Number of Traps	*/
		traces = (val >> 16) & 0xff;	/* Number of Traces	*/
		trace_entries = val & 0xffff;	/* Entries per trace	*/

		/*
		 * Note: This is a combined loop that dumps both the
		 * traps (for the trace == 0 case) as well as the
		 * traces 1 to 'traces'.
		 */
		for (trace = 0; trace <= traces; trace++) {
			u32 diag_sel =
				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);

			addr = (GENWQE_UID_OFFS(uid) |
				IO_EXTENDED_DIAG_SELECTOR);
			__genwqe_writeq(cd, addr, diag_sel);

			for (trace_entry = 0;
			     trace_entry < (trace ? trace_entries : traps);
			     trace_entry++) {
				addr = (GENWQE_UID_OFFS(uid) |
					IO_EXTENDED_DIAG_READ_MBX);
				val = __genwqe_readq(cd, addr);
				set_reg_idx(cd, regs, &idx, max_regs, addr,
					    (diag_sel<<16) | trace_entry, val);
			}
		}
	}
	return 0;
}

/**
 * genwqe_write_vreg() - Write register in virtual window
 * @cd:         genwqe device descriptor
 * @reg:        register (byte) offset within BAR
 * @val:        value to write
 * @func:       PCI function to select in the virtual window
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. It is not intended for the VF to access.
 */
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	__genwqe_writeq(cd, reg, val);
	return 0;
}

/**
 * genwqe_read_vreg() - Read register in virtual window
 * @cd:         genwqe device descriptor
 * @reg:        register (byte) offset within BAR
 * @func:       PCI function to select in the virtual window
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. It is not intended for the VF to access.
 *
 * Return: value from register
 */
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	return __genwqe_readq(cd, reg);
}
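
/*
 * Usage sketch (hypothetical register offset; only meaningful when
 * running as the privileged PF): select function 1 in the virtual
 * window, then access one of its registers on its behalf:
 *
 *	genwqe_write_vreg(cd, some_slc_reg_offs, val, 1);
 *	val = genwqe_read_vreg(cd, some_slc_reg_offs, 1);
 */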

/**
 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
 * @cd:         genwqe device descriptor
 *
 * Note: From a design perspective it turned out to be a bad idea to
 * use codes here to specify the frequency/speed values. An old
 * driver cannot understand new codes and is therefore always a
 * problem. Better is to measure out the value or put the
 * speed/frequency directly into a register which is always a valid
 * value for old as well as for new software.
 *
 * Return: Card clock in MHz
 */
int genwqe_base_clock_frequency(struct genwqe_dev *cd)
{
	u16 speed;		/*         MHz  MHz  MHz  MHz */
	static const int speed_grade[] = { 250, 200, 166, 175 };

	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
	if (speed >= ARRAY_SIZE(speed_grade))
		return 0;	/* illegal value */

	return speed_grade[speed];
}
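
/*
 * Decoding example: bits 31:28 of SLU_UNITCFG index the table
 * above, so e.g. a speed code of 1 reports a 200 MHz base clock,
 * while any code above 3 yields 0 (unknown).
 */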

/**
 * genwqe_stop_traps() - Stop traps
 * @cd:         genwqe device descriptor
 *
 * Before reading out the analysis data, we need to stop the traps.
 */
void genwqe_stop_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
}

/**
 * genwqe_start_traps() - Start traps
 * @cd:         genwqe device descriptor
 *
 * After having read the data, we can/must enable the traps again.
 */
void genwqe_start_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);

	if (genwqe_need_err_masking(cd))
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}