xref: /linux/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c (revision 7ec462100ef9142344ddbf86f2c3008b97acddbe)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3 
4 /*
5  * nfp_cppcore.c
6  * Provides low-level access to the NFP's internal CPP bus
7  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
8  *          Jason McMullan <jason.mcmullan@netronome.com>
9  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
10  */
11 
12 #include <linux/unaligned.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/ioport.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/wait.h>
22 
23 #include "nfp_arm.h"
24 #include "nfp_cpp.h"
25 #include "nfp6000/nfp6000.h"
26 
27 #define NFP_ARM_GCSR_SOFTMODEL2                              0x0000014c
28 #define NFP_ARM_GCSR_SOFTMODEL3                              0x00000150
29 
/* A named range of CPP address space; linked on nfp_cpp::resource_list,
 * which is kept sorted by @cpp_id and then by @start (see __resource_add()).
 */
struct nfp_cpp_resource {
	struct list_head list;	/* entry in nfp_cpp::resource_list */
	const char *name;	/* owner name, stored inline after the area */
	u32 cpp_id;		/* CPP target/action/token id of the range */
	u64 start;		/* first byte of the range (inclusive) */
	u64 end;		/* last byte of the range (inclusive) */
};
37 
/**
 * struct nfp_cpp - main nfpcore device structure
 * Following fields are read-only after probe() exits or netdevs are spawned.
 * @dev:		embedded device structure
 * @op:			low-level implementation ops
 * @priv:		private data of the low-level implementation
 * @model:		chip model
 * @interface:		chip interface id we are using to reach it
 * @serial:		chip serial number
 * @imb_cat_table:	CPP Mapping Table
 * @mu_locality_lsb:	MU access type bit offset
 *
 * Following fields use explicit locking:
 * @resource_list:	NFP CPP resource list
 * @resource_lock:	protects @resource_list
 *
 * @area_cache_list:	cached areas for cpp/xpb read/write speed up
 * @area_cache_mutex:	protects @area_cache_list
 *
 * @waitq:		area wait queue
 */
struct nfp_cpp {
	struct device dev;

	void *priv;

	u32 model;
	u16 interface;
	u8 serial[NFP_SERIAL_LEN];

	const struct nfp_cpp_operations *op;
	/* sorted by cpp_id, then start - see __resource_add() */
	struct list_head resource_list;
	rwlock_t resource_lock;
	wait_queue_head_t waitq;

	u32 imb_cat_table[16];
	unsigned int mu_locality_lsb;

	struct mutex area_cache_mutex;
	/* LRU order: most recently used entry at the front */
	struct list_head area_cache_list;
};
79 
/* Element of the area_cache_list */
struct nfp_cpp_area_cache {
	struct list_head entry;		/* LRU entry; front == most recent */
	u32 id;				/* CPP ID currently mapped, 0 == idle */
	u64 addr;			/* cache-size aligned base address */
	u32 size;			/* size of the cached window */
	struct nfp_cpp_area *area;	/* backing area, kept allocated */
};

struct nfp_cpp_area {
	struct nfp_cpp *cpp;
	struct kref kref;	/* lifetime of this struct itself */
	atomic_t refcount;	/* acquire/release nesting count */
	struct mutex mutex;	/* Lock for the area's refcount */
	unsigned long long offset;	/* CPP address the area starts at */
	unsigned long size;		/* size of the area in bytes */
	struct nfp_cpp_resource resource;
	void __iomem *iomem;
	/* Here follows the 'priv' part of nfp_cpp_area. */
};

struct nfp_cpp_explicit {
	struct nfp_cpp *cpp;
	struct nfp_cpp_explicit_command cmd;
	/* Here follows the 'priv' part of nfp_cpp_explicit. */
};
106 
__resource_add(struct list_head * head,struct nfp_cpp_resource * res)107 static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
108 {
109 	struct nfp_cpp_resource *tmp;
110 	struct list_head *pos;
111 
112 	list_for_each(pos, head) {
113 		tmp = container_of(pos, struct nfp_cpp_resource, list);
114 
115 		if (tmp->cpp_id > res->cpp_id)
116 			break;
117 
118 		if (tmp->cpp_id == res->cpp_id && tmp->start > res->start)
119 			break;
120 	}
121 
122 	list_add_tail(&res->list, pos);
123 }
124 
/* Unlink @res from the resource list; caller holds resource_lock for write */
static void __resource_del(struct nfp_cpp_resource *res)
{
	list_del_init(&res->list);
}
129 
__release_cpp_area(struct kref * kref)130 static void __release_cpp_area(struct kref *kref)
131 {
132 	struct nfp_cpp_area *area =
133 		container_of(kref, struct nfp_cpp_area, kref);
134 	struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);
135 
136 	if (area->cpp->op->area_cleanup)
137 		area->cpp->op->area_cleanup(area);
138 
139 	write_lock(&cpp->resource_lock);
140 	__resource_del(&area->resource);
141 	write_unlock(&cpp->resource_lock);
142 	kfree(area);
143 }
144 
/* Drop a reference; the area is freed via __release_cpp_area() on last put */
static void nfp_cpp_area_put(struct nfp_cpp_area *area)
{
	kref_put(&area->kref, __release_cpp_area);
}

/* Take an additional reference on @area; returns @area for convenience */
static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
{
	kref_get(&area->kref);

	return area;
}
156 
/**
 * nfp_cpp_free() - free the CPP handle
 * @cpp:	CPP handle
 *
 * Tears down the area cache first (its areas hold resource-list
 * entries), then force-releases any leaked areas before freeing the
 * backend and the handle itself.
 */
void nfp_cpp_free(struct nfp_cpp *cpp)
{
	struct nfp_cpp_area_cache *cache, *ctmp;
	struct nfp_cpp_resource *res, *rtmp;

	/* Remove all caches */
	list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
		list_del(&cache->entry);
		/* A non-zero id means the cached area is still acquired */
		if (cache->id)
			nfp_cpp_area_release(cache->area);
		nfp_cpp_area_free(cache->area);
		kfree(cache);
	}

	/* There should be no dangling areas at this point */
	WARN_ON(!list_empty(&cpp->resource_list));

	/* .. but if they weren't, try to clean up. */
	list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
		struct nfp_cpp_area *area = container_of(res,
							 struct nfp_cpp_area,
							 resource);

		dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
			NFP_CPP_ID_TARGET_of(res->cpp_id),
			NFP_CPP_ID_ACTION_of(res->cpp_id),
			NFP_CPP_ID_TOKEN_of(res->cpp_id),
			res->start, res->end,
			res->name ? " " : "",
			res->name ? res->name : "");

		if (area->cpp->op->area_release)
			area->cpp->op->area_release(area);

		/* Free directly, bypassing any leaked reference counts */
		__release_cpp_area(&area->kref);
	}

	if (cpp->op->free)
		cpp->op->free(cpp);

	device_unregister(&cpp->dev);

	kfree(cpp);
}
205 
/**
 * nfp_cpp_model() - Retrieve the Model ID of the NFP
 * @cpp:	NFP CPP handle
 *
 * Return: NFP CPP Model ID
 */
u32 nfp_cpp_model(struct nfp_cpp *cpp)
{
	return cpp->model;
}

/**
 * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
 * @cpp:	NFP CPP handle
 *
 * Return: NFP CPP Interface ID
 */
u16 nfp_cpp_interface(struct nfp_cpp *cpp)
{
	return cpp->interface;
}

/**
 * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
 * @cpp:	NFP CPP handle
 * @serial:	Pointer to NFP serial number
 *
 * The returned pointer refers to storage owned by @cpp; it stays
 * valid for the lifetime of the handle and must not be freed.
 *
 * Return:  Length of NFP serial number
 */
int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
{
	*serial = &cpp->serial[0];
	return sizeof(cpp->serial);
}
240 
241 #define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x)		(((_x) >> 13) & 0x7)
242 #define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE		BIT(12)
243 #define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT	0
244 #define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT	BIT(12)
245 
nfp_cpp_set_mu_locality_lsb(struct nfp_cpp * cpp)246 static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp)
247 {
248 	unsigned int mode, addr40;
249 	u32 imbcppat;
250 	int res;
251 
252 	imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU];
253 	mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
254 	addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
255 
256 	res = nfp_cppat_mu_locality_lsb(mode, addr40);
257 	if (res < 0)
258 		return res;
259 	cpp->mu_locality_lsb = res;
260 
261 	return 0;
262 }
263 
/**
 * nfp_cpp_mu_locality_lsb() - Retrieve the MU locality bit offset
 * @cpp:	NFP CPP handle
 *
 * The value is computed once from the MU IMB table entry by
 * nfp_cpp_set_mu_locality_lsb() and is read-only afterwards.
 *
 * Return: MU access type bit offset
 */
unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp)
{
	return cpp->mu_locality_lsb;
}
268 
269 /**
270  * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
271  * @cpp:	CPP device handle
272  * @dest:	NFP CPP ID
273  * @name:	Name of region
274  * @address:	Address of region
275  * @size:	Size of region
276  *
277  * Allocate and initialize a CPP area structure.  The area must later
278  * be locked down with an 'acquire' before it can be safely accessed.
279  *
280  * NOTE: @address and @size must be 32-bit aligned values.
281  *
282  * Return: NFP CPP area handle, or NULL
283  */
284 struct nfp_cpp_area *
nfp_cpp_area_alloc_with_name(struct nfp_cpp * cpp,u32 dest,const char * name,unsigned long long address,unsigned long size)285 nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
286 			     unsigned long long address, unsigned long size)
287 {
288 	struct nfp_cpp_area *area;
289 	u64 tmp64 = address;
290 	int err, name_len;
291 
292 	/* Remap from cpp_island to cpp_target */
293 	err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
294 	if (err < 0)
295 		return NULL;
296 
297 	address = tmp64;
298 
299 	if (!name)
300 		name = "(reserved)";
301 
302 	name_len = strlen(name) + 1;
303 	area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
304 		       GFP_KERNEL);
305 	if (!area)
306 		return NULL;
307 
308 	area->cpp = cpp;
309 	area->resource.name = (void *)area + sizeof(*area) +
310 		cpp->op->area_priv_size;
311 	memcpy((char *)area->resource.name, name, name_len);
312 
313 	area->resource.cpp_id = dest;
314 	area->resource.start = address;
315 	area->resource.end = area->resource.start + size - 1;
316 	INIT_LIST_HEAD(&area->resource.list);
317 
318 	atomic_set(&area->refcount, 0);
319 	kref_init(&area->kref);
320 	mutex_init(&area->mutex);
321 
322 	if (cpp->op->area_init) {
323 		int err;
324 
325 		err = cpp->op->area_init(area, dest, address, size);
326 		if (err < 0) {
327 			kfree(area);
328 			return NULL;
329 		}
330 	}
331 
332 	write_lock(&cpp->resource_lock);
333 	__resource_add(&cpp->resource_list, &area->resource);
334 	write_unlock(&cpp->resource_lock);
335 
336 	area->offset = address;
337 	area->size = size;
338 
339 	return area;
340 }
341 
/**
 * nfp_cpp_area_alloc() - allocate a new CPP area
 * @cpp:	CPP handle
 * @dest:	CPP id
 * @address:	Start address on CPP target
 * @size:	Size of area in bytes
 *
 * Allocate and initialize a CPP area structure.  The area must later
 * be locked down with an 'acquire' before it can be safely accessed.
 *
 * Convenience wrapper around nfp_cpp_area_alloc_with_name() with an
 * anonymous ("(reserved)") name.
 *
 * NOTE: @address and @size must be 32-bit aligned values.
 *
 * Return: NFP CPP Area handle, or NULL
 */
struct nfp_cpp_area *
nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
		   unsigned long long address, unsigned long size)
{
	return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
}
362 
363 /**
364  * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
365  * @cpp:	CPP handle
366  * @name:	Name of region
367  * @dest:	CPP id
368  * @address:	Start address on CPP target
369  * @size:	Size of area
370  *
371  * Allocate and initialize a CPP area structure, and lock it down so
372  * that it can be accessed directly.
373  *
374  * NOTE: @address and @size must be 32-bit aligned values.
375  * The area must also be 'released' when the structure is freed.
376  *
377  * Return: NFP CPP Area handle, or NULL
378  */
379 struct nfp_cpp_area *
nfp_cpp_area_alloc_acquire(struct nfp_cpp * cpp,const char * name,u32 dest,unsigned long long address,unsigned long size)380 nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
381 			   unsigned long long address, unsigned long size)
382 {
383 	struct nfp_cpp_area *area;
384 
385 	area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
386 	if (!area)
387 		return NULL;
388 
389 	if (nfp_cpp_area_acquire(area)) {
390 		nfp_cpp_area_free(area);
391 		return NULL;
392 	}
393 
394 	return area;
395 }
396 
397 /**
398  * nfp_cpp_area_free() - free up the CPP area
399  * @area:	CPP area handle
400  *
401  * Frees up memory resources held by the CPP area.
402  */
nfp_cpp_area_free(struct nfp_cpp_area * area)403 void nfp_cpp_area_free(struct nfp_cpp_area *area)
404 {
405 	if (atomic_read(&area->refcount))
406 		nfp_warn(area->cpp, "Warning: freeing busy area\n");
407 	nfp_cpp_area_put(area);
408 }
409 
/* wait_event() predicate: attempt the backend acquire once.  Any result
 * other than -EAGAIN (success or a hard error) terminates the wait; the
 * verdict is passed back to the caller through @status.
 */
static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
{
	*status = area->cpp->op->area_acquire(area);

	return *status != -EAGAIN;
}
416 
/* Acquire @area; caller must hold area->mutex.
 *
 * Only the first acquirer (refcount 0 -> 1) talks to the backend;
 * nested acquires just bump the count.  Sleeps (interruptibly) until
 * the backend stops returning -EAGAIN - waiters are woken through
 * cpp->waitq by nfp_cpp_area_release().  On success an extra kref is
 * taken, paired with the put in nfp_cpp_area_release().
 */
static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
	int err, status;

	if (atomic_inc_return(&area->refcount) > 1)
		return 0;

	/* Backends without an area_acquire op need no locking down */
	if (!area->cpp->op->area_acquire)
		return 0;

	err = wait_event_interruptible(area->cpp->waitq,
				       nfp_cpp_area_acquire_try(area, &status));
	if (!err)
		err = status;
	if (err) {
		nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
		/* Undo the refcount bump taken above */
		atomic_dec(&area->refcount);
		return err;
	}

	nfp_cpp_area_get(area);

	return 0;
}
441 
442 /**
443  * nfp_cpp_area_acquire() - lock down a CPP area for access
444  * @area:	CPP area handle
445  *
446  * Locks down the CPP area for a potential long term activity.  Area
447  * must always be locked down before being accessed.
448  *
449  * Return: 0, or -ERRNO
450  */
nfp_cpp_area_acquire(struct nfp_cpp_area * area)451 int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
452 {
453 	int ret;
454 
455 	mutex_lock(&area->mutex);
456 	ret = __nfp_cpp_area_acquire(area);
457 	mutex_unlock(&area->mutex);
458 
459 	return ret;
460 }
461 
/**
 * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
 * @area:	CPP area handle
 *
 * Locks down the CPP area for a potential long term activity.  Area
 * must always be locked down before being accessed.
 *
 * NOTE: Returns -EAGAIN if no area is available
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
{
	mutex_lock(&area->mutex);
	/* Only the first acquirer needs to lock down the backend area */
	if (atomic_inc_return(&area->refcount) == 1) {
		if (area->cpp->op->area_acquire) {
			int err;

			/* Single attempt - may fail with -EAGAIN immediately */
			err = area->cpp->op->area_acquire(area);
			if (err < 0) {
				atomic_dec(&area->refcount);
				mutex_unlock(&area->mutex);
				return err;
			}
		}
	}
	mutex_unlock(&area->mutex);

	/* Paired with the kref put in nfp_cpp_area_release() */
	nfp_cpp_area_get(area);
	return 0;
}
493 
/**
 * nfp_cpp_area_release() - release a locked down CPP area
 * @area:	CPP area handle
 *
 * Releases a previously locked down CPP area.  Drops the kref taken
 * by the matching acquire.
 */
void nfp_cpp_area_release(struct nfp_cpp_area *area)
{
	mutex_lock(&area->mutex);
	/* Only call the release on refcount == 0 */
	if (atomic_dec_and_test(&area->refcount)) {
		if (area->cpp->op->area_release) {
			area->cpp->op->area_release(area);
			/* Let anyone waiting for a BAR try to get one.. */
			wake_up_interruptible_all(&area->cpp->waitq);
		}
	}
	mutex_unlock(&area->mutex);

	nfp_cpp_area_put(area);
}
515 
/**
 * nfp_cpp_area_release_free() - release CPP area and free it
 * @area:	CPP area handle
 *
 * Releases CPP area and frees up memory resources held by it.
 * Convenience helper for areas from nfp_cpp_area_alloc_acquire().
 */
void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
{
	nfp_cpp_area_release(area);
	nfp_cpp_area_free(area);
}
527 
/**
 * nfp_cpp_area_read() - read data from CPP area
 * @area:	  CPP area handle
 * @offset:	  offset into CPP area
 * @kernel_vaddr: kernel address to put data into
 * @length:	  number of bytes to read
 *
 * Read data from indicated CPP region.  Thin dispatch to the
 * backend's area_read op.
 *
 * NOTE: @offset and @length must be 32-bit aligned values.
 * Area must have been locked down with an 'acquire'.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_area_read(struct nfp_cpp_area *area,
		      unsigned long offset, void *kernel_vaddr,
		      size_t length)
{
	return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
}

/**
 * nfp_cpp_area_write() - write data to CPP area
 * @area:	CPP area handle
 * @offset:	offset into CPP area
 * @kernel_vaddr: kernel address to read data from
 * @length:	number of bytes to write
 *
 * Write data to indicated CPP region.  Thin dispatch to the
 * backend's area_write op.
 *
 * NOTE: @offset and @length must be 32-bit aligned values.
 * Area must have been locked down with an 'acquire'.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_area_write(struct nfp_cpp_area *area,
		       unsigned long offset, const void *kernel_vaddr,
		       size_t length)
{
	return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
}
569 
/**
 * nfp_cpp_area_size() - return size of a CPP area
 * @cpp_area:	CPP area handle
 *
 * Return: Size of the area
 */
size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->size;
}

/**
 * nfp_cpp_area_name() - return name of a CPP area
 * @cpp_area:	CPP area handle
 *
 * Return: Name of the area, or NULL
 */
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->resource.name;
}

/**
 * nfp_cpp_area_priv() - return private struct for CPP area
 * @cpp_area:	CPP area handle
 *
 * Return: Private data for the CPP area
 */
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
{
	/* Backend private data lives immediately after the area struct */
	return &cpp_area[1];
}

/**
 * nfp_cpp_area_cpp() - return CPP handle for CPP area
 * @cpp_area:	CPP area handle
 *
 * Return: NFP CPP handle
 */
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->cpp;
}
613 
614 /**
615  * nfp_cpp_area_resource() - get resource
616  * @area:	CPP area handle
617  *
618  * NOTE: Area must have been locked down with an 'acquire'.
619  *
620  * Return: struct resource pointer, or NULL
621  */
nfp_cpp_area_resource(struct nfp_cpp_area * area)622 struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
623 {
624 	struct resource *res = NULL;
625 
626 	if (area->cpp->op->area_resource)
627 		res = area->cpp->op->area_resource(area);
628 
629 	return res;
630 }
631 
632 /**
633  * nfp_cpp_area_phys() - get physical address of CPP area
634  * @area:	CPP area handle
635  *
636  * NOTE: Area must have been locked down with an 'acquire'.
637  *
638  * Return: phy_addr_t of the area, or NULL
639  */
nfp_cpp_area_phys(struct nfp_cpp_area * area)640 phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
641 {
642 	phys_addr_t addr = ~0;
643 
644 	if (area->cpp->op->area_phys)
645 		addr = area->cpp->op->area_phys(area);
646 
647 	return addr;
648 }
649 
650 /**
651  * nfp_cpp_area_iomem() - get IOMEM region for CPP area
652  * @area:	CPP area handle
653  *
654  * Returns an iomem pointer for use with readl()/writel() style
655  * operations.
656  *
657  * NOTE: Area must have been locked down with an 'acquire'.
658  *
659  * Return: __iomem pointer to the area, or NULL
660  */
nfp_cpp_area_iomem(struct nfp_cpp_area * area)661 void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
662 {
663 	void __iomem *iomem = NULL;
664 
665 	if (area->cpp->op->area_iomem)
666 		iomem = area->cpp->op->area_iomem(area);
667 
668 	return iomem;
669 }
670 
671 /**
672  * nfp_cpp_area_readl() - Read a u32 word from an area
673  * @area:	CPP Area handle
674  * @offset:	Offset into area
675  * @value:	Pointer to read buffer
676  *
677  * Return: 0 on success, or -ERRNO
678  */
nfp_cpp_area_readl(struct nfp_cpp_area * area,unsigned long offset,u32 * value)679 int nfp_cpp_area_readl(struct nfp_cpp_area *area,
680 		       unsigned long offset, u32 *value)
681 {
682 	u8 tmp[4];
683 	int n;
684 
685 	n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
686 	if (n != sizeof(tmp))
687 		return n < 0 ? n : -EIO;
688 
689 	*value = get_unaligned_le32(tmp);
690 	return 0;
691 }
692 
693 /**
694  * nfp_cpp_area_writel() - Write a u32 word to an area
695  * @area:	CPP Area handle
696  * @offset:	Offset into area
697  * @value:	Value to write
698  *
699  * Return: 0 on success, or -ERRNO
700  */
nfp_cpp_area_writel(struct nfp_cpp_area * area,unsigned long offset,u32 value)701 int nfp_cpp_area_writel(struct nfp_cpp_area *area,
702 			unsigned long offset, u32 value)
703 {
704 	u8 tmp[4];
705 	int n;
706 
707 	put_unaligned_le32(value, tmp);
708 	n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
709 
710 	return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
711 }
712 
713 /**
714  * nfp_cpp_area_readq() - Read a u64 word from an area
715  * @area:	CPP Area handle
716  * @offset:	Offset into area
717  * @value:	Pointer to read buffer
718  *
719  * Return: 0 on success, or -ERRNO
720  */
nfp_cpp_area_readq(struct nfp_cpp_area * area,unsigned long offset,u64 * value)721 int nfp_cpp_area_readq(struct nfp_cpp_area *area,
722 		       unsigned long offset, u64 *value)
723 {
724 	u8 tmp[8];
725 	int n;
726 
727 	n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
728 	if (n != sizeof(tmp))
729 		return n < 0 ? n : -EIO;
730 
731 	*value = get_unaligned_le64(tmp);
732 	return 0;
733 }
734 
735 /**
736  * nfp_cpp_area_writeq() - Write a u64 word to an area
737  * @area:	CPP Area handle
738  * @offset:	Offset into area
739  * @value:	Value to write
740  *
741  * Return: 0 on success, or -ERRNO
742  */
nfp_cpp_area_writeq(struct nfp_cpp_area * area,unsigned long offset,u64 value)743 int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
744 			unsigned long offset, u64 value)
745 {
746 	u8 tmp[8];
747 	int n;
748 
749 	put_unaligned_le64(value, tmp);
750 	n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
751 
752 	return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
753 }
754 
755 /**
756  * nfp_cpp_area_fill() - fill a CPP area with a value
757  * @area:	CPP area
758  * @offset:	offset into CPP area
759  * @value:	value to fill with
760  * @length:	length of area to fill
761  *
762  * Fill indicated area with given value.
763  *
764  * Return: length of io, or -ERRNO
765  */
nfp_cpp_area_fill(struct nfp_cpp_area * area,unsigned long offset,u32 value,size_t length)766 int nfp_cpp_area_fill(struct nfp_cpp_area *area,
767 		      unsigned long offset, u32 value, size_t length)
768 {
769 	u8 tmp[4];
770 	size_t i;
771 	int k;
772 
773 	put_unaligned_le32(value, tmp);
774 
775 	if (offset % sizeof(tmp) || length % sizeof(tmp))
776 		return -EINVAL;
777 
778 	for (i = 0; i < length; i += sizeof(tmp)) {
779 		k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp));
780 		if (k < 0)
781 			return k;
782 	}
783 
784 	return i;
785 }
786 
787 /**
788  * nfp_cpp_area_cache_add() - Permanently reserve and area for the hot cache
789  * @cpp:	NFP CPP handle
790  * @size:	Size of the area - MUST BE A POWER OF 2.
791  */
nfp_cpp_area_cache_add(struct nfp_cpp * cpp,size_t size)792 int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
793 {
794 	struct nfp_cpp_area_cache *cache;
795 	struct nfp_cpp_area *area;
796 
797 	/* Allocate an area - we use the MU target's base as a placeholder,
798 	 * as all supported chips have a MU.
799 	 */
800 	area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
801 				  0, size);
802 	if (!area)
803 		return -ENOMEM;
804 
805 	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
806 	if (!cache) {
807 		nfp_cpp_area_free(area);
808 		return -ENOMEM;
809 	}
810 
811 	cache->id = 0;
812 	cache->addr = 0;
813 	cache->size = size;
814 	cache->area = area;
815 	mutex_lock(&cpp->area_cache_mutex);
816 	list_add_tail(&cache->entry, &cpp->area_cache_list);
817 	mutex_unlock(&cpp->area_cache_mutex);
818 
819 	return 0;
820 }
821 
/* Look up (or repurpose) a cached, acquired area covering
 * [@addr + *offset, @addr + *offset + @length) for CPP ID @id.
 *
 * On success the entry is returned with cpp->area_cache_mutex held;
 * the caller must hand it back through area_cache_put(), which drops
 * the mutex.  Returns NULL (mutex not held) when the request cannot
 * be served from the cache.
 */
static struct nfp_cpp_area_cache *
area_cache_get(struct nfp_cpp *cpp, u32 id,
	       u64 addr, unsigned long *offset, size_t length)
{
	struct nfp_cpp_area_cache *cache;
	int err;

	/* Early exit when length == 0, which prevents
	 * the need for special case code below when
	 * checking against available cache size.
	 */
	if (length == 0 || id == 0)
		return NULL;

	/* Remap from cpp_island to cpp_target */
	err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
	if (err < 0)
		return NULL;

	mutex_lock(&cpp->area_cache_mutex);

	if (list_empty(&cpp->area_cache_list)) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

	addr += *offset;

	/* See if we have a match */
	list_for_each_entry(cache, &cpp->area_cache_list, entry) {
		if (id == cache->id &&
		    addr >= cache->addr &&
		    addr + length <= cache->addr + cache->size)
			goto exit;
	}

	/* No matches - inspect the tail of the LRU */
	cache = list_entry(cpp->area_cache_list.prev,
			   struct nfp_cpp_area_cache, entry);

	/* Can we fit in the cache entry?  The transfer must not cross a
	 * cache-size aligned boundary.
	 */
	if (round_down(addr + length - 1, cache->size) !=
	    round_down(addr, cache->size)) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

	/* If id != 0, we will need to release it */
	if (cache->id) {
		nfp_cpp_area_release(cache->area);
		cache->id = 0;
		cache->addr = 0;
	}

	/* Adjust the start address to be cache size aligned */
	cache->addr = addr & ~(u64)(cache->size - 1);

	/* Re-init to the new ID and address */
	if (cpp->op->area_init) {
		err = cpp->op->area_init(cache->area,
					 id, cache->addr, cache->size);
		if (err < 0) {
			mutex_unlock(&cpp->area_cache_mutex);
			return NULL;
		}
	}

	/* Attempt to acquire */
	err = nfp_cpp_area_acquire(cache->area);
	if (err < 0) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

	cache->id = id;

exit:
	/* Adjust offset */
	*offset = addr - cache->addr;
	return cache;
}
903 
/* Return a cache entry obtained from area_cache_get(): promote it to
 * the front of the LRU and drop cpp->area_cache_mutex taken by the get.
 * A NULL @cache is a no-op (the get failed, mutex was never taken).
 */
static void
area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
{
	if (!cache)
		return;

	/* Move to front of LRU */
	list_move(&cache->entry, &cpp->area_cache_list);

	mutex_unlock(&cpp->area_cache_mutex);
}
915 
/* Perform a single CPP read, preferring a cached pre-acquired area and
 * falling back to a one-shot area for just this transfer.
 */
static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
			  unsigned long long address, void *kernel_vaddr,
			  size_t length)
{
	struct nfp_cpp_area_cache *cache;
	struct nfp_cpp_area *area;
	unsigned long offset = 0;
	int err;

	cache = area_cache_get(cpp, destination, address, &offset, length);
	if (!cache) {
		/* Slow path: temporary area for this transfer only */
		area = nfp_cpp_area_alloc(cpp, destination, address, length);
		if (!area)
			return -ENOMEM;

		err = nfp_cpp_area_acquire(area);
		if (err) {
			nfp_cpp_area_free(area);
			return err;
		}
	} else {
		area = cache->area;
	}

	err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);

	if (cache)
		area_cache_put(cpp, cache);
	else
		nfp_cpp_area_release_free(area);

	return err;
}
949 
/**
 * nfp_cpp_read() - read from CPP target
 * @cpp:		CPP handle
 * @destination:	CPP id
 * @address:		offset into CPP target
 * @kernel_vaddr:	kernel buffer for result
 * @length:		number of bytes to read
 *
 * The transfer is split into chunks which never cross a
 * NFP_CPP_SAFE_AREA_SIZE aligned boundary.  A short read terminates
 * the loop and the number of bytes transferred so far is returned.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
		 unsigned long long address, void *kernel_vaddr,
		 size_t length)
{
	size_t n, offset;
	int ret;

	for (offset = 0; offset < length; offset += n) {
		unsigned long long r_addr = address + offset;

		/* make first read smaller to align to safe window */
		n = min_t(size_t, length - offset,
			  ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr);

		ret = __nfp_cpp_read(cpp, destination, address + offset,
				     kernel_vaddr + offset, n);
		if (ret < 0)
			return ret;
		if (ret != n)
			return offset + n;
	}

	return length;
}
984 
/* Perform a single CPP write, preferring a cached pre-acquired area and
 * falling back to a one-shot area for just this transfer.
 */
static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
			   unsigned long long address,
			   const void *kernel_vaddr, size_t length)
{
	struct nfp_cpp_area_cache *cache;
	struct nfp_cpp_area *area;
	unsigned long offset = 0;
	int err;

	cache = area_cache_get(cpp, destination, address, &offset, length);
	if (!cache) {
		/* Slow path: temporary area for this transfer only */
		area = nfp_cpp_area_alloc(cpp, destination, address, length);
		if (!area)
			return -ENOMEM;

		err = nfp_cpp_area_acquire(area);
		if (err) {
			nfp_cpp_area_free(area);
			return err;
		}
	} else {
		area = cache->area;
	}

	err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);

	if (cache)
		area_cache_put(cpp, cache);
	else
		nfp_cpp_area_release_free(area);

	return err;
}
1018 
/**
 * nfp_cpp_write() - write to CPP target
 * @cpp:		CPP handle
 * @destination:	CPP id
 * @address:		offset into CPP target
 * @kernel_vaddr:	kernel buffer to read from
 * @length:		number of bytes to write
 *
 * The transfer is split into chunks which never cross a
 * NFP_CPP_SAFE_AREA_SIZE aligned boundary.  A short write terminates
 * the loop and the number of bytes transferred so far is returned.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
		  unsigned long long address,
		  const void *kernel_vaddr, size_t length)
{
	size_t n, offset;
	int ret;

	for (offset = 0; offset < length; offset += n) {
		unsigned long long w_addr = address + offset;

		/* make first write smaller to align to safe window */
		n = min_t(size_t, length - offset,
			  ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr);

		ret = __nfp_cpp_write(cpp, destination, address + offset,
				      kernel_vaddr + offset, n);
		if (ret < 0)
			return ret;
		if (ret != n)
			return offset + n;
	}

	return length;
}
1053 
/* Return the correct CPP address, and fixup xpb_addr as needed.
 *
 * The island number lives in bits 24-29 of the XPB address; bit 30
 * selects the global XPBM bus.
 */
static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
{
	int island;
	u32 xpb;

	xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
	/* Ensure that non-local XPB accesses go
	 * out through the global XPBM bus.
	 */
	island = (*xpb_addr >> 24) & 0x3f;
	if (!island)
		return xpb;

	if (island != 1) {
		*xpb_addr |= 1 << 30;
		return xpb;
	}

	/* Accesses to the ARM Island overlay uses Island 0 / Global Bit */
	*xpb_addr &= ~0x7f000000;
	if (*xpb_addr < 0x60000) {
		*xpb_addr |= 1 << 30;
	} else {
		/* And only non-ARM interfaces use the island id = 1 */
		if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
		    != NFP_CPP_INTERFACE_TYPE_ARM)
			*xpb_addr |= 1 << 24;
	}

	return xpb;
}
1086 
1087 /**
1088  * nfp_xpb_readl() - Read a u32 word from a XPB location
1089  * @cpp:	CPP device handle
1090  * @xpb_addr:	Address for operation
1091  * @value:	Pointer to read buffer
1092  *
1093  * Return: 0 on success, or -ERRNO
1094  */
nfp_xpb_readl(struct nfp_cpp * cpp,u32 xpb_addr,u32 * value)1095 int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
1096 {
1097 	u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1098 
1099 	return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
1100 }
1101 
1102 /**
1103  * nfp_xpb_writel() - Write a u32 word to a XPB location
1104  * @cpp:	CPP device handle
1105  * @xpb_addr:	Address for operation
1106  * @value:	Value to write
1107  *
1108  * Return: 0 on success, or -ERRNO
1109  */
nfp_xpb_writel(struct nfp_cpp * cpp,u32 xpb_addr,u32 value)1110 int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
1111 {
1112 	u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1113 
1114 	return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
1115 }
1116 
1117 /**
1118  * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
1119  * @cpp:	NFP CPP device handle
1120  * @xpb_tgt:	XPB target and address
1121  * @mask:	mask of bits to alter
1122  * @value:	value to modify
1123  *
1124  * KERNEL: This operation is safe to call in interrupt or softirq context.
1125  *
1126  * Return: 0 on success, or -ERRNO
1127  */
nfp_xpb_writelm(struct nfp_cpp * cpp,u32 xpb_tgt,u32 mask,u32 value)1128 int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
1129 		    u32 mask, u32 value)
1130 {
1131 	int err;
1132 	u32 tmp;
1133 
1134 	err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
1135 	if (err < 0)
1136 		return err;
1137 
1138 	tmp &= ~mask;
1139 	tmp |= mask & value;
1140 	return nfp_xpb_writel(cpp, xpb_tgt, tmp);
1141 }
1142 
/* Lockdep markers */
static struct lock_class_key nfp_cpp_resource_lock_key;

/* Release callback for the embedded struct device.  The nfp_cpp is
 * freed explicitly by its owner (see the error paths below), so there
 * is nothing to do here - but the driver core warns if a registered
 * device has no release callback, hence this empty stub.
 */
static void nfp_cpp_dev_release(struct device *dev)
{
	/* Nothing to do here - it just makes the kernel happy */
}
1150 
/**
 * nfp_cpp_from_operations() - Create a NFP CPP handle
 *                             from an operations structure
 * @ops:	NFP CPP operations structure
 * @parent:	Parent device
 * @priv:	Private data of low-level implementation
 *
 * Allocates the handle, registers the "cpp" child device, runs the
 * transport's init hook, autodetects the chip model and caches the
 * IMB mapping table and MU locality bit offset.
 *
 * NOTE: On failure, cpp_ops->free will be called!
 *
 * Return: NFP CPP handle on success, ERR_PTR on failure
 */
struct nfp_cpp *
nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
			struct device *parent, void *priv)
{
	const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
	struct nfp_cpp *cpp;
	int ifc, err;
	u32 mask[2];
	u32 xpbaddr;
	size_t tgt;

	cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
	if (!cpp) {
		err = -ENOMEM;
		goto err_malloc;
	}

	cpp->op = ops;
	cpp->priv = priv;

	/* The interface id is mandatory; it identifies us on the CPP bus. */
	ifc = ops->get_interface(parent);
	if (ifc < 0) {
		err = ifc;
		goto err_free_cpp;
	}
	cpp->interface = ifc;
	/* Serial number is optional - not every transport provides one. */
	if (ops->read_serial) {
		err = ops->read_serial(parent, cpp->serial);
		if (err)
			goto err_free_cpp;
	}

	rwlock_init(&cpp->resource_lock);
	init_waitqueue_head(&cpp->waitq);
	lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
	INIT_LIST_HEAD(&cpp->resource_list);
	INIT_LIST_HEAD(&cpp->area_cache_list);
	mutex_init(&cpp->area_cache_mutex);
	cpp->dev.init_name = "cpp";
	cpp->dev.parent = parent;
	cpp->dev.release = nfp_cpp_dev_release;
	err = device_register(&cpp->dev);
	if (err < 0) {
		/* A failed device_register() still requires put_device();
		 * the release callback is a no-op, so the kfree() in
		 * err_free_cpp remains safe afterwards.
		 */
		put_device(&cpp->dev);
		goto err_free_cpp;
	}

	dev_set_drvdata(&cpp->dev, cpp);

	/* NOTE: cpp_lock is NOT locked for op->init,
	 * since it may call NFP CPP API operations
	 */
	if (cpp->op->init) {
		err = cpp->op->init(cpp);
		if (err < 0) {
			dev_err(parent,
				"NFP interface initialization failed\n");
			goto err_out;
		}
	}

	err = nfp_cpp_model_autodetect(cpp, &cpp->model);
	if (err < 0) {
		dev_err(parent, "NFP model detection failed\n");
		goto err_out;
	}

	/* Cache the CPP mapping table so later target decoding does not
	 * have to touch the device again.
	 */
	for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
		/* Hardcoded XPB IMB Base, island 0 */
		xpbaddr = 0x000a0000 + (tgt * 4);
		err = nfp_xpb_readl(cpp, xpbaddr,
				    &cpp->imb_cat_table[tgt]);
		if (err < 0) {
			dev_err(parent,
				"Can't read CPP mapping from device\n");
			goto err_out;
		}
	}

	/* NOTE(review): mask[] is filled but never used afterwards; the
	 * reads are kept as-is in case the device accesses themselves
	 * matter - confirm before removing.
	 */
	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
		      &mask[0]);
	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
		      &mask[1]);

	err = nfp_cpp_set_mu_locality_lsb(cpp);
	if (err < 0) {
		dev_err(parent,	"Can't calculate MU locality bit offset\n");
		goto err_out;
	}

	dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
		 nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));

	return cpp;

err_out:
	device_unregister(&cpp->dev);
err_free_cpp:
	kfree(cpp);
err_malloc:
	return ERR_PTR(err);
}
1264 
/**
 * nfp_cpp_priv() - Get the operations private data of a CPP handle
 * @cpp:	CPP handle
 *
 * Return: Private data for the NFP CPP handle, as passed to
 *	   nfp_cpp_from_operations()
 */
void *nfp_cpp_priv(struct nfp_cpp *cpp)
{
	return cpp->priv;
}
1275 
/**
 * nfp_cpp_device() - Get the Linux device handle of a CPP handle
 * @cpp:	CPP handle
 *
 * Return: Device for the NFP CPP bus (the embedded "cpp" child device)
 */
struct device *nfp_cpp_device(struct nfp_cpp *cpp)
{
	return &cpp->dev;
}
1286 
/* Invoke low-level explicit op @func on @expl and yield its return
 * value, or -ENODEV if the transport does not implement that op.
 */
#define NFP_EXPL_OP(func, expl, args...)			  \
	({							  \
		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
		int err = -ENODEV;				  \
								  \
		if (cpp->op->func)				  \
			err = cpp->op->func(expl, ##args);	  \
		err;						  \
	})

/* As NFP_EXPL_OP(), but for ops whose result is ignored (No Return). */
#define NFP_EXPL_OP_NR(func, expl, args...)			  \
	({							  \
		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
								  \
		if (cpp->op->func)				  \
			cpp->op->func(expl, ##args);		  \
								  \
	})
1305 
1306 /**
1307  * nfp_cpp_explicit_acquire() - Acquire explicit access handle
1308  * @cpp:	NFP CPP handle
1309  *
1310  * The 'data_ref' and 'signal_ref' values are useful when
1311  * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
1312  *
1313  * Return: NFP CPP explicit handle
1314  */
nfp_cpp_explicit_acquire(struct nfp_cpp * cpp)1315 struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
1316 {
1317 	struct nfp_cpp_explicit *expl;
1318 	int err;
1319 
1320 	expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
1321 	if (!expl)
1322 		return NULL;
1323 
1324 	expl->cpp = cpp;
1325 	err = NFP_EXPL_OP(explicit_acquire, expl);
1326 	if (err < 0) {
1327 		kfree(expl);
1328 		return NULL;
1329 	}
1330 
1331 	return expl;
1332 }
1333 
1334 /**
1335  * nfp_cpp_explicit_set_target() - Set target fields for explicit
1336  * @expl:	Explicit handle
1337  * @cpp_id:	CPP ID field
1338  * @len:	CPP Length field
1339  * @mask:	CPP Mask field
1340  *
1341  * Return: 0, or -ERRNO
1342  */
nfp_cpp_explicit_set_target(struct nfp_cpp_explicit * expl,u32 cpp_id,u8 len,u8 mask)1343 int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
1344 				u32 cpp_id, u8 len, u8 mask)
1345 {
1346 	expl->cmd.cpp_id = cpp_id;
1347 	expl->cmd.len = len;
1348 	expl->cmd.byte_mask = mask;
1349 
1350 	return 0;
1351 }
1352 
1353 /**
1354  * nfp_cpp_explicit_set_data() - Set data fields for explicit
1355  * @expl:	Explicit handle
1356  * @data_master: CPP Data Master field
1357  * @data_ref:	CPP Data Ref field
1358  *
1359  * Return: 0, or -ERRNO
1360  */
nfp_cpp_explicit_set_data(struct nfp_cpp_explicit * expl,u8 data_master,u16 data_ref)1361 int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
1362 			      u8 data_master, u16 data_ref)
1363 {
1364 	expl->cmd.data_master = data_master;
1365 	expl->cmd.data_ref = data_ref;
1366 
1367 	return 0;
1368 }
1369 
1370 /**
1371  * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
1372  * @expl:	Explicit handle
1373  * @signal_master: CPP Signal Master field
1374  * @signal_ref:	CPP Signal Ref field
1375  *
1376  * Return: 0, or -ERRNO
1377  */
nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit * expl,u8 signal_master,u8 signal_ref)1378 int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
1379 				u8 signal_master, u8 signal_ref)
1380 {
1381 	expl->cmd.signal_master = signal_master;
1382 	expl->cmd.signal_ref = signal_ref;
1383 
1384 	return 0;
1385 }
1386 
1387 /**
1388  * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
1389  * @expl:	Explicit handle
1390  * @posted:	True for signaled completion, false otherwise
1391  * @siga:	CPP Signal A field
1392  * @siga_mode:	CPP Signal A Mode field
1393  * @sigb:	CPP Signal B field
1394  * @sigb_mode:	CPP Signal B Mode field
1395  *
1396  * Return: 0, or -ERRNO
1397  */
nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit * expl,int posted,u8 siga,enum nfp_cpp_explicit_signal_mode siga_mode,u8 sigb,enum nfp_cpp_explicit_signal_mode sigb_mode)1398 int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
1399 				u8 siga,
1400 				enum nfp_cpp_explicit_signal_mode siga_mode,
1401 				u8 sigb,
1402 				enum nfp_cpp_explicit_signal_mode sigb_mode)
1403 {
1404 	expl->cmd.posted = posted;
1405 	expl->cmd.siga = siga;
1406 	expl->cmd.sigb = sigb;
1407 	expl->cmd.siga_mode = siga_mode;
1408 	expl->cmd.sigb_mode = sigb_mode;
1409 
1410 	return 0;
1411 }
1412 
/**
 * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access
 * @expl:	NFP CPP Explicit handle
 * @buff:	Data to have the target pull in the transaction
 * @len:	Length of data, in bytes
 *
 * The 'len' parameter must be less than or equal to 128 bytes.
 *
 * If this function is called before the configuration
 * registers are set, it will return -EINVAL.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
			 const void *buff, size_t len)
{
	/* Delegates to the transport's explicit_put op (-ENODEV if absent) */
	return NFP_EXPL_OP(explicit_put, expl, buff, len);
}
1431 
/**
 * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
 * @expl:	NFP CPP Explicit handle
 * @address:	Address to send in the explicit transaction
 *
 * If this function is called before the configuration
 * registers are set, it will return -1, with an errno of EINVAL.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
{
	/* Delegates to the transport's explicit_do op (-ENODEV if absent) */
	return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
}
1446 
/**
 * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access
 * @expl:	NFP CPP Explicit handle
 * @buff:	Data that the target pushed in the transaction
 * @len:	Length of data, in bytes
 *
 * The 'len' parameter must be less than or equal to 128 bytes.
 *
 * If this function is called before all three configuration
 * registers are set, it will return -1, with an errno of EINVAL.
 *
 * If this function is called before nfp_cpp_explicit_do()
 * has completed, it will return -1, with an errno of EBUSY.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
{
	/* Delegates to the transport's explicit_get op (-ENODEV if absent) */
	return NFP_EXPL_OP(explicit_get, expl, buff, len);
}
1467 
/**
 * nfp_cpp_explicit_release() - Release explicit access handle
 * @expl:	NFP CPP Explicit handle
 *
 * Gives the transport a chance to tear down its per-handle state
 * before the handle (and its trailing private area) is freed.
 */
void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
{
	NFP_EXPL_OP_NR(explicit_release, expl);
	kfree(expl);
}
1478 
/**
 * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
 * @cpp_explicit:	CPP explicit handle
 *
 * Return: NFP CPP handle of the explicit
 */
struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
{
	return cpp_explicit->cpp;
}
1489 
/**
 * nfp_cpp_explicit_priv() - return private struct for CPP explicit
 * @cpp_explicit:	CPP explicit handle
 *
 * Return: private data of the explicit, or NULL
 */
void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
{
	/* The private area is allocated immediately after the handle in
	 * nfp_cpp_explicit_acquire(), so &cpp_explicit[1] is its first byte.
	 */
	return &cpp_explicit[1];
}
1500