xref: /linux/arch/powerpc/include/asm/rtas-work-area.h (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
1*43033bc6SNathan Lynch /* SPDX-License-Identifier: GPL-2.0-only */
2*43033bc6SNathan Lynch #ifndef _ASM_POWERPC_RTAS_WORK_AREA_H
3*43033bc6SNathan Lynch #define _ASM_POWERPC_RTAS_WORK_AREA_H
4*43033bc6SNathan Lynch 
5*43033bc6SNathan Lynch #include <linux/build_bug.h>
6*43033bc6SNathan Lynch #include <linux/sizes.h>
7*43033bc6SNathan Lynch #include <linux/types.h>
8*43033bc6SNathan Lynch 
9*43033bc6SNathan Lynch #include <asm/page.h>
10*43033bc6SNathan Lynch 
/**
 * struct rtas_work_area - RTAS work area descriptor.
 *
 * Descriptor for a "work area" in PAPR terminology that satisfies
 * RTAS addressing requirements.
 */
struct rtas_work_area {
	/* private: Use the APIs provided below. */
	char *buf;	/* backing buffer; access via rtas_work_area_raw_buf() */
	size_t size;	/* size of @buf in bytes; access via rtas_work_area_size() */
};
22*43033bc6SNathan Lynch 
enum {
	/*
	 * Maximum allocation size, enforced at build time by the
	 * static_assert()s in rtas_work_area_alloc().
	 */
	RTAS_WORK_AREA_MAX_ALLOC_SZ = SZ_128K,
};
27*43033bc6SNathan Lynch 
/**
 * rtas_work_area_alloc() - Acquire a work area of the requested size.
 * @sz_: Allocation size. Must be a compile-time constant, greater than
 *       zero and no larger than %RTAS_WORK_AREA_MAX_ALLOC_SZ.
 *
 * Allocate a buffer suitable for passing to RTAS functions that have
 * a memory address parameter, often (but not always) referred to as a
 * "work area" in PAPR. Although callers are allowed to block while
 * holding a work area, the amount of memory reserved for this purpose
 * is limited, and allocations should be short-lived. A good guideline
 * is to release any allocated work area before returning from a
 * system call.
 *
 * This function does not fail. It blocks until the allocation
 * succeeds. To prevent deadlocks, callers are discouraged from
 * allocating more than one work area simultaneously in a single task
 * context.
 *
 * Context: This function may sleep.
 * Return: A &struct rtas_work_area descriptor for the allocated work area.
 */
#define rtas_work_area_alloc(sz_) ({				\
	static_assert(__builtin_constant_p(sz_));		\
	static_assert((sz_) <= RTAS_WORK_AREA_MAX_ALLOC_SZ);	\
	static_assert((sz_) > 0);				\
	__rtas_work_area_alloc(sz_);				\
})
55*43033bc6SNathan Lynch 
/*
 * Do not call __rtas_work_area_alloc() directly. Use
 * rtas_work_area_alloc(), which enforces the size constraints at
 * compile time.
 */
struct rtas_work_area *__rtas_work_area_alloc(size_t size);
61*43033bc6SNathan Lynch 
/**
 * rtas_work_area_free() - Release a work area.
 * @area: Work area descriptor as returned from rtas_work_area_alloc().
 *
 * Return a work area buffer to the pool. @area and its buffer must
 * not be used after this call.
 */
void rtas_work_area_free(struct rtas_work_area *area);
69*43033bc6SNathan Lynch 
/**
 * rtas_work_area_raw_buf() - Kernel virtual address of a work area buffer.
 * @area: Work area descriptor as returned from rtas_work_area_alloc().
 *
 * Return: Pointer to the work area's buffer.
 */
static inline char *rtas_work_area_raw_buf(const struct rtas_work_area *area)
{
	return area->buf;
}
74*43033bc6SNathan Lynch 
/**
 * rtas_work_area_size() - Size of a work area buffer.
 * @area: Work area descriptor as returned from rtas_work_area_alloc().
 *
 * Return: Size in bytes of the buffer returned by rtas_work_area_raw_buf().
 */
static inline size_t rtas_work_area_size(const struct rtas_work_area *area)
{
	return area->size;
}
79*43033bc6SNathan Lynch 
/**
 * rtas_work_area_phys() - Physical address of a work area buffer.
 * @area: Work area descriptor as returned from rtas_work_area_alloc().
 *
 * Return: Physical address of the buffer, suitable for passing as an
 *         address argument to RTAS calls.
 */
static inline phys_addr_t rtas_work_area_phys(const struct rtas_work_area *area)
{
	return __pa(area->buf);
}
84*43033bc6SNathan Lynch 
/*
 * Early setup for the work area allocator. Call from
 * rtas_initialize() only. A no-op stub is provided for
 * non-pseries configurations, where no arena is reserved.
 */

#ifdef CONFIG_PPC_PSERIES
void rtas_work_area_reserve_arena(phys_addr_t limit);
#else /* CONFIG_PPC_PSERIES */
static inline void rtas_work_area_reserve_arena(phys_addr_t limit) {}
#endif /* CONFIG_PPC_PSERIES */
95*43033bc6SNathan Lynch 
96*43033bc6SNathan Lynch #endif /* _ASM_POWERPC_RTAS_WORK_AREA_H */
97