/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#ifndef __MIPS_ASM_MIPS_CPS_H__
# error Please include asm/mips-cps.h rather than asm/mips-cm.h
#endif

#ifndef __MIPS_ASM_MIPS_CM_H__
#define __MIPS_ASM_MIPS_CM_H__

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/* The base address of the CM GCR block */
extern void __iomem *mips_gcr_base;

/* The base address of the CM L2-only sync region */
extern void __iomem *mips_cm_l2sync_base;

/**
 * mips_cm_phys_base - retrieve the physical base address of the CM
 *
 * This function returns the physical base address of the Coherence Manager
 * global control block, or 0 if no Coherence Manager is present. It provides
 * a default implementation which reads the CMGCRBase register where available,
 * and may be overridden by platforms which determine this address in a
 * different way by defining a function with the same prototype.
 */
extern phys_addr_t mips_cm_phys_base(void);

/**
 * mips_cm_l2sync_phys_base - retrieve the physical base address of the CM
 *                            L2-sync region
 *
 * This function returns the physical base address of the Coherence Manager
 * L2-only sync region. It provides a default implementation which reads the
 * CMGCRL2OnlySyncBase register where available, or falls back to the 4KiB
 * region immediately following the CM GCR block. It may be overridden by
 * platforms which determine this address in a different way by defining a
 * function with the same prototype.
 */
extern phys_addr_t mips_cm_l2sync_phys_base(void);

/*
 * mips_cm_is64 - determine CM register width
 *
 * The CM register width is determined by the version of the CM: CM3
 * introduced 64-bit GCRs, while all prior CM versions have 32-bit GCRs.
 * However, we may run a kernel built for MIPS32 on a system with 64-bit GCRs,
 * or vice-versa. This variable indicates the width of the memory accesses
 * that the kernel will perform on GCRs, which may differ from the actual
 * width of the GCRs.
 *
 * It is set to 0 for 32-bit accesses and 1 for 64-bit accesses.
 */
extern int mips_cm_is64;
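
/*
 * Illustrative sketch only (simplified; see the accessor-generating macros in
 * asm/mips-cps.h for the real definitions): a 64-bit GCR accessor generated
 * below consults this flag roughly as follows, with "foo" standing in for
 * whichever register is being accessed:
 *
 *	static inline u64 read_gcr_foo(void)
 *	{
 *		if (mips_cm_is64)
 *			return __raw_readq(addr_gcr_foo());
 *		return __raw_readl(addr_gcr_foo());
 *	}
 */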

/**
 * mips_cm_error_report - Report CM cache errors
 */
#ifdef CONFIG_MIPS_CM
extern void mips_cm_error_report(void);
#else
static inline void mips_cm_error_report(void) {}
#endif

/**
 * mips_cm_probe - probe for a Coherence Manager
 *
 * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
 * is successfully detected, else -errno.
 */
#ifdef CONFIG_MIPS_CM
extern int mips_cm_probe(void);
#else
static inline int mips_cm_probe(void)
{
	return -ENODEV;
}
#endif

/**
 * mips_cm_present - determine whether a Coherence Manager is present
 *
 * Returns true if a CM is present in the system, else false.
 */
static inline bool mips_cm_present(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_gcr_base != NULL;
#else
	return false;
#endif
}

/**
 * mips_cm_has_l2sync - determine whether an L2-only sync region is present
 *
 * Returns true if the system implements an L2-only sync region, else false.
 */
static inline bool mips_cm_has_l2sync(void)
{
#ifdef CONFIG_MIPS_CM
	return mips_cm_l2sync_base != NULL;
#else
	return false;
#endif
}

/* Offsets to register blocks from the CM base address */
#define MIPS_CM_GCB_OFS		0x0000 /* Global Control Block */
#define MIPS_CM_CLCB_OFS	0x2000 /* Core Local Control Block */
#define MIPS_CM_COCB_OFS	0x4000 /* Core Other Control Block */
#define MIPS_CM_GDB_OFS		0x6000 /* Global Debug Block */

/* Total size of the CM memory mapped registers */
#define MIPS_CM_GCR_SIZE	0x8000

/* Size of the L2-only sync region */
#define MIPS_CM_L2SYNC_SIZE	0x1000

#define GCR_ACCESSOR_RO(sz, off, name)					\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name)		\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_ACCESSOR_RW(sz, off, name)					\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name)		\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name)

#define GCR_CX_ACCESSOR_RO(sz, off, name)				\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name)	\
	CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)

#define GCR_CX_ACCESSOR_RW(sz, off, name)				\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name)	\
	CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name)

/* GCR_CONFIG - Information about the system */
GCR_ACCESSOR_RO(64, 0x000, config)
#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE	BIT_ULL(43)
#define CM_GCR_CONFIG_CLUSTER_ID		GENMASK_ULL(39, 32)
#define CM_GCR_CONFIG_NUM_CLUSTERS		GENMASK(29, 23)
#define CM_GCR_CONFIG_NUMIOCU			GENMASK(15, 8)
#define CM_GCR_CONFIG_PCORES			GENMASK(7, 0)
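
/*
 * For example (an illustrative sketch, not code from this header): the core
 * count advertised by GCR_CONFIG can be extracted with FIELD_GET(); PCORES
 * holds the number of cores minus one:
 *
 *	unsigned int ncores = FIELD_GET(CM_GCR_CONFIG_PCORES,
 *					read_gcr_config()) + 1;
 */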

/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */
GCR_ACCESSOR_RW(64, 0x008, base)
#define CM_GCR_BASE_GCRBASE			GENMASK_ULL(47, 15)
#define CM_GCR_BASE_CMDEFTGT			GENMASK(1, 0)
#define  CM_GCR_BASE_CMDEFTGT_MEM		0
#define  CM_GCR_BASE_CMDEFTGT_RESERVED		1
#define  CM_GCR_BASE_CMDEFTGT_IOCU0		2
#define  CM_GCR_BASE_CMDEFTGT_IOCU1		3

/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x020, access)
#define CM_GCR_ACCESS_ACCESSEN			GENMASK(7, 0)

/* GCR_REV - Indicates the Coherence Manager revision */
GCR_ACCESSOR_RO(32, 0x030, rev)
#define CM_GCR_REV_MAJOR			GENMASK(15, 8)
#define CM_GCR_REV_MINOR			GENMASK(7, 0)

#define CM_ENCODE_REV(major, minor) \
		(FIELD_PREP(CM_GCR_REV_MAJOR, major) | \
		 FIELD_PREP(CM_GCR_REV_MINOR, minor))

#define CM_REV_CM2				CM_ENCODE_REV(6, 0)
#define CM_REV_CM2_5				CM_ENCODE_REV(7, 0)
#define CM_REV_CM3				CM_ENCODE_REV(8, 0)
#define CM_REV_CM3_5				CM_ENCODE_REV(9, 0)

/* GCR_ERR_CONTROL - Control error checking logic */
GCR_ACCESSOR_RW(32, 0x038, err_control)
#define CM_GCR_ERR_CONTROL_L2_ECC_EN		BIT(1)
#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT	BIT(0)

/* GCR_ERR_MASK - Control which errors are reported as interrupts */
GCR_ACCESSOR_RW(64, 0x040, error_mask)

/* GCR_ERR_CAUSE - Indicates the type of error that occurred */
GCR_ACCESSOR_RW(64, 0x048, error_cause)
#define CM_GCR_ERROR_CAUSE_ERRTYPE		GENMASK(31, 27)
#define CM3_GCR_ERROR_CAUSE_ERRTYPE		GENMASK_ULL(63, 58)
#define CM_GCR_ERROR_CAUSE_ERRINFO		GENMASK(26, 0)

/* GCR_ERR_ADDR - Indicates the address associated with an error */
GCR_ACCESSOR_RW(64, 0x050, error_addr)

/* GCR_ERR_MULT - Indicates when multiple errors have occurred */
GCR_ACCESSOR_RW(64, 0x058, error_mult)
#define CM_GCR_ERROR_MULT_ERR2ND		GENMASK(4, 0)

/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */
GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE	GENMASK(31, 12)
#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN		BIT(0)

/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RW(64, 0x080, gic_base)
#define CM_GCR_GIC_BASE_GICBASE			GENMASK(31, 17)
#define CM_GCR_GIC_BASE_GICEN			BIT(0)

/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */
GCR_ACCESSOR_RW(64, 0x088, cpc_base)
#define CM_GCR_CPC_BASE_CPCBASE			GENMASK(31, 15)
#define CM_GCR_CPC_BASE_CPCEN			BIT(0)

/* GCR_REGn_BASE - Base addresses of CM address regions */
GCR_ACCESSOR_RW(64, 0x090, reg0_base)
GCR_ACCESSOR_RW(64, 0x0a0, reg1_base)
GCR_ACCESSOR_RW(64, 0x0b0, reg2_base)
GCR_ACCESSOR_RW(64, 0x0c0, reg3_base)
#define CM_GCR_REGn_BASE_BASEADDR		GENMASK(31, 16)

/* GCR_REGn_MASK - Size & destination of CM address regions */
GCR_ACCESSOR_RW(64, 0x098, reg0_mask)
GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask)
GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask)
GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask)
#define CM_GCR_REGn_MASK_ADDRMASK		GENMASK(31, 16)
#define CM_GCR_REGn_MASK_CCAOVR			GENMASK(7, 5)
#define CM_GCR_REGn_MASK_CCAOVREN		BIT(4)
#define CM_GCR_REGn_MASK_DROPL2			BIT(2)
#define CM_GCR_REGn_MASK_CMTGT			GENMASK(1, 0)
#define  CM_GCR_REGn_MASK_CMTGT_DISABLED	0x0
#define  CM_GCR_REGn_MASK_CMTGT_MEM		0x1
#define  CM_GCR_REGn_MASK_CMTGT_IOCU0		0x2
#define  CM_GCR_REGn_MASK_CMTGT_IOCU1		0x3

/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */
GCR_ACCESSOR_RO(32, 0x0d0, gic_status)
#define CM_GCR_GIC_STATUS_EX			BIT(0)

/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */
GCR_ACCESSOR_RO(32, 0x0f0, cpc_status)
#define CM_GCR_CPC_STATUS_EX			BIT(0)

/* GCR_ACCESS - Controls core/IOCU access to GCRs */
GCR_ACCESSOR_RW(32, 0x120, access_cm3)
#define CM_GCR_ACCESS_ACCESSEN			GENMASK(7, 0)

/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */
GCR_ACCESSOR_RW(32, 0x130, l2_config)
#define CM_GCR_L2_CONFIG_BYPASS			BIT(20)
#define CM_GCR_L2_CONFIG_SET_SIZE		GENMASK(15, 12)
#define CM_GCR_L2_CONFIG_LINE_SIZE		GENMASK(11, 8)
#define CM_GCR_L2_CONFIG_ASSOC			GENMASK(7, 0)

/* GCR_SYS_CONFIG2 - Further information about the system */
GCR_ACCESSOR_RO(32, 0x150, sys_config2)
#define CM_GCR_SYS_CONFIG2_MAXVPW		GENMASK(3, 0)

/* GCR_L2_RAM_CONFIG - Configuration & status of L2 cache RAMs */
GCR_ACCESSOR_RW(64, 0x240, l2_ram_config)
#define CM_GCR_L2_RAM_CONFIG_PRESENT		BIT(31)
#define CM_GCR_L2_RAM_CONFIG_HCI_DONE		BIT(30)
#define CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED	BIT(29)

/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
#define CM_GCR_L2_PFT_CONTROL_PAGEMASK		GENMASK(31, 12)
#define CM_GCR_L2_PFT_CONTROL_PFTEN		BIT(8)
#define CM_GCR_L2_PFT_CONTROL_NPFT		GENMASK(7, 0)

/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
#define CM_GCR_L2_PFT_CONTROL_B_CEN		BIT(8)
#define CM_GCR_L2_PFT_CONTROL_B_PORTID		GENMASK(7, 0)

/* GCR_L2_TAG_ADDR - Access addresses in L2 cache tags */
GCR_ACCESSOR_RW(64, 0x600, l2_tag_addr)

/* GCR_L2_TAG_STATE - Access L2 cache tag state */
GCR_ACCESSOR_RW(64, 0x608, l2_tag_state)

/* GCR_L2_DATA - Access data in L2 cache lines */
GCR_ACCESSOR_RW(64, 0x610, l2_data)

/* GCR_L2_ECC - Access ECC information from L2 cache lines */
GCR_ACCESSOR_RW(64, 0x618, l2_ecc)

/* GCR_L2SM_COP - L2 cache op state machine control */
GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
#define CM_GCR_L2SM_COP_PRESENT			BIT(31)
#define CM_GCR_L2SM_COP_RESULT			GENMASK(8, 6)
#define  CM_GCR_L2SM_COP_RESULT_DONTCARE	0
#define  CM_GCR_L2SM_COP_RESULT_DONE_OK		1
#define  CM_GCR_L2SM_COP_RESULT_DONE_ERROR	2
#define  CM_GCR_L2SM_COP_RESULT_ABORT_OK	3
#define  CM_GCR_L2SM_COP_RESULT_ABORT_ERROR	4
#define CM_GCR_L2SM_COP_RUNNING			BIT(5)
#define CM_GCR_L2SM_COP_TYPE			GENMASK(4, 2)
#define  CM_GCR_L2SM_COP_TYPE_IDX_WBINV		0
#define  CM_GCR_L2SM_COP_TYPE_IDX_STORETAG	1
#define  CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA	2
#define  CM_GCR_L2SM_COP_TYPE_HIT_INV		4
#define  CM_GCR_L2SM_COP_TYPE_HIT_WBINV		5
#define  CM_GCR_L2SM_COP_TYPE_HIT_WB		6
#define  CM_GCR_L2SM_COP_TYPE_FETCHLOCK		7
#define CM_GCR_L2SM_COP_CMD			GENMASK(1, 0)
#define  CM_GCR_L2SM_COP_CMD_START		1	/* only when idle */
#define  CM_GCR_L2SM_COP_CMD_ABORT		3	/* only when running */

/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */
GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop)
#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES	GENMASK_ULL(63, 48)
#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG	GENMASK_ULL(47, 6)
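
/*
 * A rough illustrative sketch of how these two registers work together to run
 * an index writeback-invalidate over a range of L2 lines, inferred from the
 * field definitions above (num_lines and start_tag are hypothetical values;
 * consult the CM documentation for the authoritative sequence, and check
 * CM_GCR_L2SM_COP_PRESENT before relying on the state machine):
 *
 *	write_gcr_l2sm_tag_addr_cop(
 *		FIELD_PREP(CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES, num_lines) |
 *		FIELD_PREP(CM_GCR_L2SM_TAG_ADDR_COP_START_TAG, start_tag));
 *	write_gcr_l2sm_cop(
 *		FIELD_PREP(CM_GCR_L2SM_COP_TYPE, CM_GCR_L2SM_COP_TYPE_IDX_WBINV) |
 *		FIELD_PREP(CM_GCR_L2SM_COP_CMD, CM_GCR_L2SM_COP_CMD_START));
 *	while (read_gcr_l2sm_cop() & CM_GCR_L2SM_COP_RUNNING)
 *		;
 */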

/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */
GCR_ACCESSOR_RW(64, 0x680, bev_base)

/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */
GCR_CX_ACCESSOR_RW(32, 0x000, reset_release)

/* GCR_Cx_COHERENCE - Controls core coherence */
GCR_CX_ACCESSOR_RW(32, 0x008, coherence)
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN		GENMASK(7, 0)
#define CM3_GCR_Cx_COHERENCE_COHEN		BIT(0)

/* GCR_Cx_CONFIG - Information about a core's configuration */
GCR_CX_ACCESSOR_RO(32, 0x010, config)
#define CM_GCR_Cx_CONFIG_IOCUTYPE		GENMASK(11, 10)
#define CM_GCR_Cx_CONFIG_PVPE			GENMASK(9, 0)

/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */
GCR_CX_ACCESSOR_RW(32, 0x018, other)
#define CM_GCR_Cx_OTHER_CORENUM			GENMASK(31, 16)	/* CM < 3 */
#define CM_GCR_Cx_OTHER_CLUSTER_EN		BIT(31)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_GIC_EN			BIT(30)		/* CM >= 3.5 */
#define CM_GCR_Cx_OTHER_BLOCK			GENMASK(25, 24)	/* CM >= 3.5 */
#define  CM_GCR_Cx_OTHER_BLOCK_LOCAL		0
#define  CM_GCR_Cx_OTHER_BLOCK_GLOBAL		1
#define  CM_GCR_Cx_OTHER_BLOCK_USER		2
#define  CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH	3
#define CM_GCR_Cx_OTHER_CLUSTER			GENMASK(21, 16)	/* CM >= 3.5 */
#define CM3_GCR_Cx_OTHER_CORE			GENMASK(13, 8)	/* CM >= 3 */
#define  CM_GCR_Cx_OTHER_CORE_CM		32
#define CM3_GCR_Cx_OTHER_VP			GENMASK(2, 0)	/* CM >= 3 */

/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */
GCR_CX_ACCESSOR_RW(32, 0x020, reset_base)
GCR_CX_ACCESSOR_RW(64, 0x020, reset64_base)
#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE		GENMASK(31, 12)
#define CM_GCR_Cx_RESET64_BASE_BEVEXCBASE	GENMASK_ULL(47, 12)
#define CM_GCR_Cx_RESET_BASE_MODE		BIT(1)

/* GCR_Cx_ID - Identify the current core */
GCR_CX_ACCESSOR_RO(32, 0x028, id)
#define CM_GCR_Cx_ID_CLUSTER			GENMASK(15, 8)
#define CM_GCR_Cx_ID_CORE			GENMASK(7, 0)

/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */
GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base)
#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET	BIT(31)
#define CM_GCR_Cx_RESET_EXT_BASE_UEB		BIT(30)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK	GENMASK(27, 20)
#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA	GENMASK(7, 1)
#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT	BIT(0)

/**
 * mips_cm_l2sync - perform an L2-only sync operation
 *
 * If an L2-only sync region is present in the system then this function
 * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
 */
static inline int mips_cm_l2sync(void)
{
	if (!mips_cm_has_l2sync())
		return -ENODEV;

	writel(0, mips_cm_l2sync_base);
	return 0;
}

/**
 * mips_cm_revision() - return CM revision
 *
 * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
 * return value should be checked against the CM_REV_* macros.
 */
static inline int mips_cm_revision(void)
{
	if (!mips_cm_present())
		return 0;

	return read_gcr_rev();
}
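
/*
 * For example, feature checks elsewhere in arch/mips compare this value
 * against the CM_REV_* encodings above; enable_cm3_feature() is a
 * hypothetical stand-in for whatever the caller does:
 *
 *	if (mips_cm_revision() >= CM_REV_CM3)
 *		enable_cm3_feature();
 */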

/**
 * mips_cm_max_vp_width() - return the width of the per-core VP index space
 *
 * Return: the width of the VP index space within each core, i.e. the maximum
 * number of Virtual Processors (VPs) per core, used as the stride between
 * cores in fields that combine core & VP indices.
 */
static inline unsigned int mips_cm_max_vp_width(void)
{
	extern int smp_num_siblings;

	if (mips_cm_revision() >= CM_REV_CM3)
		return FIELD_GET(CM_GCR_SYS_CONFIG2_MAXVPW,
				 read_gcr_sys_config2());

	if (mips_cm_present()) {
		/*
		 * We presume that all cores in the system will have the same
		 * number of VP(E)s, and if that ever changes then this will
		 * need revisiting.
		 */
		return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, read_gcr_cl_config()) + 1;
	}

	if (IS_ENABLED(CONFIG_SMP))
		return smp_num_siblings;

	return 1;
}

/**
 * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
 * @cpu: the CPU whose VP ID to calculate
 *
 * Hardware such as the GIC uses identifiers for VPs which may not match the
 * CPU numbers used by Linux. This function calculates the hardware VP
 * identifier corresponding to a given CPU.
 *
 * Return: the VP ID for the CPU.
 */
static inline unsigned int mips_cm_vp_id(unsigned int cpu)
{
	unsigned int core = cpu_core(&cpu_data[cpu]);
	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);

	return (core * mips_cm_max_vp_width()) + vp;
}
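
/*
 * For example, with mips_cm_max_vp_width() == 4, the CPU running on core 2,
 * VP 1 is assigned hardware VP ID (2 * 4) + 1 == 9.
 */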

#ifdef CONFIG_MIPS_CM

/**
 * mips_cm_lock_other - lock access to redirect/other region
 * @cluster: the other cluster to be accessed
 * @core: the other core to be accessed
 * @vp: the VP within the other core to be accessed
 * @block: the register block to be accessed
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cluster, @core, @vp & register
 * @block. Must be called before using the redirect/other region, and followed
 * by a call to mips_cm_unlock_other() when access to the redirect/other region
 * is complete.
 *
 * This function acquires a spinlock such that code between a call to it and
 * the matching mips_cm_unlock_other() call cannot be preempted by anything
 * which may reconfigure the redirect/other region, and cannot be interfered
 * with by another VP in the core. As such, calls to this function must not be
 * nested.
 */
extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
			       unsigned int vp, unsigned int block);

/**
 * mips_cm_unlock_other - unlock access to redirect/other region
 *
 * Must be called after mips_cm_lock_other() once all required access to the
 * redirect/other region has been completed.
 */
extern void mips_cm_unlock_other(void);

#else /* !CONFIG_MIPS_CM */

static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
				      unsigned int vp, unsigned int block) { }
static inline void mips_cm_unlock_other(void) { }

#endif /* !CONFIG_MIPS_CM */
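
/*
 * Typical use of the redirect/other region looks roughly like the following
 * illustrative sketch, where cluster/core/vp identify the remote target and
 * entry_pa is a hypothetical reset vector address:
 *
 *	mips_cm_lock_other(cluster, core, vp, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 *	write_gcr_co_reset_base(entry_pa);
 *	mips_cm_unlock_other();
 *
 * The *_gcr_co_*() accessors generated above target the core-other (redirect)
 * block and are only meaningful between these two calls.
 */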

/**
 * mips_cm_lock_other_cpu - lock access to redirect/other region
 * @cpu: the other CPU whose registers we want to access
 * @block: the register block to be accessed
 *
 * Configure the redirect/other region for the local core/VP (depending upon
 * the CM revision) to target the specified @cpu & register @block. This is
 * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number
 * for convenience.
 */
static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block)
{
	struct cpuinfo_mips *d = &cpu_data[cpu];

	mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block);
}

#endif /* __MIPS_ASM_MIPS_CM_H__ */