// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *              GRU KERNEL MCS INSTRUCTIONS
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

#include <linux/sync_core.h>
#include <asm/tsc.h>

/* 10 sec */
#define GRU_OPERATION_TIMEOUT	((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c)		((c) * 1000000 / tsc_khz)

/* Extract the status field from a kernel handle */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)

struct mcs_op_statistic mcs_op_statistics[mcsop_last];

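/*
 * Accumulate per-opcode statistics: operation count, total time in
 * nanoseconds, and the longest single operation seen so far.
 */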
static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	unsigned long nsec;

	nsec = CLKS2NSEC(clks);
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(nsec, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < nsec)
		mcs_op_statistics[op].max = nsec;
}

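/*
 * Launch an operation on a GRU handle: set the start bits in word 0 of
 * the handle, then flush the cache line so the GRU sees the update.
 */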
static void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD/STATUS bits must be last */
	*w0 = *w0 | 0x20001;
	gru_flush_cache(h);
}

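/*
 * A handle operation failed to complete within the timeout. Identify the
 * handle type (CCH, TGH or TFH) from its offset in the GSEG and panic.
 */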
static void report_instruction_timeout(void *h)
{
	unsigned long goff = GSEGPOFF((unsigned long)h);
	char *id = "???";

	if (TYPE_IS(CCH, goff))
		id = "CCH";
	else if (TYPE_IS(TGH, goff))
		id = "TGH";
	else if (TYPE_IS(TFH, goff))
		id = "TFH";

	panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
}

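/*
 * Spin until the handle status is no longer ACTIVE. If the operation has
 * not completed within GRU_OPERATION_TIMEOUT, report the hung handle.
 * When OPT_STATS is enabled, record timing statistics for the opcode.
 * Returns the final handle status.
 */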
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
	int status;
	unsigned long start_time = get_cycles();

	while (1) {
		cpu_relax();
		status = GET_MSEG_HANDLE_STATUS(h);
		if (status != CCHSTATUS_ACTIVE)
			break;
		if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
			report_instruction_timeout(h);
			start_time = get_cycles();
		}
	}
	if (gru_options & OPT_STATS)
		update_mcs_stats(opc, get_cycles() - start_time);
	return status;
}

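/*
 * Allocate GRU resources for a context through its context configuration
 * handle (CCH) and wait for the operation to complete. Returns the final
 * handle status. A caller would typically pair this with cch_start(); the
 * sketch below is illustrative only, not a sequence this file enforces:
 *
 *	if (cch_allocate(cch) || cch_start(cch))
 *		bail out or retry
 */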
int cch_allocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_ALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_allocate);

	/*
	 * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
	 * The GSEG memory does not exist until the ALLOCATE completes.
	 */
	sync_core();
	return ret;
}

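/* Start execution of the context described by the CCH. */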
int cch_start(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_START;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_start);
}

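/* Interrupt the context described by the CCH. */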
int cch_interrupt(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt);
}

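/*
 * Deallocate the GRU resources owned by the context described by the CCH
 * and wait for the operation to complete.
 */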
int cch_deallocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_DEALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_deallocate);

	/*
	 * Stop speculation into the GSEG being unmapped by the previous
	 * DEALLOCATE.
	 */
	sync_core();
	return ret;
}

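/* Issue the INTERRUPT_SYNC opcode on the CCH and wait for completion. */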
int cch_interrupt_sync(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT_SYNC;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt_sync);
}

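/*
 * Issue a TLB invalidate through a TLB global handle (TGH) for the given
 * virtual address range, ASID, page size and context bitmap, and wait for
 * it to complete.
 */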
int tgh_invalidate(struct gru_tlb_global_handle *tgh,
				 unsigned long vaddr, unsigned long vaddrmask,
				 int asid, int pagesize, int global, int n,
				 unsigned short ctxbitmap)
{
	tgh->vaddr = vaddr;
	tgh->asid = asid;
	tgh->pagesize = pagesize;
	tgh->n = n;
	tgh->global = global;
	tgh->vaddrmask = vaddrmask;
	tgh->ctxbitmap = ctxbitmap;
	tgh->opc = TGHOP_TLBINV;
	start_instruction(tgh);
	return wait_instruction_complete(tgh, tghop_invalidate);
}

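/*
 * Write a TLB entry through the TLB fault handle (TFH) using the
 * WRITE_ONLY opcode and wait for it to complete.
 */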
int tfh_write_only(struct gru_tlb_fault_handle *tfh,
				  unsigned long paddr, int gaa,
				  unsigned long vaddr, int asid, int dirty,
				  int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_ONLY;
	start_instruction(tfh);
	return wait_instruction_complete(tfh, tfhop_write_only);
}

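/*
 * Write a TLB entry through the TFH using the WRITE_RESTART opcode, which
 * also restarts the faulting GRU instruction. Does not wait for completion.
 */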
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
				     unsigned long paddr, int gaa,
				     unsigned long vaddr, int asid, int dirty,
				     int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_RESTART;
	start_instruction(tfh);
}

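/* Switch the TFH to user polling mode. Does not wait for completion. */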
void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_USER_POLLING_MODE;
	start_instruction(tfh);
}

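/* Signal an exception through the TFH. Does not wait for completion. */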
void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_EXCEPTION;
	start_instruction(tfh);
}