// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SN Platform GRU Driver
 *
 *		MMUOPS callbacks + TLB flushing
 *
 * This file handles mmu notifier callbacks from the core kernel. The callbacks
 * are used to update the TLB in the GRU as a result of changes in the
 * state of a process address space. This file also handles TLB invalidates
 * from the GRU driver.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/hugetlb.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/srcu.h>
#include <asm/processor.h>
#include "gru.h"
#include "grutables.h"
#include <asm/uv/uv_hub.h>

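/*
 * Cheap pseudo-random source, used only to spread off-blade flushes
 * across the shared TGHs; statistical quality is not a concern here.
 */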
#define gru_random()	get_cycles()

/* ---------------------------------- TLB Invalidation functions --------
 * get_tgh_handle
 *
 * Find a TGH to use for issuing a TLB invalidate. For GRUs that are on the
 * local blade, use a fixed TGH that is a function of the blade-local cpu
 * number. Normally, this TGH is private to the cpu & no contention occurs for
 * the TGH. For offblade GRUs, select a random TGH in the range above the
 * private TGHs. A spinlock is required to access this TGH & the lock must be
 * released when the invalidate completes. This sucks, but it is the best we
 * can do.
 *
 * Note that the spinlock is IN the TGH handle so locking does not involve
 * additional cache lines.
 *
 */
static inline int get_off_blade_tgh(struct gru_state *gru)
{
	int n;

	n = GRU_NUM_TGH - gru->gs_tgh_first_remote;
	n = gru_random() % n;
	n += gru->gs_tgh_first_remote;
	return n;
}

static inline int get_on_blade_tgh(struct gru_state *gru)
{
	return uv_blade_processor_id() >> gru->gs_tgh_local_shift;
}

static struct gru_tlb_global_handle *get_lock_tgh_handle(struct gru_state
							 *gru)
{
	struct gru_tlb_global_handle *tgh;
	int n;

	preempt_disable();
	if (uv_numa_blade_id() == gru->gs_blade_id)
		n = get_on_blade_tgh(gru);
	else
		n = get_off_blade_tgh(gru);
	tgh = get_tgh_by_index(gru, n);
	lock_tgh_handle(tgh);

	return tgh;
}

static void get_unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
{
	unlock_tgh_handle(tgh);
	preempt_enable();
}
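
/*
 * Typical calling sequence for the pair above (see gru_flush_tlb_range
 * below):
 *
 *	tgh = get_lock_tgh_handle(gru);
 *	tgh_invalidate(tgh, ...);
 *	get_unlock_tgh_handle(tgh);
 *
 * Preemption stays disabled across the sequence so a blade-local cpu
 * cannot migrate away from the private TGH selected for it.
 */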

/*
 * gru_flush_tlb_range
 *
 * General purpose TLB invalidation function. This function scans every GRU in
 * the ENTIRE system (partition) looking for GRUs where the specified MM has
 * been accessed by the GRU. For each GRU found, the TLB must be invalidated OR
 * the ASID invalidated. Invalidating an ASID causes a new ASID to be assigned
 * on the next fault. This effectively flushes the ENTIRE TLB for the MM at the
 * cost of (possibly) a large number of future TLB misses.
 *
 * The current algorithm is optimized based on the following (somewhat true)
 * assumptions:
 *	- GRU contexts are not loaded into a GRU unless a reference is made to
 *	  the data segment or control block (this is true, not an assumption).
 *	  If a DS/CB is referenced, the user will also issue instructions that
 *	  cause TLB misses. It is not necessary to optimize for the case where
 *	  contexts are loaded but no instructions cause TLB misses. (I know
 *	  this will happen but I'm not optimizing for it).
 *	- GRU instructions to invalidate TLB entries are SLOOOOWWW - normally
 *	  a few usec but in unusual cases, it could be longer. Avoid if
 *	  possible.
 *	- intrablade process migration between cpus is infrequent but common
 *	  enough to matter.
 *	- a GRU context is not typically migrated to a different GRU on the
 *	  blade because of intrablade migration
 *	- interblade migration is rare. Processes migrate their GRU context to
 *	  the new blade.
 *	- if interblade migration occurs, migration back to the original blade
 *	  is very very rare (i.e., no optimization for this case)
 *	- most GRU instructions operate on a subset of the user REGIONS. Code
 *	  & shared library regions are not likely targets of GRU instructions.
 *
 * To help improve the efficiency of TLB invalidation, the GMS data
 * structure is maintained for EACH address space (MM struct). The GMS is
 * also the structure that contains the pointer to the mmu callout
 * functions. This structure is linked to the mm_struct for the address space
 * using the mmu "register" function. The mmu interfaces are used to
 * provide the callbacks for TLB invalidation. The GMS contains:
 *
 *	- asid[maxgrus] array. ASIDs are assigned to a GRU when a context is
 *	  loaded into the GRU.
 *	- asidmap[maxgrus]. bitmap to make it easier to find non-zero asids in
 *	  the above array
 *	- ctxbitmap[maxgrus]. Indicates the contexts that are currently active
 *	  in the GRU for the address space. This bitmap must be passed to the
 *	  GRU to do an invalidate.
 *
 * The current algorithm for invalidating TLBs is:
 *	- scan the asidmap for GRUs where the context has been loaded, i.e.,
 *	  asid is non-zero.
 *	- for each gru found:
 *		- if the ctxbitmap is non-zero, there are active contexts in
 *		  the GRU. TLB invalidate instructions must be issued to the
 *		  GRU.
 *		- if the ctxbitmap is zero, no context is active. Set the ASID
 *		  to zero to force a full TLB invalidation. This is fast but
 *		  will cause a lot of TLB misses if the context is reloaded
 *		  onto the GRU.
 *
 */

void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start,
			 unsigned long len)
{
	struct gru_state *gru;
	struct gru_mm_tracker *asids;
	struct gru_tlb_global_handle *tgh;
	unsigned long num;
	int grupagesize, pagesize, pageshift, gid, asid;

	/* ZZZ TODO - handle huge pages */
	pageshift = PAGE_SHIFT;
	pagesize = (1UL << pageshift);
	grupagesize = GRU_PAGESIZE(pageshift);
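	/*
	 * Page count to invalidate, capped at GRUMAXINVAL (presumably the
	 * most a single TGH invalidate operation can cover).
	 */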
	num = min(((len + pagesize - 1) >> pageshift), GRUMAXINVAL);

	STAT(flush_tlb);
	gru_dbg(grudev, "gms %p, start 0x%lx, len 0x%lx, asidmap 0x%lx\n", gms,
		start, len, gms->ms_asidmap[0]);

	spin_lock(&gms->ms_asid_lock);
	for_each_gru_in_bitmap(gid, gms->ms_asidmap) {
		STAT(flush_tlb_gru);
		gru = GID_TO_GRU(gid);
		asids = gms->ms_asids + gid;
		asid = asids->mt_asid;
		if (asids->mt_ctxbitmap && asid) {
			STAT(flush_tlb_gru_tgh);
			asid = GRUASID(asid, start);
			gru_dbg(grudev,
				"  FLUSH gruid %d, asid 0x%x, vaddr 0x%lx, vamask 0x%x, num %ld, cbmap 0x%x\n",
				gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
			tgh = get_lock_tgh_handle(gru);
			tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
				       num - 1, asids->mt_ctxbitmap);
			get_unlock_tgh_handle(tgh);
		} else {
			STAT(flush_tlb_gru_zero_asid);
			asids->mt_asid = 0;
			__clear_bit(gru->gs_gid, gms->ms_asidmap);
			gru_dbg(grudev,
				"  CLEARASID gruid %d, asid 0x%x, cbtmap 0x%x, asidmap 0x%lx\n",
				gid, asid, asids->mt_ctxbitmap,
				gms->ms_asidmap[0]);
		}
	}
	spin_unlock(&gms->ms_asid_lock);
}

/*
 * Flush the entire TLB on a chiplet.
 */
void gru_flush_all_tlb(struct gru_state *gru)
{
	struct gru_tlb_global_handle *tgh;

	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	tgh = get_lock_tgh_handle(gru);
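	/*
	 * Global flush: vaddr 0 with an all-ones mask, asid 0,
	 * GRUMAXINVAL - 1 pages, and (assuming 16 contexts per chiplet)
	 * a 0xffff ctxbitmap covering every context.
	 */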
	tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff);
	get_unlock_tgh_handle(tgh);
}

/*
 * MMUOPS notifier callout functions
 */
static int gru_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range)
{
	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
						 ms_notifier);

	STAT(mmu_invalidate_range);
	atomic_inc(&gms->ms_range_active);
	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx, act %d\n", gms,
		range->start, range->end, atomic_read(&gms->ms_range_active));
	gru_flush_tlb_range(gms, range->start, range->end - range->start);

	return 0;
}

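/*
 * Note: the fault paths elsewhere in the driver wait on ms_wait_queue
 * while ms_range_active is non-zero, so new GRU TLB dropins cannot race
 * with an invalidation that is still in flight; the wake_up_all() below
 * releases those waiters.
 */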
static void gru_invalidate_range_end(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct,
						 ms_notifier);

	/* ..._and_test() provides needed barrier */
	(void)atomic_dec_and_test(&gms->ms_range_active);

	wake_up_all(&gms->ms_wait_queue);
	gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n",
		gms, range->start, range->end);
}

static struct mmu_notifier *gru_alloc_notifier(struct mm_struct *mm)
{
	struct gru_mm_struct *gms;

	gms = kzalloc(sizeof(*gms), GFP_KERNEL);
	if (!gms)
		return ERR_PTR(-ENOMEM);
	STAT(gms_alloc);
	spin_lock_init(&gms->ms_asid_lock);
	init_waitqueue_head(&gms->ms_wait_queue);

	return &gms->ms_notifier;
}

static void gru_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct gru_mm_struct, ms_notifier));
	STAT(gms_free);
}

static const struct mmu_notifier_ops gru_mmuops = {
	.invalidate_range_start	= gru_invalidate_range_start,
	.invalidate_range_end	= gru_invalidate_range_end,
	.alloc_notifier		= gru_alloc_notifier,
	.free_notifier		= gru_free_notifier,
};
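/*
 * mmu_notifier_get_locked() either takes a reference on a notifier
 * already registered for current->mm or calls ->alloc_notifier() to
 * create one, so all GRU contexts in an address space share a single
 * GMS. The caller is expected to already hold mmap_sem for write;
 * mmu_notifier_put() drops the reference and eventually frees the GMS
 * through ->free_notifier() after an SRCU grace period.
 */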
struct gru_mm_struct *gru_register_mmu_notifier(void)
{
	struct mmu_notifier *mn;

	mn = mmu_notifier_get_locked(&gru_mmuops, current->mm);
	if (IS_ERR(mn))
		return ERR_CAST(mn);

	return container_of(mn, struct gru_mm_struct, ms_notifier);
}

void gru_drop_mmu_notifier(struct gru_mm_struct *gms)
{
	mmu_notifier_put(&gms->ms_notifier);
}

/*
 * Setup TGH parameters. There are:
 *	- 24 TGH handles per GRU chiplet
 *	- a portion (MAX_LOCAL_TGH) of the handles are reserved for
 *	  use by blade-local cpus
 *	- the rest are used by off-blade cpus. This usage is
 *	  less frequent than blade-local usage.
 *
 * For now, use 16 handles for local flushes, 8 for remote flushes. If the
 * blade has less than or equal to 16 cpus, each cpu has a unique handle
 * that it can use.
 */
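/*
 * Worked example of the arithmetic below (assuming GRU_NUM_TGH == 24,
 * per the comment above): a blade with 28 cpus rounds up to n = 32,
 * giving gs_tgh_local_shift = 1, so pairs of cpus share each local TGH;
 * gs_tgh_first_remote = (28 + 1) >> 1 = 14, leaving TGHs 14..23 for
 * off-blade flushes.
 */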
#define MAX_LOCAL_TGH	16

void gru_tgh_flush_init(struct gru_state *gru)
{
	int cpus, shift = 0, n;

	cpus = uv_blade_nr_possible_cpus(gru->gs_blade_id);

	/* n = cpus rounded up to next power of 2 */
	if (cpus) {
		n = 1 << fls(cpus - 1);

		/*
		 * shift count for converting local cpu# to TGH index
		 *	0 if cpus <= MAX_LOCAL_TGH,
		 *	1 if cpus <= 2*MAX_LOCAL_TGH,
		 *	etc
		 */
		shift = max(0, fls(n - 1) - fls(MAX_LOCAL_TGH - 1));
	}
	gru->gs_tgh_local_shift = shift;

	/* first TGH index to use for remote purges */
	gru->gs_tgh_first_remote = (cpus + (1 << shift) - 1) >> shift;
}