#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		struct page **pages = batch->pages;

		do {
			/*
			 * Limit the number of pages freed in one go, so that
			 * cond_resched() is reached regularly even when
			 * PAGE_SIZE > 4K makes batches large.
			 */
			unsigned int nr = min(512U, batch->nr);

			free_pages_and_swap_cache(pages, nr);
			pages += nr;
			batch->nr -= nr;

			cond_resched();
		} while (batch->nr);
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}
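/**
 * __tlb_remove_page_size - queue a page for deferred freeing
 * @tlb: the mmu_gather gathering this page
 * @page: the page to queue
 * @page_size: the size of the mapping being unmapped
 *
 * Returns true if the active batch filled up and no further batch could
 * be allocated, in which case the caller must flush (e.g. via
 * tlb_flush_mmu()) before queueing more pages; returns false otherwise.
 */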
bool
__tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so, force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}

#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means, and this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
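/*
 * For illustration, the IRQ-disabled lockless walk being synchronized
 * against looks roughly like this (a simplified sketch, not the actual
 * gup_fast() implementation):
 *
 *	local_irq_save(flags);
 *	pgd = pgd_offset(mm, addr);
 *	... descend through p4d/pud/pmd and read pte entries ...
 *	local_irq_restore(flags);
 *
 * A table page may only be freed once no such walker can still be
 * dereferencing it, hence the IPI broadcast or RCU grace period below.
 */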
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_sync_one(void) { }

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * Used when we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}
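/*
 * Allocation-failure fallback: synchronize against any concurrent
 * software walkers (an IPI broadcast, or a no-op when IPI-based TLB
 * flushing already provides the guarantee) and free the single table
 * immediately instead of batching it.
 */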
static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}
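/*
 * Common initialization shared by tlb_gather_mmu() and
 * tlb_gather_mmu_fullmm(); @fullmm distinguishes tearing down the whole
 * address space from a partial unmap.
 */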
static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;
#endif

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}

/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}
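/*
 * Typical mmu_gather usage is, schematically (a simplified sketch rather
 * than any specific call site):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	... tear down PTEs, feeding pages and page tables to the gather
 *	... via tlb_remove_page() / tlb_remove_table() ...
 *	tlb_finish_mmu(&tlb);
 */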
/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and be left with stale TLB entries. So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can restrict a TLB flush to a given page-table level.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by avoiding
		 * multiple CPUs spamming TLBI messages at the same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * compared to fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}