/*
 * SN Platform GRU Driver
 *
 *		DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"

unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.bus_id = {0},
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;

/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should "shift" be used?? Depends on HT cpu numbering
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}

/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise) still
 *  in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit				    ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_lock.
 */
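
/*
 * Worked example (illustrative only; the values are made up): assume a
 * fresh generation with gs_asid at MIN_ASID and two loaded contexts that
 * still hold asids 0x40 and 0x80.  The first scan finds no in-use asid
 * below 0x40, so the assignable block is [MIN_ASID, 0x40) and asids are
 * handed out sequentially from it.  When gs_asid reaches that limit,
 * gru_reset_asid_limit() scans again, skips over the still-busy 0x40 and
 * returns a new block bounded by 0x80.  Once MAX_ASID is hit,
 * gru_wrap_asid() bumps the generation, flushes the chiplet's TLB and
 * assignment restarts at MIN_ASID.
 */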

/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gru %p\n", gru);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	gru_flush_all_tlb(gru);
	return MIN_ASID;
}

/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
	STAT(asid_next);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i])
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gru %p, inuse_asid 0x%x, cxtnum %d, gts %p\n",
			gru, inuse_asid, i, gru->gs_gts[i]);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid,
		limit);
	return asid;
}

/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	spin_lock(&gru->gs_asid_lock);
	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);
	spin_unlock(&gru->gs_asid_lock);

	gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid);
	return asid;
}

/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	do {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	} while (--n);
	return bits;
}

unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}

static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
	    gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
				     gts->ts_cbr_idx);
	gts->ts_dsr_map =
	    gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}

static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}
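
/*
 * Resource accounting used above: gs_cbr_map and gs_dsr_map have one bit
 * set per *free* allocation unit (AU).  reserve_resources() claims AUs by
 * clearing bits in the gru-wide map and returns them as a private bitmap
 * that is saved in ts_cbr_map/ts_dsr_map; free_gru_resources() releases
 * them by ORing those private bitmaps back in.  hweight64() of a map is
 * therefore the number of free AUs of that type.
 */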

/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}

/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms,
			       int ctxnum)
{
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
		STAT(asid_new);
	} else {
		STAT(asid_reuse);
	}

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gru %x, gms %p, ctxnum 0x%d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]);
	return asid;
}

static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_mm_struct *gms, int ctxnum)
{
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << ctxnum);
	spin_lock(&gms->ms_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gru %x, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
		gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gms->ms_asid_lock);
}
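
/*
 * Per-mm tracking summary: each gru_mm_struct keeps one gru_mm_tracker per
 * chiplet (indexed by gid).  The tracker records the asid assigned to the
 * mm on that chiplet, the asid generation it was assigned under, and a
 * bitmap (mt_ctxbitmap) of the context numbers currently using it.
 * ms_asidmap has a bit set for every chiplet that has been given an asid
 * for the mm; gru_unload_mm_tracker() clears the context's bit in
 * mt_ctxbitmap but does not clear the asidmap bit.
 */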

/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
		STAT(gts_free);
	}
}

/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
							     *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}

/*
 * Allocate a thread state structure.
 */
static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
					      struct gru_vma_data *vdata,
					      int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(vdata->vd_dsr_au_count) +
				CBR_BYTES(vdata->vd_cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kzalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	STAT(gts_alloc);
	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = vdata->vd_cbr_au_count;
	gts->ts_dsr_au_count = vdata->vd_dsr_au_count;
	gts->ts_user_options = vdata->vd_user_options;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_mm = current->mm;
	gts->ts_vma = vma;
	gts->ts_tlb_int_select = -1;
	gts->ts_gms = gru_register_mmu_notifier();
	if (!gts->ts_gms)
		goto err;

	gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}

/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}

/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					       int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Allocate a new thread state for a GSEG. Note that another thread may
 * race to create a gts for the same tsid; if it wins, the duplicate is
 * dropped and the existing gts is returned.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
						int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}

/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gru %p\n", gts, gru);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
	STAT(free_context);
}
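
/*
 * Reference counting note: a gts is created by gru_alloc_gts() with a
 * count of one, held by the vma's vd_head list.  Loading the context onto
 * a chiplet (gru_assign_gru_context(), below) takes an additional
 * reference for the gru->gs_gts[] slot, which gru_free_gru_context()
 * releases via gts_drop() when the chiplet context is freed.
 */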

/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetchw(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}

static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				 unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}

static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(cb, save);
		save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save);
		cb += GRU_HANDLE_STRIDE;
	}

	memcpy(gseg + GRU_DS_BASE, save, length);
}

static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}
	memcpy(save, gseg + GRU_DS_BASE, length);
}

void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();
	gru_dbg(grudev, "gts %p\n", gts);

	gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
	if (savestate)
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);

	if (cch_deallocate(cch))
		BUG();
	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
	STAT(unload_context);
}
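
/*
 * Context state layout: the save area (gts->ts_gdata) holds, for each
 * control block in the allocation map (walked via
 * for_each_cbr_in_allocation_map()), the CB handle followed by its CBE
 * handle, and then the data segment whose size is
 * hweight64(dsrmap) * GRU_DSR_AU_BYTES.  gru_load_context_data() copies
 * this image into the chiplet and gru_unload_context_data() copies it
 * back out in the same order.
 */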

/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
static void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum);
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	cch->tfm_done_bit_enable = 0;
	err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			      gts->ts_cbr_map, gts->ts_dsr_map);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);

	STAT(load_context);
}

/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- force a delayed context unload by clearing the CCH asids. This
 *	  forces TLB misses for new GRU instructions. The context is unloaded
 *	  when the next TLB miss occurs.
 */
static int gru_update_cch(struct gru_thread_state *gts, int int_select)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		if (int_select >= 0) {
			gts->ts_tlb_int_select = int_select;
			cch->tlb_int_select = int_select;
		} else {
			for (i = 0; i < 8; i++)
				cch->asid[i] = 0;
			cch->tfm_fault_bit_enable = 0;
			cch->tlb_int_enable = 0;
			gts->ts_force_unload = 1;
		}
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}

/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts, gru_cpu_fault_map_id());
}
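
/*
 * CCH update protocol used above: the context is quiesced with
 * cch_interrupt() (or cch_interrupt_sync() when unloading), the CCH fields
 * are modified while it is stopped, and cch_start() resumes it.  All such
 * updates are serialized by lock_cch_handle()/unlock_cch_handle().
 */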

/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])

static void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;

	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	preempt_disable();
	blade = gru_base[uv_numa_blade_id()];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
			break;
		spin_lock(&gru->gs_lock);
		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
			if (flag && gru == gru0 && ctxnum == ctxnum0)
				break;
			ngts = gru->gs_gts[ctxnum];
			/*
			 * We are grabbing locks out of order, so trylock is
			 * needed. GTSs are usually not locked, so the odds of
			 * success are high. If trylock fails, try to steal a
			 * different GSEG.
			 */
			if (ngts && mutex_trylock(&ngts->ts_ctxlock))
				break;
			ngts = NULL;
			flag = 1;
		}
		spin_unlock(&gru->gs_lock);
		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
			break;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	spin_unlock(&blade->bs_lock);
	preempt_enable();

	if (ngts) {
		STAT(steal_context);
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, 1);
		mutex_unlock(&ngts->ts_ctxlock);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}
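
/*
 * The blade-wide cursor (bs_lru_gru, bs_lru_ctxnum) updated above makes
 * stealing roughly round-robin: each attempt resumes the scan where the
 * previous one left off, walking context numbers within a chiplet and then
 * advancing to the next chiplet with next_gru(), so victims are spread
 * across the blade.
 */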

/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;

	preempt_disable();

again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, uv_numa_blade_id(), i) {
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_ctxnum =
		    find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
		BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	preempt_enable();
	return gru;
}

/*
 * gru_fault
 *
 * Map the user's GRU segment
 *
 *	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));
	STAT(nopfn);

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	preempt_disable();
	mutex_lock(&gts->ts_ctxlock);
	if (gts->ts_gru) {
		if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
			STAT(migrated_nopfn_unload);
			gru_unload_context(gts, 1);
		} else {
			if (gru_retarget_intr(gts))
				STAT(migrated_nopfn_retarget);
		}
	}

	if (!gts->ts_gru) {
		if (!gru_assign_gru_context(gts)) {
			mutex_unlock(&gts->ts_ctxlock);
			preempt_enable();
			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	mutex_unlock(&gts->ts_ctxlock);
	preempt_enable();

	return VM_FAULT_NOPAGE;
}