/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV architectural definitions
 *
 * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
 */

#ifndef _ASM_X86_UV_UV_HUB_H
#define _ASM_X86_UV_UV_HUB_H

#ifdef CONFIG_X86_64
#include <linux/numa.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/bios.h>
#include <asm/irq_vectors.h>
#include <asm/io_apic.h>


/*
 * Addressing Terminology
 *
 *	M	- The low M bits of a physical address represent the offset
 *		  into the blade local memory. RAM memory on a blade is
 *		  physically contiguous (although various IO spaces may punch
 *		  holes in it).
 *
 *	N	- Number of bits in the node portion of a socket physical
 *		  address.
 *
 *	NASID	- network ID of a router, Mbrick or Cbrick. NASID values of
 *		  routers always have the low bit set to 1; C/MBricks have the
 *		  low bit equal to 0. Most addressing macros that target UV hub
 *		  chips right shift the NASID by 1 to exclude the always-zero
 *		  bit. NASIDs contain up to 15 bits.
 *
 *	GNODE	- NASID right shifted by 1 bit. Most MMRs contain gnodes
 *		  instead of nasids.
 *
 *	PNODE	- the low N bits of the GNODE. The PNODE is the most useful
 *		  variant of the nasid for socket usage.
 *
 *	GPA	- (global physical address) a socket physical address converted
 *		  so that it can be used by the GRU as a global address. Socket
 *		  physical addresses 1) need additional NASID (node) bits added
 *		  to the high end of the address, and 2) need to be unaliased if
 *		  the partition does not have a physical address 0. In addition,
 *		  on UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
 *
 *
 *  NumaLink Global Physical Address Format:
 *  +--------------------------------+---------------------+
 *  |00..000|          GNODE         |      NodeOffset     |
 *  +--------------------------------+---------------------+
 *          |<-------53 - M bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *
 *
 *  Memory/UV-HUB Processor Socket Address Format:
 *  +----------------+---------------+---------------------+
 *  |00..000000000000|     PNODE     |      NodeOffset     |
 *  +----------------+---------------+---------------------+
 *                   <--- N bits --->|<--------M bits ----->
 *
 *	M - number of node offset bits (35 .. 40)
 *	N - number of PNODE bits (0 .. 10)
 *
 *	Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
 *	The actual values are configuration dependent and are set at
 *	boot time. M & N values are set by the hardware/BIOS at boot.
 *
 *
 * APICID format
 *	NOTE!!!!!! This is the current format of the APICID. However, code
 *	should assume that this will change in the future. Use functions
 *	in this file for all APICID bit manipulations and conversion.
 *
 *		1111110000000000
 *		5432109876543210
 *		pppppppppplc0cch	Nehalem-EX (12 bits in hdw reg)
 *		ppppppppplcc0cch	Westmere-EX (12 bits in hdw reg)
 *		pppppppppppcccch	SandyBridge (15 bits in hdw reg)
 *		sssssssssss
 *
 *		p = pnode bits
 *		l = socket number on board
 *		c = core
 *		h = hyperthread
 *		s = bits that are in the SOCKET_ID CSR
 *
 *	Note: The processor may support fewer bits in the APICID register. The
 *	      ACPI tables hold all 16 bits. Software needs to be aware of this.
 *
 *	      Unless otherwise specified, all references to APICID refer to
 *	      the FULL value contained in ACPI tables, not the subset in the
 *	      processor APICID register.
 */
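
/*
 * Illustrative sketch of the NASID/GNODE/PNODE arithmetic described above
 * (hypothetical helper, not used by the kernel): a hub NASID always has its
 * low bit clear, the GNODE simply drops that bit, and the PNODE keeps only
 * the low N bits of the GNODE. The real conversions are the
 * UV_NASID_TO_PNODE()/UV_PNODE_TO_NASID() macros and uv_gpa_to_pnode() below.
 */
static inline int uv_example_nasid_to_pnode(int nasid, int n_val)
{
	int gnode = nasid >> 1;			/* drop the always-zero low bit */

	return gnode & ((1 << n_val) - 1);	/* keep the low N bits */
}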

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It
 * includes all C & M bricks. Routers are NOT included.
 *
 * This value is also the value of the maximum number of non-router NASIDs
 * in the numalink fabric.
 *
 * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
 */
#define UV_MAX_NUMALINK_BLADES	16384

/*
 * Maximum number of C/Mbricks within a software SSI (hardware may support
 * more).
 */
#define UV_MAX_SSI_BLADES	256

/*
 * The largest possible NASID of a C or M brick (+ 2)
 */
#define UV_MAX_NASID_VALUE	(UV_MAX_NUMALINK_BLADES * 2)

/* System Controller Interface Reg info */
struct uv_scir_s {
	struct timer_list	timer;
	unsigned long		offset;
	unsigned long		last;
	unsigned long		idle_on;
	unsigned long		idle_off;
	unsigned char		state;
	unsigned char		enabled;
};

/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
	u32	limit;		/* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
	u16	nasid;		/* NASID of the node backing this range */
	s8	base;		/* entry index of node's base addr */
	u8	reserved;
};
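
/*
 * Illustrative sketch (hypothetical helper, not used by the kernel): the
 * 'limit' field above holds PA bits 56:26, so turning a table entry back
 * into a byte address is a left shift by UV_GAM_RANGE_SHFT, which is
 * assumed to come from <asm/uv/uv_mmrs.h> as used by uv_gam_range() below.
 */
static inline unsigned long uv_example_gam_limit_to_pa(struct uv_gam_range_s *gr)
{
	return (unsigned long)gr->limit << UV_GAM_RANGE_SHFT;
}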

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in a common per hub struct.
 * After setup, the struct is read only, so it should be readily
 * available in the L3 cache on the cpu socket for the node.
 */
struct uv_hub_info_s {
	unsigned long		global_mmr_base;
	unsigned long		global_mmr_shift;
	unsigned long		gpa_mask;
	unsigned short		*socket_to_node;
	unsigned short		*socket_to_pnode;
	unsigned short		*pnode_to_socket;
	struct uv_gam_range_s	*gr_table;
	unsigned short		min_socket;
	unsigned short		min_pnode;
	unsigned char		m_val;
	unsigned char		n_val;
	unsigned char		gr_table_len;
	unsigned char		hub_revision;
	unsigned char		apic_pnode_shift;
	unsigned char		gpa_shift;
	unsigned char		m_shift;
	unsigned char		n_lshift;
	unsigned int		gnode_extra;
	unsigned long		gnode_upper;
	unsigned long		lowmem_remap_top;
	unsigned long		lowmem_remap_base;
	unsigned long		global_gru_base;
	unsigned long		global_gru_shift;
	unsigned short		pnode;
	unsigned short		pnode_mask;
	unsigned short		coherency_domain_number;
	unsigned short		numa_blade_id;
	unsigned short		nr_possible_cpus;
	unsigned short		nr_online_cpus;
	short			memory_nid;
};

/* CPU specific info with a pointer to the hub common info struct */
struct uv_cpu_info_s {
	void			*p_uv_hub_info;
	unsigned char		blade_cpu_id;
	struct uv_scir_s	scir;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);

#define uv_cpu_info		this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu)	(&per_cpu(__uv_cpu_info, cpu))

#define uv_scir_info		(&uv_cpu_info->scir)
#define uv_cpu_scir_info(cpu)	(&uv_cpu_info_per(cpu)->scir)

/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
{
	return (struct uv_hub_info_s *)__uv_hub_info_list[node];
}

static inline struct uv_hub_info_s *_uv_hub_info(void)
{
	return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
}
#define uv_hub_info	_uv_hub_info()

static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
{
	return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}

#define UV_HUB_INFO_VERSION	0x7150
extern int uv_hub_info_version(void);
static inline int uv_hub_info_check(int version)
{
	if (uv_hub_info_version() == version)
		return 0;

	pr_crit("UV: uv_hub_info version(%x) mismatch, expecting(%x)\n",
		uv_hub_info_version(), version);

	BUG();	/* Catastrophic - cannot continue on unknown UV system */
}
#define _uv_hub_info_check()	uv_hub_info_check(UV_HUB_INFO_VERSION)
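
/*
 * Illustrative sketch (hypothetical helper, not part of the UV API): the
 * per-node hub info is reached either through the current cpu's pointer
 * (uv_hub_info) or by cpu number (uv_cpu_hub_info(cpu)).
 */
static inline int uv_example_cpu_m_val(int cpu)
{
	return uv_cpu_hub_info(cpu)->m_val;	/* node offset bits of that cpu's hub */
}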

/*
 * HUB revision ranges for each UV HUB architecture.
 * This is a software convention - NOT the hardware revision numbers in
 * the hub chip.
 */
#define UV1_HUB_REVISION_BASE		1
#define UV2_HUB_REVISION_BASE		3
#define UV3_HUB_REVISION_BASE		5
#define UV4_HUB_REVISION_BASE		7

#ifdef UV1_HUB_IS_SUPPORTED
static inline int is_uv1_hub(void)
{
	return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
}
#else
static inline int is_uv1_hub(void)
{
	return 0;
}
#endif

#ifdef UV2_HUB_IS_SUPPORTED
static inline int is_uv2_hub(void)
{
	return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
		(uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
}
#else
static inline int is_uv2_hub(void)
{
	return 0;
}
#endif

#ifdef UV3_HUB_IS_SUPPORTED
static inline int is_uv3_hub(void)
{
	return ((uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE) &&
		(uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE));
}
#else
static inline int is_uv3_hub(void)
{
	return 0;
}
#endif

#ifdef UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
	return uv_hub_info->hub_revision >= UV4_HUB_REVISION_BASE;
}
#else
static inline int is_uv4_hub(void)
{
	return 0;
}
#endif

static inline int is_uvx_hub(void)
{
	if (uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE)
		return uv_hub_info->hub_revision;

	return 0;
}

static inline int is_uv_hub(void)
{
#ifdef UV1_HUB_IS_SUPPORTED
	return uv_hub_info->hub_revision;
#endif
	return is_uvx_hub();
}

union uvh_apicid {
	unsigned long	v;
	struct uvh_apicid_s {
		unsigned long	local_apic_mask  : 24;
		unsigned long	local_apic_shift :  5;
		unsigned long	unused1          :  3;
		unsigned long	pnode_mask       : 24;
		unsigned long	pnode_shift      :  5;
		unsigned long	unused2          :  3;
	} s;
};

/*
 * Local & Global MMR space macros.
 *	Note: macros are intended to be used ONLY by inline functions
 *	in this file - not by other kernel code.
 *		n - NASID (full 15-bit global nasid)
 *		g - GNODE (full 15-bit global nasid, right shifted 1)
 *		p - PNODE (local part of nasids, right shifted 1)
 */
#define UV_NASID_TO_PNODE(n)	(((n) >> 1) & uv_hub_info->pnode_mask)
#define UV_PNODE_TO_GNODE(p)	((p) | uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p)	(UV_PNODE_TO_GNODE(p) << 1)
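
/*
 * Illustrative sketch (hypothetical helper, not part of the UV API): the
 * NASID of the hub the current cpu sits on, rebuilt from its pnode with
 * the conversion macros above.
 */
static inline int uv_example_local_nasid(void)
{
	return UV_PNODE_TO_NASID(uv_hub_info->pnode);
}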

#define UV1_LOCAL_MMR_BASE		0xf4000000UL
#define UV1_GLOBAL_MMR32_BASE		0xf8000000UL
#define UV1_LOCAL_MMR_SIZE		(64UL * 1024 * 1024)
#define UV1_GLOBAL_MMR32_SIZE		(64UL * 1024 * 1024)

#define UV2_LOCAL_MMR_BASE		0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV2_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV2_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV3_LOCAL_MMR_BASE		0xfa000000UL
#define UV3_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV3_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV3_GLOBAL_MMR32_SIZE		(32UL * 1024 * 1024)

#define UV4_LOCAL_MMR_BASE		0xfa000000UL
#define UV4_GLOBAL_MMR32_BASE		0xfc000000UL
#define UV4_LOCAL_MMR_SIZE		(32UL * 1024 * 1024)
#define UV4_GLOBAL_MMR32_SIZE		(16UL * 1024 * 1024)

#define UV_LOCAL_MMR_BASE	(				\
				is_uv1_hub() ? UV1_LOCAL_MMR_BASE :	\
				is_uv2_hub() ? UV2_LOCAL_MMR_BASE :	\
				is_uv3_hub() ? UV3_LOCAL_MMR_BASE :	\
				/*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)

#define UV_GLOBAL_MMR32_BASE	(				\
				is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :	\
				is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :	\
				is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE :	\
				/*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)

#define UV_LOCAL_MMR_SIZE	(				\
				is_uv1_hub() ? UV1_LOCAL_MMR_SIZE :	\
				is_uv2_hub() ? UV2_LOCAL_MMR_SIZE :	\
				is_uv3_hub() ? UV3_LOCAL_MMR_SIZE :	\
				/*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)

#define UV_GLOBAL_MMR32_SIZE	(				\
				is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :	\
				is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :	\
				is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE :	\
				/*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)

#define UV_GLOBAL_MMR64_BASE		(uv_hub_info->global_mmr_base)

#define UV_GLOBAL_GRU_MMR_BASE		0x4000000

#define UV_GLOBAL_MMR32_PNODE_SHIFT	15
#define _UV_GLOBAL_MMR64_PNODE_SHIFT	26
#define UV_GLOBAL_MMR64_PNODE_SHIFT	(uv_hub_info->global_mmr_shift)

#define UV_GLOBAL_MMR32_PNODE_BITS(p)	((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

#define UV_GLOBAL_MMR64_PNODE_BITS(p)					\
	(((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)

#define UVH_APICID		0x002D0E00L
#define UV_APIC_PNODE_SHIFT	6

#define UV_APICID_HIBIT_MASK	0xffff0000

/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE		0x1c00000
#define LOCAL_BUS_SIZE		(4 * 1024 * 1024)

/*
 * System Controller Interface Reg
 *
 * Note there are NO leds on a UV system.  This register is only
 * used by the system controller to monitor system-wide operation.
 * There are 64 regs per node.  With Nehalem cpus (2 sockets per node,
 * 8 cores per socket, 2 threads per core) there are 32 cpu threads on
 * a node.
 *
 * The window is located at the top of ACPI MMR space.
 */
#define SCIR_WINDOW_COUNT	64
#define SCIR_LOCAL_MMR_BASE	(LOCAL_BUS_BASE + \
				 LOCAL_BUS_SIZE - \
				 SCIR_WINDOW_COUNT)

#define SCIR_CPU_HEARTBEAT	0x01	/* timer interrupt */
#define SCIR_CPU_ACTIVITY	0x02	/* not idle */
#define SCIR_CPU_HB_INTERVAL	(HZ)	/* once per second */
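
/*
 * Illustrative sketch (hypothetical helper, not used by the kernel): each
 * cpu's SCIR byte lives in a 64-byte window at the top of the local bus
 * space, indexed by the low bits of the apicid; this mirrors
 * uv_scir_offset() further below.
 */
static inline unsigned long uv_example_scir_byte(int apicid)
{
	return SCIR_LOCAL_MMR_BASE | (apicid & (SCIR_WINDOW_COUNT - 1));
}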

/* Loop through all installed blades */
#define for_each_possible_blade(bid)		\
	for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

/*
 * Macros for converting between kernel virtual addresses, socket local
 * physical addresses, and UV global physical addresses.
 *	Note: use the standard __pa() & __va() macros for converting
 *	      between socket virtual and socket physical addresses.
 */

/* global bits offset - number of local address bits in gpa for this UV arch */
static inline unsigned int uv_gpa_shift(void)
{
	return uv_hub_info->gpa_shift;
}
#define _uv_gpa_shift

/* Find node that has the address range that contains global address */
static inline struct uv_gam_range_s *uv_gam_range(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_hub_info->gr_table;
	unsigned long pal = (pa & uv_hub_info->gpa_mask) >> UV_GAM_RANGE_SHFT;
	int i, num = uv_hub_info->gr_table_len;

	if (gr) {
		for (i = 0; i < num; i++, gr++) {
			if (pal < gr->limit)
				return gr;
		}
	}
	pr_crit("UV: GAM Range for 0x%lx not found at %p!\n", pa, gr);
	BUG();
}

/* Return base address of node that contains global address */
static inline unsigned long uv_gam_range_base(unsigned long pa)
{
	struct uv_gam_range_s *gr = uv_gam_range(pa);
	int base = gr->base;

	if (base < 0)
		return 0UL;

	/* gr_table limits are in UV_GAM_RANGE_SHFT units, convert to an address */
	return (unsigned long)uv_hub_info->gr_table[base].limit << UV_GAM_RANGE_SHFT;
}
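
/*
 * Illustrative sketch (hypothetical helper, not part of the UV API): report
 * which node's GAM range a socket physical address falls in. Only meaningful
 * when a GAM range table is present (UV4 and later).
 */
static inline void uv_example_print_gam_range(unsigned long paddr)
{
	struct uv_gam_range_s *gr = uv_gam_range(paddr);

	pr_info("UV: pa 0x%lx: nasid %d, limit 0x%lx\n", paddr, gr->nasid,
		(unsigned long)gr->limit << UV_GAM_RANGE_SHFT);
}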

/* socket phys RAM --> UV global NASID (UV4+) */
static inline unsigned long uv_soc_phys_ram_to_nasid(unsigned long paddr)
{
	return uv_gam_range(paddr)->nasid;
}
#define _uv_soc_phys_ram_to_nasid

/* socket virtual --> UV global NASID (UV4+) */
static inline unsigned long uv_gpa_nasid(void *v)
{
	return uv_soc_phys_ram_to_nasid(__pa(v));
}

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
	unsigned int m_val = uv_hub_info->m_val;

	if (paddr < uv_hub_info->lowmem_remap_top)
		paddr |= uv_hub_info->lowmem_remap_base;
	paddr |= uv_hub_info->gnode_upper;
	if (m_val)
		paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
			((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
	else
		paddr |= uv_soc_phys_ram_to_nasid(paddr) << uv_hub_info->gpa_shift;
	return paddr;
}

/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{
	return uv_soc_phys_ram_to_gpa(__pa(v));
}

/* Top two bits indicate the requested address is in MMR space. */
static inline int uv_gpa_in_mmr_space(unsigned long gpa)
{
	return (gpa >> 62) == 0x3UL;
}

/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
	unsigned long paddr;
	unsigned long remap_base = uv_hub_info->lowmem_remap_base;
	unsigned long remap_top = uv_hub_info->lowmem_remap_top;
	unsigned int m_val = uv_hub_info->m_val;

	if (m_val)
		gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
			((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);

	paddr = gpa & uv_hub_info->gpa_mask;
	if (paddr >= remap_base && paddr < remap_base + remap_top)
		paddr -= remap_base;
	return paddr;
}

/* gpa -> gnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
	unsigned int n_lshift = uv_hub_info->n_lshift;

	if (n_lshift)
		return gpa >> n_lshift;

	return uv_gam_range(gpa)->nasid >> 1;
}

/* gpa -> pnode */
static inline int uv_gpa_to_pnode(unsigned long gpa)
{
	return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
}

/* gpa -> node offset */
static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
{
	unsigned int m_shift = uv_hub_info->m_shift;

	if (m_shift)
		return (gpa << m_shift) >> m_shift;

	return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
}

/* Convert socket to node */
static inline int _uv_socket_to_node(int socket, unsigned short *s2nid)
{
	return s2nid ? s2nid[socket - uv_hub_info->min_socket] : socket;
}

static inline int uv_socket_to_node(int socket)
{
	return _uv_socket_to_node(socket, uv_hub_info->socket_to_node);
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
	unsigned int m_val = uv_hub_info->m_val;
	unsigned long base;
	unsigned short sockid, node, *p2s;

	if (m_val)
		return __va(((unsigned long)pnode << m_val) | offset);

	p2s = uv_hub_info->pnode_to_socket;
	sockid = p2s ? p2s[pnode - uv_hub_info->min_pnode] : pnode;
	node = uv_socket_to_node(sockid);

	/* limit address of previous socket is our base, except node 0 is 0 */
	if (!node)
		return __va((unsigned long)offset);

	base = (unsigned long)(uv_hub_info->gr_table[node - 1].limit);
	return __va(base << UV_GAM_RANGE_SHFT | offset);
}

/* Extract/Convert a PNODE from an APICID (full apicid, not processor subset) */
static inline int uv_apicid_to_pnode(int apicid)
{
	int pnode = apicid >> uv_hub_info->apic_pnode_shift;
	unsigned short *s2pn = uv_hub_info->socket_to_pnode;

	return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}

/* Convert an apicid to the socket number on the blade */
static inline int uv_apicid_to_socket(int apicid)
{
	if (is_uv1_hub())
		return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
	else
		return 0;
}
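
/*
 * Illustrative sketch (hypothetical helper, not part of the UV API):
 * round-trip a kernel virtual address through the conversions above,
 * splitting its global address into pnode + node offset and mapping back.
 * For an address in blade-local RAM the result should equal the input.
 */
static inline void *uv_example_gpa_round_trip(void *v)
{
	unsigned long gpa = uv_gpa(v);
	int pnode = uv_gpa_to_pnode(gpa);
	unsigned long offset = uv_gpa_to_offset(gpa);

	return uv_pnode_offset_to_vaddr(pnode, offset);
}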

/*
 * Access global MMRs using the low memory MMR32 space. This region supports
 * faster MMR access but not all MMRs are accessible in this space.
 */
static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR32_BASE |
		    UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr32_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr32_address(pnode, offset));
}

/*
 * Access Global MMR space using the MMR space located at the top of physical
 * memory.
 */
static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
{
	return __va(UV_GLOBAL_MMR64_BASE |
		    UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
}

static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
{
	writeq(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
{
	return readq(uv_global_mmr64_address(pnode, offset));
}

static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
{
	writeb(val, uv_global_mmr64_address(pnode, offset));
}

static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
{
	return readb(uv_global_mmr64_address(pnode, offset));
}

/*
 * Access hub local MMRs. Faster than using global space but only local MMRs
 * are accessible.
 */
static inline unsigned long *uv_local_mmr_address(unsigned long offset)
{
	return __va(UV_LOCAL_MMR_BASE | offset);
}

static inline unsigned long uv_read_local_mmr(unsigned long offset)
{
	return readq(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
{
	writeq(val, uv_local_mmr_address(offset));
}

static inline unsigned char uv_read_local_mmr8(unsigned long offset)
{
	return readb(uv_local_mmr_address(offset));
}

static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
{
	writeb(val, uv_local_mmr_address(offset));
}
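
/*
 * Illustrative sketch (hypothetical helper with a caller-supplied offset,
 * not part of the UV API): the same hub MMR can be read through the faster
 * hub-local window when it belongs to this hub, or through the 64-bit
 * global window keyed by pnode from anywhere in the partition.
 */
static inline unsigned long uv_example_read_mmr(int pnode, unsigned long offset)
{
	if (pnode == uv_hub_info->pnode)
		return uv_read_local_mmr(offset);

	return uv_read_global_mmr64(pnode, offset);
}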

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
	return uv_cpu_info->blade_cpu_id;
}

/* Blade-local cpu number of cpu N. Numbered 0 .. <# cpus on the blade> */
static inline int uv_cpu_blade_processor_id(int cpu)
{
	return uv_cpu_info_per(cpu)->blade_cpu_id;
}
#define _uv_cpu_blade_processor_id 1	/* indicate function available */

/* Blade number to Node number (UV1..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
	return blade;
}

/* Blade number of current cpu. Numbered 0 .. <#blades - 1> */
static inline int uv_numa_blade_id(void)
{
	return uv_hub_info->numa_blade_id;
}

/*
 * Convert linux node number to the UV blade number.
 * .. Currently for UV1 thru UV4 the node and the blade are identical.
 * .. If this changes then you MUST check references to this function!
 */
static inline int uv_node_to_blade_id(int nid)
{
	return nid;
}

/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
	return uv_node_to_blade_id(cpu_to_node(cpu));
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->pnode;
}

/* Nid of memory node on blade. -1 if no blade-local memory */
static inline int uv_blade_to_memory_nid(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->memory_nid;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_possible_cpus;
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
	return uv_hub_info_list(uv_blade_to_node(bid))->nr_online_cpus;
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
	return uv_cpu_hub_info(cpu)->pnode;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
	return uv_hub_info_list(nid)->pnode;
}

/* Maximum possible number of blades */
extern short uv_possible_blades;
static inline int uv_num_possible_blades(void)
{
	return uv_possible_blades;
}
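
/*
 * Illustrative sketch (hypothetical helper, not part of the UV API): count
 * the possible cpus in the partition by walking every possible blade with
 * the helpers above.
 */
static inline int uv_example_partition_possible_cpus(void)
{
	int bid, cnt = 0;

	for_each_possible_blade(bid)
		cnt += uv_blade_nr_possible_cpus(bid);

	return cnt;
}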

/* Per Hub NMI support */
extern void uv_nmi_setup(void);

/* BMC sets a bit in this MMR before sending an NMI */
#define UVH_NMI_MMR		UVH_SCRATCH5
#define UVH_NMI_MMR_CLEAR	UVH_SCRATCH5_ALIAS
#define UVH_NMI_MMR_SHIFT	63
#define UVH_NMI_MMR_TYPE	"SCRATCH5"

/* Newer SMM NMI handler, not present in all systems */
#define UVH_NMI_MMRX		UVH_EVENT_OCCURRED0
#define UVH_NMI_MMRX_CLEAR	UVH_EVENT_OCCURRED0_ALIAS
#define UVH_NMI_MMRX_SHIFT	UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
#define UVH_NMI_MMRX_TYPE	"EXTIO_INT0"

/* Non-zero indicates newer SMM NMI handler present */
#define UVH_NMI_MMRX_SUPPORTED	UVH_EXTIO_INT0_BROADCAST

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
#define UVH_NMI_MMRX_REQ	UVH_SCRATCH5_ALIAS_2
#define UVH_NMI_MMRX_REQ_SHIFT	62

struct uv_hub_nmi_s {
	raw_spinlock_t	nmi_lock;
	atomic_t	in_nmi;		/* flag this node in UV NMI IRQ */
	atomic_t	cpu_owner;	/* last locker of this struct */
	atomic_t	read_mmr_count;	/* count of MMR reads */
	atomic_t	nmi_count;	/* count of true UV NMIs */
	unsigned long	nmi_value;	/* last value read from NMI MMR */
};

struct uv_cpu_nmi_s {
	struct uv_hub_nmi_s	*hub;
	int			state;
	int			pinging;
	int			queries;
	int			pings;
};

DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);

#define uv_hub_nmi		this_cpu_read(uv_cpu_nmi.hub)
#define uv_cpu_nmi_per(cpu)	(per_cpu(uv_cpu_nmi, cpu))
#define uv_hub_nmi_per(cpu)	(uv_cpu_nmi_per(cpu).hub)

/* uv_cpu_nmi_states */
#define UV_NMI_STATE_OUT	0
#define UV_NMI_STATE_IN		1
#define UV_NMI_STATE_DUMP	2
#define UV_NMI_STATE_DUMP_DONE	3

/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
	if (uv_scir_info->state != value) {
		uv_scir_info->state = value;
		uv_write_local_mmr8(uv_scir_info->offset, value);
	}
}

static inline unsigned long uv_scir_offset(int apicid)
{
	return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
}

static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
{
	if (uv_cpu_scir_info(cpu)->state != value) {
		uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
				uv_cpu_scir_info(cpu)->offset, value);
		uv_cpu_scir_info(cpu)->state = value;
	}
}

extern unsigned int uv_apicid_hibits;
static inline unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
{
	apicid |= uv_apicid_hibits;
	return (1UL << UVH_IPI_INT_SEND_SHFT) |
			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
			(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
			(vector << UVH_IPI_INT_VECTOR_SHFT);
}

static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
{
	unsigned long val;
	unsigned long dmode = dest_Fixed;

	if (vector == NMI_VECTOR)
		dmode = dest_NMI;

	val = uv_hub_ipi_value(apicid, vector, dmode);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/*
 * Get the minimum revision number of the hub chips within the partition.
 * (See UVx_HUB_REVISION_BASE above for specific values.)
 */
static inline int uv_get_min_hub_revision_id(void)
{
	return uv_hub_info->hub_revision;
}

#endif /* CONFIG_X86_64 */
#endif /* _ASM_X86_UV_UV_HUB_H */