/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#ifndef	_VM_HAT_H
#define	_VM_HAT_H

#include <sys/types.h>
#include <sys/t_lock.h>
#include <vm/faultcode.h>
#include <sys/kstat.h>
#include <sys/siginfo.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the machine independent interfaces to
 * the hardware address translation management routines.  Other
 * machine specific interfaces and structures are defined
 * in <vm/hat_xxx.h>.  The hat layer manages the address
 * translation hardware as a cache driven by calls from the
 * higher levels of the VM system.
 */

struct hat;
struct kpme;
struct memseg;

#include <vm/page.h>

/*
 * a callback used with hat_unload_callback()
 * start and end mark are set to a range of unloaded addresses
 * and the function is invoked with a pointer to this data structure
 */
typedef struct hat_callback {
	caddr_t		hcb_start_addr;
	caddr_t		hcb_end_addr;
	void		(*hcb_function)(struct hat_callback *);
	void		*hcb_data;
} hat_callback_t;

typedef void *hat_region_cookie_t;
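/*
 * Example (illustrative sketch): a caller of hat_unload_callback() fills
 * in a hat_callback_t so the hat layer can report each range it actually
 * unloads.  The callback and variable names below are hypothetical.
 *
 *	static void
 *	count_unloaded(struct hat_callback *hcb)
 *	{
 *		size_t len = hcb->hcb_end_addr - hcb->hcb_start_addr;
 *
 *		atomic_add_long((ulong_t *)hcb->hcb_data, (long)len);
 *	}
 *
 *	ulong_t bytes = 0;
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = count_unloaded;
 *	cb.hcb_data = &bytes;
 *	hat_unload_callback(hat, addr, len, HAT_UNLOAD, &cb);
 */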

#ifdef	_KERNEL

/*
 * One time hat initialization
 */
void	hat_init(void);

/*
 * Notify hat of a system dump
 */
void	hat_dump(void);

/*
 * Operations on an address space:
 *
 * struct hat *hat_alloc(as)
 *	allocates a hat structure for as.
 *
 * void hat_free_start(hat)
 *	informs hat layer process has finished executing but as has not
 *	been cleaned up yet.
 *
 * void hat_free_end(hat)
 *	informs hat layer as is being destroyed.  hat layer cannot use as
 *	pointer after this call.
 *
 * void hat_swapin(hat)
 *	allocate any hat resources required for process being swapped in.
 *
 * void hat_swapout(hat)
 *	deallocate hat resources for process being swapped out.
 *
 * size_t hat_get_mapped_size(hat)
 *	returns number of bytes that have valid mappings in hat.
 *
 * void hat_stats_enable(hat)
 * void hat_stats_disable(hat)
 *	enables/disables collection of stats for hat.
 *
 * int hat_dup(parenthat, childhat, addr, len, flags)
 *	Duplicates the address translations of the parent in the child.
 *	Depending on flags, either the entire address range or a subrange
 *	is duplicated.  Returns zero on success, non-zero on error.
 *
 * void hat_thread_exit(thread)
 *	Notifies the HAT that a thread is exiting, called after it has been
 *	reassigned to the kernel AS.
 */

struct hat *hat_alloc(struct as *);
void	hat_free_start(struct hat *);
void	hat_free_end(struct hat *);
int	hat_dup(struct hat *, struct hat *, caddr_t, size_t, uint_t);
void	hat_swapin(struct hat *);
void	hat_swapout(struct hat *);
size_t	hat_get_mapped_size(struct hat *);
int	hat_stats_enable(struct hat *);
void	hat_stats_disable(struct hat *);
void	hat_thread_exit(kthread_t *);
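/*
 * Example (illustrative sketch of the call sequence described above; the
 * variables "as", "hat", "parent_hat", "child_hat" and "error" are
 * hypothetical):
 *
 *	At address space creation:
 *		hat = hat_alloc(as);
 *
 *	When duplicating an address space (fork); non-zero means failure:
 *		error = hat_dup(parent_hat, child_hat, NULL, 0, HAT_DUP_ALL);
 *
 *	When the process has finished executing, and later when the as
 *	itself is torn down:
 *		hat_free_start(hat);
 *		hat_free_end(hat);
 */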

/*
 * Operations on a named address within a segment:
 *
 * void hat_memload(hat, addr, pp, attr, flags)
 *	load/lock the given page struct
 *
 * void hat_memload_array(hat, addr, len, ppa, attr, flags)
 *	load/lock the given array of page structs
 *
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * void hat_unlock(hat, addr, len)
 *	unlock a given range of addresses
 *
 * void hat_unload(hat, addr, len, flags)
 * void hat_unload_callback(hat, addr, len, flags, callback)
 *	unload a given range of addresses (has optional callback)
 *
 * void hat_sync(hat, addr, len, flags)
 *	synchronize mapping with software data structures
 *
 * void hat_map(hat, addr, len, flags)
 *
 * void hat_setattr(hat, addr, len, attr)
 * void hat_clrattr(hat, addr, len, attr)
 * void hat_chgattr(hat, addr, len, attr)
 *	modify attributes for a range of addresses.  skips any invalid mappings
 *
 * uint_t hat_getattr(hat, addr, *attr)
 *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
 *	mapping and *attr is valid, nonzero if there was no mapping and
 *	*attr is not valid.
 *
 * ssize_t hat_getpagesize(hat, addr)
 *	returns pagesize in bytes for <hat, addr>.  returns -1 if there is
 *	no mapping.  This is an advisory call.
 *
 * pfn_t hat_getpfnum(hat, addr)
 *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
 *
 * pfn_t hat_getkpfnum(addr)
 *	returns pfn for non-memory mapped addr in kernel address space
 *	or PFN_INVALID if mapping is invalid or is kernel memory.
 *
 * int hat_probe(hat, addr)
 *	returns 0 if no valid mapping is present.  Faster version
 *	of hat_getattr on certain architectures.
 *
 * int hat_share(dhat, daddr, shat, saddr, len, szc)
 *
 * void hat_unshare(hat, addr, len, szc)
 *
 * void hat_chgprot(hat, addr, len, vprot)
 *	This is a deprecated call.  New segment drivers should store
 *	all attributes and use hat_*attr calls.
 *	Change the protections in the virtual address range
 *	given to the specified virtual protection.  If vprot is ~PROT_WRITE,
 *	then remove write permission, leaving the other permissions
 *	unchanged.  If vprot is ~PROT_USER, remove user permissions.
 */

void	hat_memload(struct hat *, caddr_t, struct page *, uint_t, uint_t);
void	hat_memload_array(struct hat *, caddr_t, size_t, struct page **,
	uint_t, uint_t);
void	hat_memload_region(struct hat *, caddr_t, struct page *, uint_t,
	uint_t, hat_region_cookie_t);
void	hat_memload_array_region(struct hat *, caddr_t, size_t, struct page **,
	uint_t, uint_t, hat_region_cookie_t);

void	hat_devload(struct hat *, caddr_t, size_t, pfn_t, uint_t, int);

void	hat_unlock(struct hat *, caddr_t, size_t);
void	hat_unlock_region(struct hat *, caddr_t, size_t, hat_region_cookie_t);

void	hat_unload(struct hat *, caddr_t, size_t, uint_t);
void	hat_unload_callback(struct hat *, caddr_t, size_t, uint_t,
	hat_callback_t *);
void	hat_sync(struct hat *, caddr_t, size_t, uint_t);
void	hat_map(struct hat *, caddr_t, size_t, uint_t);
void	hat_setattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_clrattr(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgattr(struct hat *, caddr_t, size_t, uint_t);
uint_t	hat_getattr(struct hat *, caddr_t, uint_t *);
ssize_t	hat_getpagesize(struct hat *, caddr_t);
pfn_t	hat_getpfnum(struct hat *, caddr_t);
int	hat_probe(struct hat *, caddr_t);
int	hat_share(struct hat *, caddr_t, struct hat *, caddr_t, size_t, uint_t);
void	hat_unshare(struct hat *, caddr_t, size_t, uint_t);
void	hat_chgprot(struct hat *, caddr_t, size_t, uint_t);
void	hat_reserve(struct as *, caddr_t, size_t);
pfn_t	va_to_pfn(void *);
uint64_t va_to_pa(void *);

/*
 * hat_getkpfnum() is never supported on amd64 and will be
 * removed from other platforms in future release
 */
#if !defined(__amd64)
pfn_t	hat_getkpfnum(caddr_t);
#endif
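/*
 * Example (illustrative sketch): querying a translation with the calls
 * described above.  "hat" and "addr" are hypothetical variables.
 *
 *	uint_t attr;
 *	int writable = 0;
 *	pfn_t pfn;
 *	ssize_t pgsz;
 *
 *	PFN_INVALID is returned when no translation is loaded:
 *		pfn = hat_getpfnum(hat, addr);
 *
 *	-1 is returned when no translation is loaded (advisory only):
 *		pgsz = hat_getpagesize(hat, addr);
 *
 *	hat_getattr() returns 0 and fills in attr only for a valid mapping:
 *		if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
 *			writable = 1;
 */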

/*
 * Kernel Physical Mapping (segkpm) hat interface routines.
 */
caddr_t	hat_kpm_mapin(struct page *, struct kpme *);
void	hat_kpm_mapout(struct page *, struct kpme *, caddr_t);
caddr_t	hat_kpm_page2va(struct page *, int);
struct page	*hat_kpm_vaddr2page(caddr_t);
int	hat_kpm_fault(struct hat *, caddr_t);
void	hat_kpm_mseghash_clear(int);
void	hat_kpm_mseghash_update(pgcnt_t, struct memseg *);
void	hat_kpm_addmem_mseg_update(struct memseg *, pgcnt_t, offset_t);
void	hat_kpm_addmem_mseg_insert(struct memseg *);
void	hat_kpm_addmem_memsegs_update(struct memseg *);
caddr_t	hat_kpm_mseg_reuse(struct memseg *);
void	hat_kpm_delmem_mseg_update(struct memseg *, struct memseg **);
void	hat_kpm_split_mseg_update(struct memseg *, struct memseg **,
	struct memseg *, struct memseg *, struct memseg *);
void	hat_kpm_walk(void (*)(void *, void *, size_t), void *);

/*
 * Operations on all translations for a given page(s)
 *
 * void hat_page_setattr(pp, flag)
 * void hat_page_clrattr(pp, flag)
 *	used to set/clr ref/mod bits.
 *
 * uint_t hat_page_getattr(pp, flag)
 *	If flag is specified, returns 0 if attribute is disabled
 *	and non zero if enabled.  If flag specifies multiple attributes
 *	then returns 0 if ALL attributes are disabled.  This is an advisory
 *	call.
 *
 * int hat_pageunload(pp, forceflag)
 *	unload all translations attached to pp.
 *
 * uint_t hat_pagesync(pp, flags)
 *	get hw stats from hardware into page struct and reset hw stats
 *	returns attributes of page
 *
 * ulong_t hat_page_getshare(pp)
 *	returns approx number of mappings to this pp.  A return of 0 implies
 *	there are no mappings to the page.
 *
 * faultcode_t hat_softlock(hat, addr, lenp, ppp, flags);
 *	called to softlock pages for zero copy tcp
 *
 * void hat_page_demote(pp);
 *	unload all large mappings to pp and decrease p_szc of all
 *	constituent pages according to the remaining mappings.
 */

void	hat_page_setattr(struct page *, uint_t);
void	hat_page_clrattr(struct page *, uint_t);
uint_t	hat_page_getattr(struct page *, uint_t);
int	hat_pageunload(struct page *, uint_t);
uint_t	hat_pagesync(struct page *, uint_t);
ulong_t	hat_page_getshare(struct page *);
int	hat_page_checkshare(struct page *, ulong_t);
faultcode_t	hat_softlock(struct hat *, caddr_t, size_t *,
			struct page **, uint_t);
void	hat_page_demote(struct page *);

/*
 * Routine to expose supported HAT features to PIM.
 */
enum hat_features {
	HAT_SHARED_PT,		/* Shared page tables */
	HAT_DYNAMIC_ISM_UNMAP,	/* hat_pageunload() handles ISM pages */
	HAT_VMODSORT,		/* support for VMODSORT flag of vnode */
	HAT_SHARED_REGIONS	/* shared regions support */
};

int	hat_supported(enum hat_features, void *);

/*
 * Services provided to the hat:
 *
 * void as_signal_proc(as, siginfo)
 *	deliver signal to all processes that have this as.
 *
 * void hat_setstat(as, addr, len, rmbits)
 *	informs hatstat layer that ref/mod bits need to be updated for
 *	address range.
 */
void	as_signal_proc(struct as *, k_siginfo_t *siginfo);
void	hat_setstat(struct as *, caddr_t, size_t, uint_t);

/*
 * Flags to pass to hat routines.
 *
 * Certain flags only apply to some interfaces:
 *
 *	HAT_LOAD	Default flags to load a translation to the page.
 *	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
 *			and hat_devload().
 *	HAT_LOAD_ADV	Advisory load - Load translation if and only if
 *			sufficient MMU resources exist (i.e., do not steal).
 *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
 *			that map some user pages (not kas) are shared by more
 *			than one process (e.g., ISM).
 *	HAT_LOAD_CONTIG	Pages are contiguous
 *	HAT_LOAD_NOCONSIST
 *			Do not add mapping to mapping list.
 *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 *	HAT_RELOAD_SHARE
 *			Reload a shared page table entry.  Some platforms
 *			may require different actions than on the first
 *			load of a shared mapping.
 *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up mapping to allocate internal
 *			hat layer data structures.  This flag forces hat layer
 *			to tap its reserves in order to prevent infinite
 *			recursion.
 *	HAT_LOAD_TEXT	A flag to hat_memload() to indicate loading text pages.
 */

/*
 * Flags for hat_memload/hat_devload
 */
#define	HAT_FLAGS_RESV		0xFF000000	/* resv for hat impl */
#define	HAT_LOAD		0x00
#define	HAT_LOAD_LOCK		0x01
#define	HAT_LOAD_ADV		0x04
#define	HAT_LOAD_CONTIG		0x10
#define	HAT_LOAD_NOCONSIST	0x20
#define	HAT_LOAD_SHARE		0x40
#define	HAT_LOAD_REMAP		0x80
#define	HAT_RELOAD_SHARE	0x100
#define	HAT_NO_KALLOC		0x200
#define	HAT_LOAD_TEXT		0x400
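/*
 * Example (illustrative sketch): a fault handler might establish an
 * ordinary translation for page "pp", while a kernel client that cannot
 * tolerate a fault locks the mapping down while it is in use.  All
 * variable names are hypothetical.
 *
 *	hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 *	hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);
 *	...
 *	hat_unlock(hat, addr, PAGESIZE);
 */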
/*
 * Flags for initializing disable_*large_pages.
 *
 *	HAT_AUTO_TEXT	Get MMU specific disable_auto_text_large_pages
 *	HAT_AUTO_DATA	Get MMU specific disable_auto_data_large_pages
 */
#define	HAT_AUTO_TEXT		0x800
#define	HAT_AUTO_DATA		0x1000

/*
 * Attributes for hat_memload/hat_devload/hat_*attr
 * are a superset of prot flags defined in mman.h.
 */
#define	HAT_PLAT_ATTR_MASK	0xF00000
#define	HAT_PROT_MASK		0x0F

#define	HAT_NOFAULT		0x10
#define	HAT_NOSYNC		0x20

/*
 * Advisory ordering attributes.  Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *	programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *	of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *	to consecutive locations (for example, turn two consecutive byte
 *	stores into one halfword store), and it may batch individual loads
 *	(for example, turn two consecutive byte loads into one halfword load).
 *	This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *	until another store occurs.  The default is to fetch new data
 *	on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *	the device (perhaps with other data) at a later time.  The default is
 *	to push the data right away.  This also implies load caching.
 */
#define	HAT_STRICTORDER		0x0000
#define	HAT_UNORDERED_OK	0x0100
#define	HAT_MERGING_OK		0x0200
#define	HAT_LOADCACHING_OK	0x0300
#define	HAT_STORECACHING_OK	0x0400
#define	HAT_ORDER_MASK		0x0700

/* endian attributes */
#define	HAT_NEVERSWAP		0x0000
#define	HAT_STRUCTURE_BE	0x1000
#define	HAT_STRUCTURE_LE	0x2000
#define	HAT_ENDIAN_MASK		0x3000

/*
 * Attributes for non-coherent I-cache support.
 *
 * We detect if an I-cache has been filled by first resetting
 * execute permission in a tte entry.  This forces a trap when
 * an instruction fetch first occurs in that page.  In "soft
 * execute mode", the hardware execute permission is cleared
 * and a different software execution bit is set in the tte.
 *
 * HAT_ATTR_TEXT: set this flag to avoid the extra trap associated
 *	with soft execute mode.  Same meaning as HAT_LOAD_TEXT.
 *
 * HAT_ATTR_NOSOFTEXEC: set this flag when installing a permanent
 *	mapping, or installing a mapping that will never be
 *	freed.  Overrides soft execute mode.
 */
#define	HAT_ATTR_TEXT		0x4000
#define	HAT_ATTR_NOSOFTEXEC	0x8000
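/*
 * Example (illustrative sketch): a device mapping combines a protection,
 * an ordering attribute and, if the device is not host-endian, an endian
 * attribute.  "hat", "addr", "npages" and "pfn" are hypothetical, and
 * ptob() is assumed from <sys/param.h>.
 *
 *	uint_t attr = PROT_READ | PROT_WRITE | HAT_NOSYNC |
 *	    HAT_STRICTORDER | HAT_STRUCTURE_LE;
 *
 *	hat_devload(hat, addr, ptob(npages), pfn, attr, HAT_LOAD_LOCK);
 */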
/* flags for hat_softlock */
#define	HAT_COW			0x0001

/*
 * Flags for hat_unload
 */
#define	HAT_UNLOAD		0x00
#define	HAT_UNLOAD_NOSYNC	0x02
#define	HAT_UNLOAD_UNLOCK	0x04
#define	HAT_UNLOAD_OTHER	0x08
#define	HAT_UNLOAD_UNMAP	0x10

/*
 * Flags for hat_pagesync, hat_getstat, hat_sync
 */
#define	HAT_SYNC_DONTZERO	0x00
#define	HAT_SYNC_ZERORM		0x01
/* Additional flags for hat_pagesync */
#define	HAT_SYNC_STOPON_REF	0x02
#define	HAT_SYNC_STOPON_MOD	0x04
#define	HAT_SYNC_STOPON_RM	(HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
#define	HAT_SYNC_STOPON_SHARED	0x08

/*
 * Flags for hat_dup
 *
 * HAT_DUP_ALL	dup entire address space
 * HAT_DUP_COW	dup plus hat_clrattr(..PROT_WRITE) on newas
 */
#define	HAT_DUP_ALL		1
#define	HAT_DUP_COW		2
#define	HAT_DUP_SRD		3


/*
 * Flags for hat_map
 */
#define	HAT_MAP			0x00

/*
 * Flags for hat_pageunload
 */
#define	HAT_ADV_PGUNLOAD	0x00
#define	HAT_FORCE_PGUNLOAD	0x01

/*
 * Attributes for hat_page_*attr, hat_setstats and
 * returned by hat_pagesync.
 */
#define	P_MOD	0x1		/* the modified bit */
#define	P_REF	0x2		/* the referenced bit */
#define	P_RO	0x4		/* Read only page */
#define	P_NSH	0x8		/* Not to shuffle v_pages */

#define	hat_ismod(pp)		(hat_page_getattr(pp, P_MOD))
#define	hat_isref(pp)		(hat_page_getattr(pp, P_REF))
#define	hat_isro(pp)		(hat_page_getattr(pp, P_RO))

#define	hat_setmod(pp)		(hat_page_setattr(pp, P_MOD))
#define	hat_setmod_only(pp)	(hat_page_setattr(pp, P_MOD|P_NSH))
#define	hat_setref(pp)		(hat_page_setattr(pp, P_REF))
#define	hat_setrefmod(pp)	(hat_page_setattr(pp, P_REF|P_MOD))

#define	hat_clrmod(pp)		(hat_page_clrattr(pp, P_MOD))
#define	hat_clrref(pp)		(hat_page_clrattr(pp, P_REF))
#define	hat_clrrefmod(pp)	(hat_page_clrattr(pp, P_REF|P_MOD))

#define	hat_page_is_mapped(pp)	(hat_page_getshare(pp))
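/*
 * Example (illustrative sketch): a pageout-style caller can pull the
 * hardware ref/mod state into the page before deciding whether it must
 * be written back.  "pp" is a hypothetical page pointer.
 *
 *	int dirty;
 *
 *	(void) hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
 *	dirty = hat_ismod(pp);
 *	hat_clrrefmod(pp);
 *
 * A dirty page must be pushed to backing store before it is freed.
 */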
/*
 * hat_setup is being used in sparc/os/sundep.c
 */
void	hat_setup(struct hat *, int);

/*
 * Flags for hat_setup
 */
#define	HAT_DONTALLOC		0
#define	HAT_ALLOC		1
#define	HAT_INIT		2

/*
 * Other routines, for statistics
 */
int	hat_startstat(struct as *);
void	hat_getstat(struct as *, caddr_t, size_t, uint_t, char *, int);
void	hat_freestat(struct as *, int);
void	hat_resvstat(size_t, struct as *, caddr_t);

/*
 * Transitional routine while we still allow hat_getkpfnum(caddr_t)
 * to return a pfn for kernel memory, but want to warn the user that
 * it isn't supported.
 */
void	hat_getkpfnum_badcall(void *caller);

/*
 * Relocation callback routines.  Currently only sfmmu HAT supports
 * these.
 */
extern int	hat_add_callback(id_t, caddr_t, uint_t, uint_t, void *,
	pfn_t *, void **);
extern id_t	hat_register_callback(int,
	int (*prehandler)(caddr_t, uint_t, uint_t, void *),
	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
	int (*errhandler)(caddr_t, uint_t, uint_t, void *), int);
extern void	hat_delete_callback(caddr_t, uint_t, void *, uint_t, void *);

/*
 * hat_add_callback()/hat_delete_callback() flags.
 */
#define	HAC_NOSLEEP	0x0
#define	HAC_SLEEP	0x1
#define	HAC_PAGELOCK	0x2

/*
 * Suspend/unsuspend handler callback arguments.
 */
#define	HAT_SUSPEND		0x0010
#define	HAT_UNSUSPEND		0x0010
#define	HAT_PRESUSPEND		0x0020
#define	HAT_POSTUNSUSPEND	0x0020

/*
 * Error handler callback arguments.  See the block comments
 * before the implementation of hat_add_callback() for an
 * explanation of what these mean.
 */
#define	HAT_CB_ERR_LEAKED	0x1

#endif	/* _KERNEL */

/*
 * The size of the bit array for ref and mod bit storage must be a power of 2.
 * 2 bits are collected for each page.  Below the power used is 4,
 * which is 16 8-bit characters = 128 bits, ref and mod bit information
 * for 64 pages.
 */
#define	HRM_SHIFT		4
#define	HRM_BYTES		(1 << HRM_SHIFT)
#define	HRM_PAGES		((HRM_BYTES * NBBY) / 2)
#define	HRM_PGPERBYTE		(NBBY/2)
#define	HRM_PGBYTEMASK		(HRM_PGPERBYTE-1)

#define	HRM_PGOFFMASK		((HRM_PGPERBYTE-1) << MMU_PAGESHIFT)
#define	HRM_BASEOFFSET		(((MMU_PAGESIZE * HRM_PAGES) - 1))
#define	HRM_BASEMASK		(~(HRM_BASEOFFSET))

#define	HRM_BASESHIFT		(MMU_PAGESHIFT + (HRM_SHIFT + 2))
#define	HRM_PAGEMASK		(MMU_PAGEMASK ^ HRM_BASEMASK)

#define	HRM_HASHSIZE		0x200
#define	HRM_HASHMASK		(HRM_HASHSIZE - 1)

#define	HRM_BLIST_INCR		0x200

/*
 * The structure for maintaining referenced and modified information
 */
struct hrmstat {
	struct as	*hrm_as;	/* stat block belongs to this as */
	uintptr_t	hrm_base;	/* base of block */
	ushort_t	hrm_id;		/* opaque identifier, one of a_vbits */
	struct hrmstat	*hrm_anext;	/* as statistics block list */
	struct hrmstat	*hrm_hnext;	/* list for hashed blocks */
	uchar_t		hrm_bits[HRM_BYTES];	/* the ref and mod bits */
};

extern struct hrmstat **hrm_hashtab;

/*
 * For global monitoring of the reference and modified bits
 * of all address spaces we reserve one id bit.
 */
#define	HRM_SWSMONID	1


#ifdef	_KERNEL

/*
 * Hat locking functions
 * XXX - these two functions are currently being used by hatstats;
 * they can be removed by using a per-as mutex for hatstats.
 */
void	hat_enter(struct hat *);
void	hat_exit(struct hat *);

typedef void (*hat_rgn_cb_func_t)(caddr_t, caddr_t, caddr_t,
    size_t, void *, u_offset_t);

void	hat_join_srd(struct hat *, vnode_t *);

hat_region_cookie_t	hat_join_region(struct hat *, caddr_t, size_t, void *,
			    u_offset_t, uchar_t, uchar_t, hat_rgn_cb_func_t,
			    uint_t);
void	hat_leave_region(struct hat *, hat_region_cookie_t, uint_t);
void	hat_dup_region(struct hat *, hat_region_cookie_t);

#define	HAT_INVALID_REGION_COOKIE	((hat_region_cookie_t)-1)
#define	HAT_IS_REGION_COOKIE_VALID(c)	((c) != HAT_INVALID_REGION_COOKIE)

/* hat_join_region() flags */

#define	HAT_REGION_TEXT	0x1	/* passed by segvn */
#define	HAT_REGION_ISM	0x2	/* for hat_share()/hat_unshare() */

#define	HAT_REGION_TYPE_MASK	(0x7)

#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_HAT_H */