/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This is the lock device driver.
 *
 * The lock driver provides a variation of inter-process mutexes with the
 * following twist in semantics:
 *	A waiter for a lock can, after a set timeout, "break" the lock and
 *	grab it from the current owner (without informing the owner).
 *
 * These semantics result in temporarily multiple processes thinking they
 * own the lock.  This usually does not make sense for cases where locks are
 * used to protect a critical region and it is important to serialize access
 * to data structures, since breaking the lock also breaks the serialization
 * and results in corrupt data structures.
 *
 * The usage of the winlock driver is primarily driven by the graphics system
 * when doing DGA (direct graphics access) graphics.  The locks are used to
 * protect access to the frame buffer (which presumably reflects back to the
 * screen) between competing processes that directly write to the screen as
 * opposed to going through the window server etc.
 * In this case, the result of breaking the lock at worst causes the screen
 * image to be distorted, and that is easily fixed by doing a "refresh".
 *
 * In well-behaved applications, the lock is held for a very short time and
 * the breaking semantics do not come into play.  Without this feature,
 * using normal inter-process mutexes would allow a misbehaved application
 * to grab the screen writing capability from the window manager and
 * effectively make the system look like it is hung (mouse pointer does not
 * move).
 *
 * A secondary aspect of the winlock driver is that it allows for extremely
 * fast lock acquire/release in cases where there is low contention.  A memory
 * write is all that is needed (not even a function call).  Usually the window
 * manager is the only DGA writer, and this is the case being optimized for.
 * Occasionally some processes might do DGA graphics and cause kernel faults
 * to handle the contention/locking (and that has got to be slow!).
 *
 * The following IOCTLs are supported:
 *
 * GRABPAGEALLOC:
 *	Compatibility with the old cgsix device driver lockpage ioctls.
 *	Lockpages created this way must be an entire page for compatibility
 *	with older software.  This ioctl allocates a lock context with its own
 *	private lock page.  The unique "ident" that identifies this lock is
 *	returned.
 *
 * GRABPAGEFREE:
 *	Compatibility with the cgsix device driver lockpage ioctls.  This
 *	ioctl releases the lock context allocated by GRABPAGEALLOC.
 *
 * GRABLOCKINFO:
 *	Returns a one-word flag.  '1' means that multiple clients may
 *	access this lock page.  Older device drivers returned '0',
 *	meaning that only two clients could access a lock page.
 *
 * GRABATTACH:
 *	Not supported.  This ioctl would have grabbed all lock pages
 *	on behalf of the calling program.
 *
 * WINLOCKALLOC:
 *	Allocate a lock context.  This ioctl accepts a key value as
 *	its argument.  If the key is zero, a new lock context is
 *	created, and its "ident" is returned.  If the key is nonzero,
 *	all existing contexts are checked to see if they match the
 *	key.  If a match is found, its reference count is incremented
 *	and its ident is returned; otherwise a new context is created
 *	and its ident is returned.
 *
 * WINLOCKFREE:
 *	Free a lock context.  This ioctl accepts the ident of a lock
 *	context and decrements its reference count.  Once the reference
 *	count reaches zero *and* all mappings are released, the lock
 *	context is freed.  When all the lock contexts in a lock page are
 *	freed, the lock page is freed as well.
 *
 * WINLOCKSETTIMEOUT:
 *	Set the lock timeout for a context.  This ioctl accepts the ident
 *	of a lock context and a timeout value in milliseconds.
 *	Whenever lock contention occurs, the timer is started and the lock is
 *	broken after the timeout expires.  If the timeout value is zero, the
 *	lock does not time out.  This value will be rounded to the nearest
 *	clock tick, so don't try to use it for real-time control.
 *
 * WINLOCKGETTIMEOUT:
 *	Get the lock timeout of a context.
 *
 * WINLOCKDUMP:
 *	Dump the state of this device.
 *
 *
 * How /dev/winlock works:
 *
 *	Every lock context consists of two mappings for the client to the lock
 *	page.  These mappings are known as the "lock page" and "unlock page"
 *	to the client.  The first mmap to the lock context (identified by the
 *	sy_ident field returned during alloc) allocates a mapping to the lock
 *	page; the second mmap allocates a mapping to the unlock page.
 *	The mappings don't have to be ordered in virtual address space, but do
 *	need to be ordered in time.  Mapping and unmapping of these lock and
 *	unlock pages should happen in pairs.  Doing them one at a time, or
 *	unmapping one and leaving the other mapped, causes undefined behavior.
 *	The mappings are always of length PAGESIZE, and of type MAP_SHARED.
 *
 *	The first ioctl is to ALLOC a lock, either based on a key (if trying to
 *	grab a preexisting lock) or 0 (which gets a new default one).
 *	This ioctl returns a value in sy_ident which is needed for the
 *	later mmaps and FREE/other ioctls.
 *
 *	The "page number" portion of sy_ident needs to be passed as the
 *	file offset when doing an mmap for both the lock page and unlock page.
 *
 *	The value returned by mmap (a user virtual address) needs to be
 *	incremented by the "page offset" portion of sy_ident to obtain the
 *	pointer to the actual lock.
 *	(Skipping this step does not cause any visible error,
 *	but the process will be using the wrong lock!)
 *
 *	On a fork(), the child process will inherit the mappings for free, but
 *	will not inherit the parent's lock ownership, if any.  The child should
 *	NOT do an explicit FREE on the lock context unless it did an explicit
 *	ALLOC.
 *	Only one process at a time is allowed to have a valid hat
 *	mapping to a lock page.  This is enforced by this driver.
 *	A client acquires a lock by writing a '1' to the lock page.
 *	Note that it is not necessary to read and verify that the lock is '0'
 *	prior to writing a '1' into it.
 *	If the client does not already have a valid mapping to that page, the
 *	driver takes a fault (devmap_access), loads the client mapping
 *	and allows the client to continue.  The client releases the lock by
 *	writing a '0' to the unlock page.  Again, if it does not have a valid
 *	mapping to the unlock page, the segment driver takes a fault,
 *	loads the mapping, and lets the client continue.  From this point
 *	forward, the client can make as many locks and unlocks as it
 *	wants, without any more faults into the kernel.
 *
 *	If a different process wants to acquire the lock, it takes a page fault
 *	when it writes the '1' to the lock page.  If the segment driver sees
 *	that the lock page contained a zero, then it invalidates the owner's
 *	mappings and gives the mappings to this process.
 *
 *	If there is already a '1' in the lock page when the second client
 *	tries to access the lock page, then a lock exists.  The segment
 *	driver puts the second client to sleep and, if applicable, starts the
 *	timeout on the lock.  The owner's mapping to the unlock page
 *	is invalidated so that the driver will be woken again when the owner
 *	releases the lock.
 *
 *	When the locking client finally writes a '0' to the unlock page, the
 *	segment driver takes another fault.  The client is given a valid
 *	mapping, not to the unlock page, but to the "trash page", and allowed
 *	to continue.  Meanwhile, the sleeping client is given a valid mapping
 *	to the lock/unlock pages and allowed to continue as well.
 *
 * RFE: There is a leak if a process exits before freeing allocated locks.
 *	Currently we are not tracking which locks were allocated by which
 *	process, and we do not have a clean entry point into the driver
 *	to do garbage collection.  If the interface used a file descriptor for
 *	each lock it allocs, then the driver could free up stuff in the _close
 *	routine.
 */
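/*
 * Illustrative user-level usage sketch (not part of the driver and not
 * compiled here).  It assumes the request structures in <sys/winlockio.h>
 * carry the sy_key, sy_ident and sy_timeout fields referred to above, and
 * that WINLOCKFREE takes a pointer to the ident; check that header for the
 * authoritative definitions.
 *
 *	long pgsz = sysconf(_SC_PAGESIZE);
 *	int fd = open("/dev/winlock", O_RDWR);
 *	struct winlockalloc wa;
 *	struct winlocktimeout wt;
 *	volatile int *lockp, *unlockp;
 *
 *	wa.sy_key = 0;				// 0 asks for a new context
 *	ioctl(fd, WINLOCKALLOC, &wa);		// wa.sy_ident now identifies it
 *
 *	wt.sy_ident = wa.sy_ident;
 *	wt.sy_timeout = 100;			// break the lock after ~100 ms
 *	ioctl(fd, WINLOCKSETTIMEOUT, &wt);
 *
 *	// First mmap gives the lock page, second the unlock page; both use
 *	// the page-aligned portion of sy_ident as the file offset and add
 *	// the page-offset portion to the returned address.
 *	lockp = (volatile int *)((char *)mmap(NULL, pgsz,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, wa.sy_ident & ~(pgsz - 1))
 *	    + (wa.sy_ident & (pgsz - 1)));
 *	unlockp = (volatile int *)((char *)mmap(NULL, pgsz,
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, wa.sy_ident & ~(pgsz - 1))
 *	    + (wa.sy_ident & (pgsz - 1)));
 *
 *	*lockp = 1;		// acquire; may fault into the driver
 *	// ... write to the frame buffer ...
 *	*unlockp = 0;		// release
 *
 *	ioctl(fd, WINLOCKFREE, &wa.sy_ident);	// drop the reference
 */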
#include <sys/types.h>		/* various type defn's */
#include <sys/debug.h>
#include <sys/param.h>		/* various kernel limits */
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/kmem.h>		/* defines kmem_alloc() */
#include <sys/conf.h>		/* defines cdevsw */
#include <sys/file.h>		/* various file modes, etc. */
#include <sys/uio.h>		/* UIO stuff */
#include <sys/ioctl.h>
#include <sys/cred.h>		/* defines cred struct */
#include <sys/mman.h>		/* defines mmap(2) parameters */
#include <sys/stat.h>		/* defines S_IFCHR */
#include <sys/cmn_err.h>	/* use cmn_err */
#include <sys/ddi.h>		/* ddi stuff */
#include <sys/sunddi.h>		/* ddi stuff */
#include <sys/ddi_impldefs.h>	/* ddi stuff */
#include <sys/winlockio.h>	/* defines ioctls, flags, data structs */

static int	winlock_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int	winlock_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
			size_t *, uint_t);
static int	winlocksegmap(dev_t, off_t, struct as *, caddr_t *, off_t,
			uint_t, uint_t, uint_t, cred_t *);

static struct cb_ops	winlock_cb_ops = {
	nulldev,		/* open */
	nulldev,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	winlock_ioctl,		/* ioctl */
	winlock_devmap,		/* devmap */
	nodev,			/* mmap */
	winlocksegmap,		/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streamtab */
	D_NEW|D_MP|D_DEVMAP,	/* Driver compatibility flag */
	0,			/* rev */
	nodev,			/* aread */
	nodev			/* awrite */
};

static int winlock_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int winlock_attach(dev_info_t *, ddi_attach_cmd_t);
static int winlock_detach(dev_info_t *, ddi_detach_cmd_t);

static struct dev_ops	winlock_ops = {
	DEVO_REV,
	0,			/* refcount */
	winlock_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	winlock_attach,		/* attach */
	winlock_detach,		/* detach */
	nodev,			/* reset */
	&winlock_cb_ops,	/* driver ops */
	NULL,			/* bus ops */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

static int winlockmap_map(devmap_cookie_t, dev_t, uint_t, offset_t, size_t,
		void **);
static void winlockmap_unmap(devmap_cookie_t, void *, offset_t, size_t,
		devmap_cookie_t, void **, devmap_cookie_t, void **);
static int winlockmap_dup(devmap_cookie_t, void *,
		devmap_cookie_t, void **);
static int winlockmap_access(devmap_cookie_t, void *, offset_t, size_t,
		uint_t, uint_t);

static
struct devmap_callback_ctl winlockmap_ops = {
	DEVMAP_OPS_REV,
	winlockmap_map,
	winlockmap_access,
	winlockmap_dup,
	winlockmap_unmap,
};

#if DEBUG
static	int	lock_debug = 0;
#define	DEBUGF(level, args)	{ if (lock_debug >= (level)) cmn_err args; }
#else
#define	DEBUGF(level, args)
#endif

/* Driver supports two styles of locks */
enum winlock_style { NEWSTYLE_LOCK, OLDSTYLE_LOCK };

/*
 * These structures describe a lock context.  We permit multiple
 * clients (not just two) to access a lock page.
 *
 * The "cookie" identifies the lock context.  It is the page number portion
 * of the sy_ident returned on lock allocation.  The cookie is used in later
 * ioctls.  "cookie" is lockid * PAGESIZE.
 * "lockptr" is the kernel virtual address of the lock itself.
 * The page offset portion of lockptr is the page offset portion of sy_ident.
 */
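/*
 * A worked example with illustrative numbers: with 8K pages and 4-byte lock
 * words, the lock with lockid 3 has cookie 3 * 8192 = 0x6000.  If its lock
 * word happens to sit at offset 0x8 within the lock page, the sy_ident
 * handed back to the client is 0x6008; the client mmaps at file offset
 * 0x6000 and adds 0x8 to the address mmap returns to reach the lock word.
 */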
/*
 * per-process information about locks.  This is the private field of
 * a devmap mapping.  Note that usually *two* mappings point to this.
 */

/*
 * Each process using winlock is associated with a segproc structure.
 * In various driver entry points, we need to search to find the right
 * segproc structure (if we were using file handles for each lock this
 * would not have been necessary).
 * It would have been simple to use the process pid (and ddi_get_pid).
 * However, during fork devmap_dup is called in the parent process context,
 * and using the pid complicates the code by introducing orphans.
 * Instead we use the as pointer of the process as a cookie,
 * which requires delving into various non-DDI-kosher structs.
 */
typedef struct segproc {
	struct segproc	*next;		/* next client of this lock */
	struct seglock	*lp;		/* associated lock context */
	devmap_cookie_t	lockseg;	/* lock mapping, if any */
	devmap_cookie_t	unlockseg;	/* unlock mapping, if any */
	void		*tag;		/* process as pointer as tag */
	uint_t		flag;		/* see "flag bits" in winlockio.h */
} SegProc;

#define	ID(sdp)		((sdp)->tag)
#define	CURPROC_ID	(void *)(curproc->p_as)

/* per lock context information */

typedef struct seglock {
	struct seglock	*next;		/* next lock */
	uint_t		sleepers;	/* nthreads sleeping on this lock */
	uint_t		alloccount;	/* how many times created? */
	uint_t		cookie;		/* mmap() offset (page #) into device */
	uint_t		key;		/* key, if any */
	enum winlock_style style;	/* style of lock - OLDSTYLE, NEWSTYLE */
	clock_t		timeout;	/* sleep time in ticks */
	ddi_umem_cookie_t umem_cookie;	/* cookie for umem allocated memory */
	int		*lockptr;	/* kernel virtual addr of lock */
	struct segproc	*clients;	/* list of clients of this lock */
	struct segproc	*owner;		/* current owner of lock */
	kmutex_t	mutex;		/* mutex for lock */
	kcondvar_t	locksleep;	/* for sleeping on lock */
} SegLock;

#define	LOCK(lp)	(*((lp)->lockptr))

/*
 * Number of locks that can fit in a page.  Driver can support only that many.
 * For oldstyle locks, it is relatively easy to increase the limit as each
 * is in a separate page (MAX_LOCKS mostly serves to prevent runaway
 * allocation).
 * For newstyle locks, this is trickier as the code needs to allow for mapping
 * into the second or third page of the cookie for some locks.
 */
#define	MAX_LOCKS	(PAGESIZE/sizeof (int))

#define	LOCKTIME	3	/* Default lock timeout in seconds */


/* Protections setting for winlock user mappings */
#define	WINLOCK_PROT	(PROT_READ|PROT_WRITE|PROT_USER)

/*
 * The trash page is where unwanted writes go
 * when a process is releasing a lock.
 */
static	ddi_umem_cookie_t	trashpage_cookie = NULL;

/* For newstyle allocations a common page of locks is used */
static	caddr_t	lockpage = NULL;
static	ddi_umem_cookie_t	lockpage_cookie = NULL;

static	dev_info_t	*winlock_dip = NULL;
static	kmutex_t	winlock_mutex;

/*
 * winlock_mutex protects
 *	lock_list
 *	lock_free_list
 *	"next" field in SegLock
 *	next_lock
 *	trashpage_cookie
 *	lockpage & lockpage_cookie
 *
 * SegLock_mutex protects
 *	rest of fields in SegLock
 *	All fields in list of SegProc (lp->clients)
 *
 * Lock ordering is winlock_mutex->SegLock_mutex.
 * During devmap/seg operations SegLock_mutex is acquired without
 * winlock_mutex.
 *
 * During devmap callbacks, the pointer to SegProc is stored as the private
 * data in the devmap handle.  This pointer will not go stale (i.e., the
 * SegProc getting deleted) as the SegProc is not deleted until both the
 * lockseg and unlockseg have been unmapped and the pointers stored in
 * the devmap handles have been NULL'ed.
 * But before this pointer is used to access any fields (other than 'lp'),
 * lp->mutex must be held.
 */
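/*
 * A minimal sketch of the ordering discipline described above: a thread that
 * holds only lp->mutex but also needs winlock_mutex must drop and reacquire
 * in the documented order (seglock_destroylock below does exactly this),
 * revalidating any state that could have changed while unlocked:
 *
 *	mutex_exit(&lp->mutex);
 *	mutex_enter(&winlock_mutex);
 *	mutex_enter(&lp->mutex);
 */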
/*
 * The allocation code tries to allocate from lock_free_list
 * first, otherwise it uses kmem_zalloc.  When the lock list is idle, all
 * locks in lock_free_list are kmem_freed.
 */
static	SegLock	*lock_list = NULL;	/* in-use locks */
static	SegLock	*lock_free_list = NULL;	/* free locks */
static	int	next_lock = 0;		/* next lock cookie */

/* Routines to find a lock in lock_list based on offset or key */
static SegLock *seglock_findlock(uint_t);
static SegLock *seglock_findkey(uint_t);

/* Routines to find and allocate SegProc structures */
static SegProc *seglock_find_specific(SegLock *, void *);
static SegProc *seglock_alloc_specific(SegLock *, void *);
#define	seglock_findclient(lp)	seglock_find_specific((lp), CURPROC_ID)
#define	seglock_allocclient(lp)	seglock_alloc_specific((lp), CURPROC_ID)

/* Delete client from lock's client list */
static void seglock_deleteclient(SegLock *, SegProc *);
static void garbage_collect_lock(SegLock *, SegProc *);

/* Create a new lock */
static SegLock *seglock_createlock(enum winlock_style);
/* Destroy lock */
static void seglock_destroylock(SegLock *);
static void lock_destroyall(void);

/* Helper functions in winlockmap_access */
static int give_mapping(SegLock *, SegProc *, uint_t);
static int lock_giveup(SegLock *, int);
static int seglock_lockfault(devmap_cookie_t, SegProc *, SegLock *, uint_t);

/* routines called from ioctl */
static int seglock_graballoc(intptr_t, enum winlock_style, int);
static int seglock_grabinfo(intptr_t, int);
static int seglock_grabfree(intptr_t, int);
static int seglock_gettimeout(intptr_t, int);
static int seglock_settimeout(intptr_t, int);
static void seglock_dump_all(void);

static	int
winlock_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	DEBUGF(1, (CE_CONT, "winlock_attach, devi=%p, cmd=%d\n",
	    (void *)devi, (int)cmd));
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	if (ddi_create_minor_node(devi, "winlock", S_IFCHR, 0, DDI_PSEUDO, 0)
	    == DDI_FAILURE) {
		return (DDI_FAILURE);
	}
	winlock_dip = devi;
	ddi_report_dev(devi);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static	int
winlock_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	DEBUGF(1, (CE_CONT, "winlock_detach, devi=%p, cmd=%d\n",
	    (void *)devi, (int)cmd));
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	mutex_enter(&winlock_mutex);
	if (lock_list != NULL) {
		mutex_exit(&winlock_mutex);
		return (DDI_FAILURE);
	}
	ASSERT(lock_free_list == NULL);

	DEBUGF(1, (CE_CONT, "detach freeing trashpage and lockpage\n"));
	/* destroy any common stuff created */
	if (trashpage_cookie != NULL) {
		ddi_umem_free(trashpage_cookie);
		trashpage_cookie = NULL;
	}
	if (lockpage != NULL) {
		ddi_umem_free(lockpage_cookie);
		lockpage = NULL;
		lockpage_cookie = NULL;
	}
	winlock_dip = NULL;
	mutex_exit(&winlock_mutex);
	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
winlock_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	register int error;

	/* initialize result */
	*result = NULL;

	/* only valid instance (i.e., getminor) is 0 */
	if (getminor((dev_t)arg) >= 1)
		return (DDI_FAILURE);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (winlock_dip == NULL)
			error = DDI_FAILURE;
		else {
			*result = (void *)winlock_dip;
			error = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}


/*ARGSUSED*/
int
winlock_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
	cred_t *cred, int *rval)
{
	DEBUGF(1, (CE_CONT,
"winlockioctl: cmd=%d, arg=0x%p\n", 5187c478bd9Sstevel@tonic-gate cmd, (void *)arg)); 5197c478bd9Sstevel@tonic-gate 5207c478bd9Sstevel@tonic-gate switch (cmd) { 5217c478bd9Sstevel@tonic-gate /* 5227c478bd9Sstevel@tonic-gate * ioctls that used to be handled by framebuffers (defined in fbio.h) 5237c478bd9Sstevel@tonic-gate * RFE: No code really calls the GRAB* ioctls now. Should EOL. 5247c478bd9Sstevel@tonic-gate */ 5257c478bd9Sstevel@tonic-gate 5267c478bd9Sstevel@tonic-gate case GRABPAGEALLOC: 5277c478bd9Sstevel@tonic-gate return (seglock_graballoc(arg, OLDSTYLE_LOCK, mode)); 5287c478bd9Sstevel@tonic-gate case GRABPAGEFREE: 5297c478bd9Sstevel@tonic-gate return (seglock_grabfree(arg, mode)); 5307c478bd9Sstevel@tonic-gate case GRABLOCKINFO: 5317c478bd9Sstevel@tonic-gate return (seglock_grabinfo(arg, mode)); 5327c478bd9Sstevel@tonic-gate case GRABATTACH: 5337c478bd9Sstevel@tonic-gate return (EINVAL); /* GRABATTACH is not supported (never was) */ 5347c478bd9Sstevel@tonic-gate 5357c478bd9Sstevel@tonic-gate case WINLOCKALLOC: 5367c478bd9Sstevel@tonic-gate return (seglock_graballoc(arg, NEWSTYLE_LOCK, mode)); 5377c478bd9Sstevel@tonic-gate case WINLOCKFREE: 5387c478bd9Sstevel@tonic-gate return (seglock_grabfree(arg, mode)); 5397c478bd9Sstevel@tonic-gate case WINLOCKSETTIMEOUT: 5407c478bd9Sstevel@tonic-gate return (seglock_settimeout(arg, mode)); 5417c478bd9Sstevel@tonic-gate case WINLOCKGETTIMEOUT: 5427c478bd9Sstevel@tonic-gate return (seglock_gettimeout(arg, mode)); 5437c478bd9Sstevel@tonic-gate case WINLOCKDUMP: 5447c478bd9Sstevel@tonic-gate seglock_dump_all(); 5457c478bd9Sstevel@tonic-gate return (0); 5467c478bd9Sstevel@tonic-gate 5477c478bd9Sstevel@tonic-gate #ifdef DEBUG 5487c478bd9Sstevel@tonic-gate case (WIOC|255): 5497c478bd9Sstevel@tonic-gate lock_debug = arg; 5507c478bd9Sstevel@tonic-gate return (0); 5517c478bd9Sstevel@tonic-gate #endif 5527c478bd9Sstevel@tonic-gate 5537c478bd9Sstevel@tonic-gate default: 5547c478bd9Sstevel@tonic-gate return (ENOTTY); /* Why is this not EINVAL */ 5557c478bd9Sstevel@tonic-gate } 5567c478bd9Sstevel@tonic-gate } 5577c478bd9Sstevel@tonic-gate 5587c478bd9Sstevel@tonic-gate int 5597c478bd9Sstevel@tonic-gate winlocksegmap( 5607c478bd9Sstevel@tonic-gate dev_t dev, /* major:minor */ 5617c478bd9Sstevel@tonic-gate off_t off, /* device offset from mmap(2) */ 5627c478bd9Sstevel@tonic-gate struct as *as, /* user's address space. 
	caddr_t	*addr,		/* address from mmap(2) */
	off_t	len,		/* length from mmap(2) */
	uint_t	prot,		/* user wants this access */
	uint_t	maxprot,	/* this is the maximum the user can have */
	uint_t	flags,		/* flags from mmap(2) */
	cred_t	*cred)
{
	DEBUGF(1, (CE_CONT, "winlock_segmap off=%lx, len=0x%lx\n", off, len));

	/* Only MAP_SHARED mappings are supported */
	if ((flags & MAP_TYPE) == MAP_PRIVATE) {
		return (EINVAL);
	}

	/* Use devmap_setup to setup the mapping */
	return (devmap_setup(dev, (offset_t)off, as, addr, (size_t)len, prot,
	    maxprot, flags, cred));
}

/*ARGSUSED*/
int
winlock_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
	size_t *maplen, uint_t model)
{
	SegLock *lp;
	int err;

	DEBUGF(1, (CE_CONT, "winlock devmap: off=%llx, len=%lx, dhp=%p\n",
	    off, len, (void *)dhp));

	*maplen = 0;

	/* Check if the lock exists, i.e., has been created by alloc */
	/* off is the sy_ident returned in the alloc ioctl */
	if ((lp = seglock_findlock((uint_t)off)) == NULL) {
		return (ENXIO);
	}

	/*
	 * The offset bits in the mmap(2) offset have to be the same as in
	 * lockptr, OR the offset should be 0 (i.e. masked off).
	 */
	if (((off & PAGEOFFSET) != 0) &&
	    ((off ^ (uintptr_t)(lp->lockptr)) & (offset_t)PAGEOFFSET) != 0) {
		DEBUGF(2, (CE_CONT,
		    "mmap offset %llx mismatch with lockptr %p\n",
		    off, (void *)lp->lockptr));
		mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
		return (EINVAL);
	}

	/* Only supports PAGESIZE length mappings */
	if (len != PAGESIZE) {
		mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
		return (EINVAL);
	}

	/*
	 * Set up devmap to point at the page associated with the lock.
	 * RFE: At this point we don't know if this is a lockpage or unlockpage
	 * (a lockpage would not need the DEVMAP_ALLOW_REMAP setting).
	 * We could have kept track of the mapping order here,
	 * but the devmap framework does not support storing any state in this
	 * devmap callback as it does not call back for error cleanup if some
	 * other error happens in the framework.
	 * RFE: We should modify the winlock mmap interface so that the
	 * user process marks in the offset passed in whether this is for a
	 * lock or unlock mapping instead of guessing based on order of maps.
	 * This would clean up other things (such as in fork).
	 */
	if ((err = devmap_umem_setup(dhp, winlock_dip, &winlockmap_ops,
	    lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT,
	    DEVMAP_ALLOW_REMAP, 0)) < 0) {
		mutex_exit(&lp->mutex);	/* held by seglock_findlock */
		return (err);
	}

	/*
	 * No mappings are loaded to those segments yet.  The correctness
	 * of the winlock semantics depends on the devmap framework/seg_dev NOT
	 * loading the translations without calling the _access callback.
	 */

	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
	*maplen = PAGESIZE;
	return (0);
}

/*
 * This routine is called by the devmap framework after the devmap entry point
 * above, once the mapping has been set up in seg_dev.
 * We store the pointer to the per-process context in the devmap private data.
 */
/*ARGSUSED*/
static int
winlockmap_map(devmap_cookie_t dhp, dev_t dev, uint_t flags, offset_t off,
	size_t len, void **pvtp)
{
	SegLock	*lp = seglock_findlock((uint_t)off); /* returns w/ mutex held */
	SegProc	*sdp;

	ASSERT(len == PAGESIZE);

	/* Find the per-process context for this lock, alloc one if not found */
	sdp = seglock_allocclient(lp);

	/*
	 * RFE: Determining which is a lock vs unlock seg is based on the order
	 * of mmaps; we should change that to be derivable from off
	 */
	if (sdp->lockseg == NULL) {
		sdp->lockseg = dhp;
	} else if (sdp->unlockseg == NULL) {
		sdp->unlockseg = dhp;
	} else {
		/* attempting to map lock more than twice */
		mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
		return (ENOMEM);
	}

	*pvtp = sdp;
	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
	return (DDI_SUCCESS);
}

/*
 * duplicate a segment, as in fork()
 * On fork, the child inherits the mappings to the lock.
 * lp->alloccount is NOT incremented, so the child should not do a free().
 * Semantics are the same as if it had done an alloc(), map(), map().
 * This way it would work fine if doing an exec() variant later.
 * The child does not inherit any UFLAGS set in the parent.
 * The lock and unlock pages start off unmapped, i.e., the child does not
 * own the lock.
 * The code assumes that the child process has a valid pid at this point.
 * RFE: These semantics depend on fork not duplicating the hat mappings
 * (which is the current implementation).  To enforce it we would need to
 * call devmap_unload from here - not clear if that is allowed.
 */

static int
winlockmap_dup(devmap_cookie_t dhp, void *oldpvt, devmap_cookie_t new_dhp,
	void **newpvt)
{
	SegProc *sdp = (SegProc *)oldpvt;
	SegProc *ndp;
	SegLock *lp = sdp->lp;

	mutex_enter(&lp->mutex);
	ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));

	/*
	 * Note: At this point, the child process does have a pid, but
	 * the arguments passed to as_dup and hence to devmap_dup don't pass it
	 * down.  So we cannot use the normal seglock_findclient - which finds
	 * the parent sdp itself!
	 * Instead we allocate the child's SegProc using the child's as pointer.
	 * RFE: we are using the as structure, which means peeking into the
	 * devmap_cookie.  This is not DDI-compliant.  We need a compliant way
	 * of getting at either the as or, better, a way to get the child's
	 * new pid.
	 */
	ndp = seglock_alloc_specific(lp,
	    (void *)((devmap_handle_t *)new_dhp)->dh_seg->s_as);
	ASSERT(ndp != sdp);

	if (sdp->lockseg == dhp) {
		ASSERT(ndp->lockseg == NULL);
		ndp->lockseg = new_dhp;
	} else {
		ASSERT(sdp->unlockseg == dhp);
		ASSERT(ndp->unlockseg == NULL);
		ndp->unlockseg = new_dhp;
		if (sdp->flag & TRASHPAGE) {
			ndp->flag |= TRASHPAGE;
		}
	}
	mutex_exit(&lp->mutex);
	*newpvt = (void *)ndp;
	return (0);
}


/*ARGSUSED*/
static void
winlockmap_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
	devmap_cookie_t new_dhp1, void **newpvtp1,
	devmap_cookie_t new_dhp2, void **newpvtp2)
{
	SegProc	*sdp = (SegProc *)pvtp;
	SegLock	*lp = sdp->lp;

	/*
	 * We always create PAGESIZE length mappings, so there should never
	 * be a partial unmapping case
	 */
	ASSERT((new_dhp1 == NULL) && (new_dhp2 == NULL));

	mutex_enter(&lp->mutex);
	ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
	/* make sure this process doesn't own the lock */
	if (sdp == lp->owner) {
		/*
		 * Not handling errors - i.e., errors in unloading the mapping.
		 * As part of unmapping, the hat/seg structure gets torn down
		 * anyway.
		 */
		(void) lock_giveup(lp, 0);
	}

	ASSERT(sdp != lp->owner);
	if (sdp->lockseg == dhp) {
		sdp->lockseg = NULL;
	} else {
		ASSERT(sdp->unlockseg == dhp);
		sdp->unlockseg = NULL;
		sdp->flag &= ~TRASHPAGE;	/* clear flag if set */
	}

	garbage_collect_lock(lp, sdp);
}

/*ARGSUSED*/
static int
winlockmap_access(devmap_cookie_t dhp, void *pvt, offset_t off, size_t len,
	uint_t type, uint_t rw)
{
	SegProc *sdp = (SegProc *)pvt;
	SegLock *lp = sdp->lp;
	int err;

	/* Driver handles only DEVMAP_ACCESS type of faults */
	if (type != DEVMAP_ACCESS)
		return (-1);

	mutex_enter(&lp->mutex);
	ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));

	/* should be using a SegProc that corresponds to current process */
	ASSERT(ID(sdp) == CURPROC_ID);

	/*
	 * If the process is faulting but does not have both segments mapped,
	 * return an error (should cause a segv).
	 * RFE: could give it a permanent trashpage
	 */
	if ((sdp->lockseg == NULL) || (sdp->unlockseg == NULL)) {
		err = -1;
	} else {
		err = seglock_lockfault(dhp, sdp, lp, rw);
	}
	mutex_exit(&lp->mutex);
	return (err);
}

	/* INTERNAL ROUTINES START HERE */



/*
 * search the lock_list list for the specified cookie
 * The cookie is the sy_ident field returned by the ALLOC ioctl.
 * It has two parts:
 *	the pageoffset bits contain the offset into the lock page.
 *	the pagenumber bits contain the lock id.
 * The user code is supposed to pass in only the pagenumber portion
 *	(i.e. mask off the pageoffset bits).
 *	However the code below does the mask anyway, in case the users are
 *	not diligent.
 * If found, returns with the mutex for the SegLock structure held.
 */
static SegLock *
seglock_findlock(uint_t cookie)
{
	SegLock	*lp;

	cookie &= (uint_t)PAGEMASK; /* remove pageoffset bits to get cookie */
	mutex_enter(&winlock_mutex);
	for (lp = lock_list; lp != NULL; lp = lp->next) {
		mutex_enter(&lp->mutex);
		if (cookie == lp->cookie) {
			break;	/* return with lp->mutex held */
		}
		mutex_exit(&lp->mutex);
	}
	mutex_exit(&winlock_mutex);
	return (lp);
}

/*
 * search the lock_list list for the specified non-zero key
 * if found, returns with the lock for the SegLock structure held
 */
static SegLock *
seglock_findkey(uint_t key)
{
	SegLock	*lp;

	ASSERT(MUTEX_HELD(&winlock_mutex));
	/* The driver allows multiple locks with key 0, don't search */
	if (key == 0)
		return (NULL);
	for (lp = lock_list; lp != NULL; lp = lp->next) {
		mutex_enter(&lp->mutex);
		if (key == lp->key)
			break;
		mutex_exit(&lp->mutex);
	}
	return (lp);
}

/*
 * Create a new lock context.
 * Returns with the SegLock mutex held.
 */
static SegLock *
seglock_createlock(enum winlock_style style)
{
	SegLock	*lp;

	DEBUGF(3, (CE_CONT, "seglock_createlock: free_list=%p, next_lock %d\n",
	    (void *)lock_free_list, next_lock));

	ASSERT(MUTEX_HELD(&winlock_mutex));
	if (lock_free_list != NULL) {
		lp = lock_free_list;
		lock_free_list = lp->next;
	} else if (next_lock >= MAX_LOCKS) {
		return (NULL);
	} else {
		lp = kmem_zalloc(sizeof (SegLock), KM_SLEEP);
		lp->cookie = (next_lock + 1) * (uint_t)PAGESIZE;
		mutex_init(&lp->mutex, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&lp->locksleep, NULL, CV_DEFAULT, NULL);
		++next_lock;
	}

	mutex_enter(&lp->mutex);
	ASSERT((lp->cookie/PAGESIZE) <= next_lock);

	if (style == OLDSTYLE_LOCK) {
		lp->lockptr = (int *)ddi_umem_alloc(PAGESIZE,
		    DDI_UMEM_SLEEP, &(lp->umem_cookie));
	} else {
		lp->lockptr = ((int *)lockpage) + ((lp->cookie/PAGESIZE) - 1);
		lp->umem_cookie = lockpage_cookie;
	}

	ASSERT(lp->lockptr != NULL);
	lp->style = style;
	lp->sleepers = 0;
	lp->alloccount = 1;
	lp->timeout = LOCKTIME*hz;
	lp->clients = NULL;
	lp->owner = NULL;
	LOCK(lp) = 0;
	lp->next = lock_list;
	lock_list = lp;
	return (lp);
}

/*
 * Routine to destroy a lock structure.
 * This routine is called while holding the lp->mutex but not the
 * winlock_mutex.
9257c478bd9Sstevel@tonic-gate */ 9267c478bd9Sstevel@tonic-gate 9277c478bd9Sstevel@tonic-gate static void 9287c478bd9Sstevel@tonic-gate seglock_destroylock(SegLock *lp) 9297c478bd9Sstevel@tonic-gate { 9307c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&lp->mutex)); 9317c478bd9Sstevel@tonic-gate ASSERT(!MUTEX_HELD(&winlock_mutex)); 9327c478bd9Sstevel@tonic-gate 9337c478bd9Sstevel@tonic-gate DEBUGF(3, (CE_CONT, "destroying lock cookie %d key %d\n", 9347c478bd9Sstevel@tonic-gate lp->cookie, lp->key)); 9357c478bd9Sstevel@tonic-gate 9367c478bd9Sstevel@tonic-gate ASSERT(lp->alloccount == 0); 9377c478bd9Sstevel@tonic-gate ASSERT(lp->clients == NULL); 9387c478bd9Sstevel@tonic-gate ASSERT(lp->owner == NULL); 9397c478bd9Sstevel@tonic-gate ASSERT(lp->sleepers == 0); 9407c478bd9Sstevel@tonic-gate 9417c478bd9Sstevel@tonic-gate /* clean up/release fields in lp */ 9427c478bd9Sstevel@tonic-gate if (lp->style == OLDSTYLE_LOCK) { 9437c478bd9Sstevel@tonic-gate ddi_umem_free(lp->umem_cookie); 9447c478bd9Sstevel@tonic-gate } 9457c478bd9Sstevel@tonic-gate lp->umem_cookie = NULL; 9467c478bd9Sstevel@tonic-gate lp->lockptr = NULL; 9477c478bd9Sstevel@tonic-gate lp->key = 0; 9487c478bd9Sstevel@tonic-gate 9497c478bd9Sstevel@tonic-gate /* 9507c478bd9Sstevel@tonic-gate * Reduce cookie by 1, makes it non page-aligned and invalid 9517c478bd9Sstevel@tonic-gate * This prevents any valid lookup from finding this lock 9527c478bd9Sstevel@tonic-gate * so when we drop the lock and regrab it it will still 9537c478bd9Sstevel@tonic-gate * be there and nobody else would have attached to it 9547c478bd9Sstevel@tonic-gate */ 9557c478bd9Sstevel@tonic-gate lp->cookie--; 9567c478bd9Sstevel@tonic-gate 9577c478bd9Sstevel@tonic-gate /* Drop and reacquire mutexes in right order */ 9587c478bd9Sstevel@tonic-gate mutex_exit(&lp->mutex); 9597c478bd9Sstevel@tonic-gate mutex_enter(&winlock_mutex); 9607c478bd9Sstevel@tonic-gate mutex_enter(&lp->mutex); 9617c478bd9Sstevel@tonic-gate 9627c478bd9Sstevel@tonic-gate /* reincrement the cookie to get the original valid cookie */ 9637c478bd9Sstevel@tonic-gate lp->cookie++; 9647c478bd9Sstevel@tonic-gate ASSERT((lp->cookie & PAGEOFFSET) == 0); 9657c478bd9Sstevel@tonic-gate ASSERT(lp->alloccount == 0); 9667c478bd9Sstevel@tonic-gate ASSERT(lp->clients == NULL); 9677c478bd9Sstevel@tonic-gate ASSERT(lp->owner == NULL); 9687c478bd9Sstevel@tonic-gate ASSERT(lp->sleepers == 0); 9697c478bd9Sstevel@tonic-gate 9707c478bd9Sstevel@tonic-gate /* Remove lp from lock_list */ 9717c478bd9Sstevel@tonic-gate if (lock_list == lp) { 9727c478bd9Sstevel@tonic-gate lock_list = lp->next; 9737c478bd9Sstevel@tonic-gate } else { 9747c478bd9Sstevel@tonic-gate SegLock *tmp = lock_list; 9757c478bd9Sstevel@tonic-gate while (tmp->next != lp) { 9767c478bd9Sstevel@tonic-gate tmp = tmp->next; 9777c478bd9Sstevel@tonic-gate ASSERT(tmp != NULL); 9787c478bd9Sstevel@tonic-gate } 9797c478bd9Sstevel@tonic-gate tmp->next = lp->next; 9807c478bd9Sstevel@tonic-gate } 9817c478bd9Sstevel@tonic-gate 9827c478bd9Sstevel@tonic-gate /* Add to lock_free_list */ 9837c478bd9Sstevel@tonic-gate lp->next = lock_free_list; 9847c478bd9Sstevel@tonic-gate lock_free_list = lp; 9857c478bd9Sstevel@tonic-gate mutex_exit(&lp->mutex); 9867c478bd9Sstevel@tonic-gate 9877c478bd9Sstevel@tonic-gate /* Check if all locks deleted and cleanup */ 9887c478bd9Sstevel@tonic-gate if (lock_list == NULL) { 9897c478bd9Sstevel@tonic-gate lock_destroyall(); 9907c478bd9Sstevel@tonic-gate } 9917c478bd9Sstevel@tonic-gate 9927c478bd9Sstevel@tonic-gate mutex_exit(&winlock_mutex); 
9937c478bd9Sstevel@tonic-gate } 9947c478bd9Sstevel@tonic-gate 9957c478bd9Sstevel@tonic-gate /* Routine to find a SegProc corresponding to the tag */ 9967c478bd9Sstevel@tonic-gate 9977c478bd9Sstevel@tonic-gate static SegProc * 9987c478bd9Sstevel@tonic-gate seglock_find_specific(SegLock *lp, void *tag) 9997c478bd9Sstevel@tonic-gate { 10007c478bd9Sstevel@tonic-gate SegProc *sdp; 10017c478bd9Sstevel@tonic-gate 10027c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&lp->mutex)); 10037c478bd9Sstevel@tonic-gate ASSERT(tag != NULL); 10047c478bd9Sstevel@tonic-gate for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) { 10057c478bd9Sstevel@tonic-gate if (ID(sdp) == tag) 10067c478bd9Sstevel@tonic-gate break; 10077c478bd9Sstevel@tonic-gate } 10087c478bd9Sstevel@tonic-gate return (sdp); 10097c478bd9Sstevel@tonic-gate } 10107c478bd9Sstevel@tonic-gate 10117c478bd9Sstevel@tonic-gate /* Routine to find (and if needed allocate) a SegProc corresponding to tag */ 10127c478bd9Sstevel@tonic-gate 10137c478bd9Sstevel@tonic-gate static SegProc * 10147c478bd9Sstevel@tonic-gate seglock_alloc_specific(SegLock *lp, void *tag) 10157c478bd9Sstevel@tonic-gate { 10167c478bd9Sstevel@tonic-gate SegProc *sdp; 10177c478bd9Sstevel@tonic-gate 10187c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&lp->mutex)); 10197c478bd9Sstevel@tonic-gate ASSERT(tag != NULL); 10207c478bd9Sstevel@tonic-gate 10217c478bd9Sstevel@tonic-gate /* Search and return if existing one found */ 10227c478bd9Sstevel@tonic-gate sdp = seglock_find_specific(lp, tag); 10237c478bd9Sstevel@tonic-gate if (sdp != NULL) 10247c478bd9Sstevel@tonic-gate return (sdp); 10257c478bd9Sstevel@tonic-gate 10267c478bd9Sstevel@tonic-gate DEBUGF(3, (CE_CONT, "Allocating segproc structure for tag %p lock %d\n", 10277c478bd9Sstevel@tonic-gate tag, lp->cookie)); 10287c478bd9Sstevel@tonic-gate 10297c478bd9Sstevel@tonic-gate /* Allocate a new SegProc */ 10307c478bd9Sstevel@tonic-gate sdp = kmem_zalloc(sizeof (SegProc), KM_SLEEP); 10317c478bd9Sstevel@tonic-gate sdp->next = lp->clients; 10327c478bd9Sstevel@tonic-gate lp->clients = sdp; 10337c478bd9Sstevel@tonic-gate sdp->lp = lp; 10347c478bd9Sstevel@tonic-gate ID(sdp) = tag; 10357c478bd9Sstevel@tonic-gate return (sdp); 10367c478bd9Sstevel@tonic-gate } 10377c478bd9Sstevel@tonic-gate 10387c478bd9Sstevel@tonic-gate /* 10397c478bd9Sstevel@tonic-gate * search a context's client list for the given client and delete 10407c478bd9Sstevel@tonic-gate */ 10417c478bd9Sstevel@tonic-gate 10427c478bd9Sstevel@tonic-gate static void 10437c478bd9Sstevel@tonic-gate seglock_deleteclient(SegLock *lp, SegProc *sdp) 10447c478bd9Sstevel@tonic-gate { 10457c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&lp->mutex)); 10467c478bd9Sstevel@tonic-gate ASSERT(lp->owner != sdp); /* Not current owner of lock */ 10477c478bd9Sstevel@tonic-gate ASSERT(sdp->lockseg == NULL); /* Mappings torn down */ 10487c478bd9Sstevel@tonic-gate ASSERT(sdp->unlockseg == NULL); 10497c478bd9Sstevel@tonic-gate 10507c478bd9Sstevel@tonic-gate DEBUGF(3, (CE_CONT, "Deleting segproc structure for pid %d lock %d\n", 10517c478bd9Sstevel@tonic-gate ddi_get_pid(), lp->cookie)); 10527c478bd9Sstevel@tonic-gate if (lp->clients == sdp) { 10537c478bd9Sstevel@tonic-gate lp->clients = sdp->next; 10547c478bd9Sstevel@tonic-gate } else { 10557c478bd9Sstevel@tonic-gate SegProc *tmp = lp->clients; 10567c478bd9Sstevel@tonic-gate while (tmp->next != sdp) { 10577c478bd9Sstevel@tonic-gate tmp = tmp->next; 10587c478bd9Sstevel@tonic-gate ASSERT(tmp != NULL); 10597c478bd9Sstevel@tonic-gate } 10607c478bd9Sstevel@tonic-gate tmp->next = 
sdp->next;
10617c478bd9Sstevel@tonic-gate 	}
10627c478bd9Sstevel@tonic-gate 	kmem_free(sdp, sizeof (SegProc));
10637c478bd9Sstevel@tonic-gate }
10647c478bd9Sstevel@tonic-gate 
10657c478bd9Sstevel@tonic-gate /*
10667c478bd9Sstevel@tonic-gate  * Routine to verify whether the SegProc and SegLock
10677c478bd9Sstevel@tonic-gate  * structures are empty/idle.
10687c478bd9Sstevel@tonic-gate  * Destroys the structures if they are ready.
10697c478bd9Sstevel@tonic-gate  * Can be called with sdp == NULL if we want to verify only the lock state.
10707c478bd9Sstevel@tonic-gate  * The caller should hold the lp->mutex;
10717c478bd9Sstevel@tonic-gate  * this routine drops the mutex.
10727c478bd9Sstevel@tonic-gate  */
10737c478bd9Sstevel@tonic-gate static void
10747c478bd9Sstevel@tonic-gate garbage_collect_lock(SegLock *lp, SegProc *sdp)
10757c478bd9Sstevel@tonic-gate {
10767c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
10777c478bd9Sstevel@tonic-gate 	/* see if both segments unmapped from client structure */
10787c478bd9Sstevel@tonic-gate 	if ((sdp != NULL) && (sdp->lockseg == NULL) && (sdp->unlockseg == NULL))
10797c478bd9Sstevel@tonic-gate 		seglock_deleteclient(lp, sdp);
10807c478bd9Sstevel@tonic-gate 
10817c478bd9Sstevel@tonic-gate 	/* see if this is last client in the entire lock context */
10827c478bd9Sstevel@tonic-gate 	if ((lp->clients == NULL) && (lp->alloccount == 0)) {
10837c478bd9Sstevel@tonic-gate 		seglock_destroylock(lp);
10847c478bd9Sstevel@tonic-gate 	} else {
10857c478bd9Sstevel@tonic-gate 		mutex_exit(&lp->mutex);
10867c478bd9Sstevel@tonic-gate 	}
10877c478bd9Sstevel@tonic-gate }
10887c478bd9Sstevel@tonic-gate 
10897c478bd9Sstevel@tonic-gate 
10907c478bd9Sstevel@tonic-gate /* IOCTLS START HERE */
10917c478bd9Sstevel@tonic-gate 
10927c478bd9Sstevel@tonic-gate static int
10937c478bd9Sstevel@tonic-gate seglock_grabinfo(intptr_t arg, int mode)
10947c478bd9Sstevel@tonic-gate {
10957c478bd9Sstevel@tonic-gate 	int i = 1;
10967c478bd9Sstevel@tonic-gate 
10977c478bd9Sstevel@tonic-gate 	/* multiple clients per lock supported - see comments up top */
10987c478bd9Sstevel@tonic-gate 	if (ddi_copyout((caddr_t)&i, (caddr_t)arg, sizeof (int), mode) != 0)
10997c478bd9Sstevel@tonic-gate 		return (EFAULT);
11007c478bd9Sstevel@tonic-gate 	return (0);
11017c478bd9Sstevel@tonic-gate }
11027c478bd9Sstevel@tonic-gate 
11037c478bd9Sstevel@tonic-gate static int
11047c478bd9Sstevel@tonic-gate seglock_graballoc(intptr_t arg, enum winlock_style style, int mode) /* IOCTL */
11057c478bd9Sstevel@tonic-gate {
11067c478bd9Sstevel@tonic-gate 	struct seglock *lp;
11077c478bd9Sstevel@tonic-gate 	uint_t key;
11087c478bd9Sstevel@tonic-gate 	struct winlockalloc wla;
11097c478bd9Sstevel@tonic-gate 	int err;
11107c478bd9Sstevel@tonic-gate 
11117c478bd9Sstevel@tonic-gate 	if (style == OLDSTYLE_LOCK) {
11127c478bd9Sstevel@tonic-gate 		key = 0;
11137c478bd9Sstevel@tonic-gate 	} else {
11147c478bd9Sstevel@tonic-gate 		if (ddi_copyin((caddr_t)arg, (caddr_t)&wla, sizeof (wla),
11157c478bd9Sstevel@tonic-gate 		    mode)) {
11167c478bd9Sstevel@tonic-gate 			return (EFAULT);
11177c478bd9Sstevel@tonic-gate 		}
11187c478bd9Sstevel@tonic-gate 		key = wla.sy_key;
11197c478bd9Sstevel@tonic-gate 	}
11207c478bd9Sstevel@tonic-gate 
11217c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT,
11227c478bd9Sstevel@tonic-gate 	    "seglock_graballoc: key=%u, style=%d\n", key, style));
11237c478bd9Sstevel@tonic-gate 
11247c478bd9Sstevel@tonic-gate 	mutex_enter(&winlock_mutex);
11257c478bd9Sstevel@tonic-gate 	/* Allocate lockpage on first new style alloc */
11267c478bd9Sstevel@tonic-gate 	if ((lockpage == NULL) && (style ==
NEWSTYLE_LOCK)) { 11277c478bd9Sstevel@tonic-gate lockpage = ddi_umem_alloc(PAGESIZE, DDI_UMEM_SLEEP, 11287c478bd9Sstevel@tonic-gate &lockpage_cookie); 11297c478bd9Sstevel@tonic-gate } 11307c478bd9Sstevel@tonic-gate 11317c478bd9Sstevel@tonic-gate /* Allocate trashpage on first alloc (any style) */ 11327c478bd9Sstevel@tonic-gate if (trashpage_cookie == NULL) { 11337c478bd9Sstevel@tonic-gate (void) ddi_umem_alloc(PAGESIZE, DDI_UMEM_TRASH | DDI_UMEM_SLEEP, 11347c478bd9Sstevel@tonic-gate &trashpage_cookie); 11357c478bd9Sstevel@tonic-gate } 11367c478bd9Sstevel@tonic-gate 11377c478bd9Sstevel@tonic-gate if ((lp = seglock_findkey(key)) != NULL) { 11387c478bd9Sstevel@tonic-gate DEBUGF(2, (CE_CONT, "alloc: found lock key %d cookie %d\n", 11397c478bd9Sstevel@tonic-gate key, lp->cookie)); 11407c478bd9Sstevel@tonic-gate ++lp->alloccount; 11417c478bd9Sstevel@tonic-gate } else if ((lp = seglock_createlock(style)) != NULL) { 11427c478bd9Sstevel@tonic-gate DEBUGF(2, (CE_CONT, "alloc: created lock key %d cookie %d\n", 11437c478bd9Sstevel@tonic-gate key, lp->cookie)); 11447c478bd9Sstevel@tonic-gate lp->key = key; 11457c478bd9Sstevel@tonic-gate } else { 11467c478bd9Sstevel@tonic-gate DEBUGF(2, (CE_CONT, "alloc: cannot create lock key %d\n", key)); 11477c478bd9Sstevel@tonic-gate mutex_exit(&winlock_mutex); 11487c478bd9Sstevel@tonic-gate return (ENOMEM); 11497c478bd9Sstevel@tonic-gate } 11507c478bd9Sstevel@tonic-gate ASSERT((lp != NULL) && MUTEX_HELD(&lp->mutex)); 11517c478bd9Sstevel@tonic-gate 11527c478bd9Sstevel@tonic-gate mutex_exit(&winlock_mutex); 11537c478bd9Sstevel@tonic-gate 11547c478bd9Sstevel@tonic-gate if (style == OLDSTYLE_LOCK) { 11557c478bd9Sstevel@tonic-gate err = ddi_copyout((caddr_t)&lp->cookie, (caddr_t)arg, 11567c478bd9Sstevel@tonic-gate sizeof (lp->cookie), mode); 11577c478bd9Sstevel@tonic-gate } else { 11587c478bd9Sstevel@tonic-gate wla.sy_ident = lp->cookie + 11597c478bd9Sstevel@tonic-gate (uint_t)((uintptr_t)(lp->lockptr) & PAGEOFFSET); 11607c478bd9Sstevel@tonic-gate err = ddi_copyout((caddr_t)&wla, (caddr_t)arg, 11617c478bd9Sstevel@tonic-gate sizeof (wla), mode); 11627c478bd9Sstevel@tonic-gate } 11637c478bd9Sstevel@tonic-gate 11647c478bd9Sstevel@tonic-gate if (err) { 11657c478bd9Sstevel@tonic-gate /* On error, should undo allocation */ 11667c478bd9Sstevel@tonic-gate lp->alloccount--; 11677c478bd9Sstevel@tonic-gate 11687c478bd9Sstevel@tonic-gate /* Verify and delete if lock is unused now */ 11697c478bd9Sstevel@tonic-gate garbage_collect_lock(lp, NULL); 11707c478bd9Sstevel@tonic-gate return (EFAULT); 11717c478bd9Sstevel@tonic-gate } 11727c478bd9Sstevel@tonic-gate 11737c478bd9Sstevel@tonic-gate mutex_exit(&lp->mutex); 11747c478bd9Sstevel@tonic-gate return (0); 11757c478bd9Sstevel@tonic-gate } 11767c478bd9Sstevel@tonic-gate 11777c478bd9Sstevel@tonic-gate static int 11787c478bd9Sstevel@tonic-gate seglock_grabfree(intptr_t arg, int mode) /* IOCTL */ 11797c478bd9Sstevel@tonic-gate { 11807c478bd9Sstevel@tonic-gate struct seglock *lp; 11817c478bd9Sstevel@tonic-gate uint_t offset; 11827c478bd9Sstevel@tonic-gate 11837c478bd9Sstevel@tonic-gate if (ddi_copyin((caddr_t)arg, &offset, sizeof (offset), mode) 11847c478bd9Sstevel@tonic-gate != 0) { 11857c478bd9Sstevel@tonic-gate return (EFAULT); 11867c478bd9Sstevel@tonic-gate } 11877c478bd9Sstevel@tonic-gate DEBUGF(2, (CE_CONT, "seglock_grabfree: offset=%u", offset)); 11887c478bd9Sstevel@tonic-gate 11897c478bd9Sstevel@tonic-gate if ((lp = seglock_findlock(offset)) == NULL) { 11907c478bd9Sstevel@tonic-gate DEBUGF(2, (CE_CONT, "did not find lock\n")); 
11917c478bd9Sstevel@tonic-gate 		return (EINVAL);
11927c478bd9Sstevel@tonic-gate 	}
11937c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT, " lock key %d, cookie %d, alloccount %d\n",
11947c478bd9Sstevel@tonic-gate 	    lp->key, lp->cookie, lp->alloccount));
11957c478bd9Sstevel@tonic-gate 
11967c478bd9Sstevel@tonic-gate 	if (lp->alloccount > 0)
11977c478bd9Sstevel@tonic-gate 		lp->alloccount--;
11987c478bd9Sstevel@tonic-gate 
11997c478bd9Sstevel@tonic-gate 	/* Verify and delete if lock is unused now */
12007c478bd9Sstevel@tonic-gate 	garbage_collect_lock(lp, NULL);
12017c478bd9Sstevel@tonic-gate 	return (0);
12027c478bd9Sstevel@tonic-gate }
12037c478bd9Sstevel@tonic-gate 
12047c478bd9Sstevel@tonic-gate 
12057c478bd9Sstevel@tonic-gate /*
12067c478bd9Sstevel@tonic-gate  * Sets the timeout in the lock and UFLAGS in the client.
12077c478bd9Sstevel@tonic-gate  * The UFLAGS are stored in the client structure and persist only
12087c478bd9Sstevel@tonic-gate  * until the unmap of the lock pages. If the process sets UFLAGS,
12097c478bd9Sstevel@tonic-gate  * does a map of the lock/unlock pages and then unmaps them, the client
12107c478bd9Sstevel@tonic-gate  * structure will get deleted and the UFLAGS will be lost. The process
12117c478bd9Sstevel@tonic-gate  * will need to set up the flags again.
12127c478bd9Sstevel@tonic-gate  */
12137c478bd9Sstevel@tonic-gate static int
12147c478bd9Sstevel@tonic-gate seglock_settimeout(intptr_t arg, int mode) /* IOCTL */
12157c478bd9Sstevel@tonic-gate {
12167c478bd9Sstevel@tonic-gate 	SegLock	*lp;
12177c478bd9Sstevel@tonic-gate 	SegProc	*sdp;
12187c478bd9Sstevel@tonic-gate 	struct	winlocktimeout	wlt;
12197c478bd9Sstevel@tonic-gate 
12207c478bd9Sstevel@tonic-gate 	if (ddi_copyin((caddr_t)arg, &wlt, sizeof (wlt), mode) != 0) {
12217c478bd9Sstevel@tonic-gate 		return (EFAULT);
12227c478bd9Sstevel@tonic-gate 	}
12237c478bd9Sstevel@tonic-gate 
12247c478bd9Sstevel@tonic-gate 	if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
12257c478bd9Sstevel@tonic-gate 		return (EINVAL);
12267c478bd9Sstevel@tonic-gate 
12277c478bd9Sstevel@tonic-gate 	lp->timeout = MSEC_TO_TICK_ROUNDUP(wlt.sy_timeout);
12287c478bd9Sstevel@tonic-gate 	/* if timeout modified, wake up any sleepers */
12297c478bd9Sstevel@tonic-gate 	if (lp->sleepers > 0) {
12307c478bd9Sstevel@tonic-gate 		cv_broadcast(&lp->locksleep);
12317c478bd9Sstevel@tonic-gate 	}
12327c478bd9Sstevel@tonic-gate 
12337c478bd9Sstevel@tonic-gate 	/*
12347c478bd9Sstevel@tonic-gate 	 * If the process is trying to set UFLAGS,
12357c478bd9Sstevel@tonic-gate 	 * find the client segproc and allocate one if needed;
12367c478bd9Sstevel@tonic-gate 	 * set the flags, preserving the kernel flags.
12377c478bd9Sstevel@tonic-gate 	 * If the process is clearing UFLAGS,
12387c478bd9Sstevel@tonic-gate 	 * find the client segproc but don't allocate one if it does not exist.
12397c478bd9Sstevel@tonic-gate 	 */
12407c478bd9Sstevel@tonic-gate 	if (wlt.sy_flags & UFLAGS) {
12417c478bd9Sstevel@tonic-gate 		sdp = seglock_allocclient(lp);
12427c478bd9Sstevel@tonic-gate 		sdp->flag = sdp->flag & KFLAGS | wlt.sy_flags & UFLAGS;
12437c478bd9Sstevel@tonic-gate 	} else if ((sdp = seglock_findclient(lp)) != NULL) {
12447c478bd9Sstevel@tonic-gate 		sdp->flag = sdp->flag & KFLAGS;
12457c478bd9Sstevel@tonic-gate 		/* If clearing UFLAGS leaves the segment or lock idle, delete */
12467c478bd9Sstevel@tonic-gate 		garbage_collect_lock(lp, sdp);
12477c478bd9Sstevel@tonic-gate 		return (0);
12487c478bd9Sstevel@tonic-gate 	}
12497c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
12507c478bd9Sstevel@tonic-gate 	return (0);
12517c478bd9Sstevel@tonic-gate }
12527c478bd9Sstevel@tonic-gate 
12537c478bd9Sstevel@tonic-gate static int
12547c478bd9Sstevel@tonic-gate seglock_gettimeout(intptr_t arg, int mode)
12557c478bd9Sstevel@tonic-gate {
12567c478bd9Sstevel@tonic-gate 	SegLock	*lp;
12577c478bd9Sstevel@tonic-gate 	SegProc	*sdp;
12587c478bd9Sstevel@tonic-gate 	struct	winlocktimeout	wlt;
12597c478bd9Sstevel@tonic-gate 
12607c478bd9Sstevel@tonic-gate 	if (ddi_copyin((caddr_t)arg, &wlt, sizeof (wlt), mode) != 0)
12617c478bd9Sstevel@tonic-gate 		return (EFAULT);
12627c478bd9Sstevel@tonic-gate 
12637c478bd9Sstevel@tonic-gate 	if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
12647c478bd9Sstevel@tonic-gate 		return (EINVAL);
12657c478bd9Sstevel@tonic-gate 
12667c478bd9Sstevel@tonic-gate 	wlt.sy_timeout = TICK_TO_MSEC(lp->timeout);
12677c478bd9Sstevel@tonic-gate 	/*
12687c478bd9Sstevel@tonic-gate 	 * If this process has an active allocated lock, return those flags.
12697c478bd9Sstevel@tonic-gate 	 * Don't allocate a client structure on gettimeout.
12707c478bd9Sstevel@tonic-gate 	 * If not, return 0.
12717c478bd9Sstevel@tonic-gate 	 */
12727c478bd9Sstevel@tonic-gate 	if ((sdp = seglock_findclient(lp)) != NULL) {
12737c478bd9Sstevel@tonic-gate 		wlt.sy_flags = sdp->flag & UFLAGS;
12747c478bd9Sstevel@tonic-gate 	} else {
12757c478bd9Sstevel@tonic-gate 		wlt.sy_flags = 0;
12767c478bd9Sstevel@tonic-gate 	}
12777c478bd9Sstevel@tonic-gate 	mutex_exit(&lp->mutex);	/* mutex held by seglock_findlock */
12787c478bd9Sstevel@tonic-gate 
12797c478bd9Sstevel@tonic-gate 	if (ddi_copyout(&wlt, (caddr_t)arg, sizeof (wlt), mode) != 0)
12807c478bd9Sstevel@tonic-gate 		return (EFAULT);
12817c478bd9Sstevel@tonic-gate 
12827c478bd9Sstevel@tonic-gate 	return (0);
12837c478bd9Sstevel@tonic-gate }
12847c478bd9Sstevel@tonic-gate 
12857c478bd9Sstevel@tonic-gate /*
12867c478bd9Sstevel@tonic-gate  * Handle lock segment faults here...
12877c478bd9Sstevel@tonic-gate  *
12887c478bd9Sstevel@tonic-gate  * This is where the magic happens.
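 *
 * In outline, the cases handled below are:
 *   - the lock has no owner: the faulting client becomes the owner and
 *     gets the mappings (give_mapping).
 *   - the owner faults on its own lock page, or on the unlock page with
 *     no waiters: it simply gets the mapping back.
 *   - the owner faults on the unlock page while there are waiters: the
 *     lock is released (lock_giveup) and the owner is left with a
 *     trash-page mapping.
 *   - a non-owner faults: the owner's mappings are unloaded first; if the
 *     lock word is clear the faulter takes the lock, otherwise it sleeps,
 *     possibly with a timeout after which it may steal the lock.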
12897c478bd9Sstevel@tonic-gate  */
12907c478bd9Sstevel@tonic-gate 
12917c478bd9Sstevel@tonic-gate /* ARGSUSED */
12927c478bd9Sstevel@tonic-gate static int
12937c478bd9Sstevel@tonic-gate seglock_lockfault(devmap_cookie_t dhp, SegProc *sdp, SegLock *lp, uint_t rw)
12947c478bd9Sstevel@tonic-gate {
12957c478bd9Sstevel@tonic-gate 	SegProc *owner = lp->owner;
12967c478bd9Sstevel@tonic-gate 	int err;
12977c478bd9Sstevel@tonic-gate 
12987c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&lp->mutex));
12997c478bd9Sstevel@tonic-gate 	DEBUGF(3, (CE_CONT,
13007c478bd9Sstevel@tonic-gate 	    "seglock_lockfault: hdl=%p, sdp=%p, lp=%p owner=%p\n",
13017c478bd9Sstevel@tonic-gate 	    (void *)dhp, (void *)sdp, (void *)lp, (void *)owner));
13027c478bd9Sstevel@tonic-gate 
13037c478bd9Sstevel@tonic-gate 	/* lockfault is always called with sdp in current process context */
13047c478bd9Sstevel@tonic-gate 	ASSERT(ID(sdp) == CURPROC_ID);
13057c478bd9Sstevel@tonic-gate 
13067c478bd9Sstevel@tonic-gate 	/* If Lock has no current owner, give the mapping to new owner */
13077c478bd9Sstevel@tonic-gate 	if (owner == NULL) {
13087c478bd9Sstevel@tonic-gate 		DEBUGF(4, (CE_CONT, " lock has no current owner\n"));
13097c478bd9Sstevel@tonic-gate 		return (give_mapping(lp, sdp, rw));
13107c478bd9Sstevel@tonic-gate 	}
13117c478bd9Sstevel@tonic-gate 
13127c478bd9Sstevel@tonic-gate 	if (owner == sdp) {
13137c478bd9Sstevel@tonic-gate 		/*
13147c478bd9Sstevel@tonic-gate 		 * Current owner is faulting on owned lock segment OR
13157c478bd9Sstevel@tonic-gate 		 * Current owner is faulting on unlock page and has no waiters
13167c478bd9Sstevel@tonic-gate 		 * Then can give the mapping to current owner
13177c478bd9Sstevel@tonic-gate 		 */
13187c478bd9Sstevel@tonic-gate 		if ((sdp->lockseg == dhp) || (lp->sleepers == 0)) {
13197c478bd9Sstevel@tonic-gate 			DEBUGF(4, (CE_CONT, "lock owner faulting\n"));
13207c478bd9Sstevel@tonic-gate 			return (give_mapping(lp, sdp, rw));
13217c478bd9Sstevel@tonic-gate 		} else {
13227c478bd9Sstevel@tonic-gate 			/*
13237c478bd9Sstevel@tonic-gate 			 * Owner must be writing to the unlock page and there are waiters;
13247c478bd9Sstevel@tonic-gate 			 * other cases have been checked earlier.
13257c478bd9Sstevel@tonic-gate 			 * Release the lock, the owner, and the owner's mappings.
13267c478bd9Sstevel@tonic-gate 			 * As the owner is trying to write to the unlock page, leave
13277c478bd9Sstevel@tonic-gate 			 * it with a trashpage mapping and wake up the sleepers
13287c478bd9Sstevel@tonic-gate 			 */
13297c478bd9Sstevel@tonic-gate 			ASSERT((dhp == sdp->unlockseg) && (lp->sleepers != 0));
13307c478bd9Sstevel@tonic-gate 			DEBUGF(4, (CE_CONT, " owner fault on unlock seg w/ sleeper\n"));
13317c478bd9Sstevel@tonic-gate 			return (lock_giveup(lp, 1));
13327c478bd9Sstevel@tonic-gate 		}
13337c478bd9Sstevel@tonic-gate 	}
13347c478bd9Sstevel@tonic-gate 
13357c478bd9Sstevel@tonic-gate 	ASSERT(owner != sdp);
13367c478bd9Sstevel@tonic-gate 
13377c478bd9Sstevel@tonic-gate 	/*
13387c478bd9Sstevel@tonic-gate 	 * If old owner faulting on trash unlock mapping,
13397c478bd9Sstevel@tonic-gate 	 * load hat mappings to trash page
13407c478bd9Sstevel@tonic-gate 	 * RFE: non-owners should NOT be faulting on the unlock mapping as they
13417c478bd9Sstevel@tonic-gate 	 * are first supposed to fault on the lock seg. We could give them
13427c478bd9Sstevel@tonic-gate 	 * a trash page or return an error.
13437c478bd9Sstevel@tonic-gate */ 13447c478bd9Sstevel@tonic-gate if ((sdp->unlockseg == dhp) && (sdp->flag & TRASHPAGE)) { 13457c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, " old owner reloads trash mapping\n")); 13467c478bd9Sstevel@tonic-gate return (devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE, 13477c478bd9Sstevel@tonic-gate DEVMAP_ACCESS, rw)); 13487c478bd9Sstevel@tonic-gate } 13497c478bd9Sstevel@tonic-gate 13507c478bd9Sstevel@tonic-gate /* 13517c478bd9Sstevel@tonic-gate * Non-owner faulting. Need to check current LOCK state. 13527c478bd9Sstevel@tonic-gate * 13537c478bd9Sstevel@tonic-gate * Before reading lock value in LOCK(lp), we must make sure that 13547c478bd9Sstevel@tonic-gate * the owner cannot change its value before we change mappings 13557c478bd9Sstevel@tonic-gate * or else we could end up either with a hung process 13567c478bd9Sstevel@tonic-gate * or more than one process thinking they have the lock. 13577c478bd9Sstevel@tonic-gate * We do that by unloading the owner's mappings 13587c478bd9Sstevel@tonic-gate */ 13597c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, " owner loses mappings to check lock state\n")); 13607c478bd9Sstevel@tonic-gate err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE); 13617c478bd9Sstevel@tonic-gate err |= devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE); 13627c478bd9Sstevel@tonic-gate if (err != 0) 13637c478bd9Sstevel@tonic-gate return (err); /* unable to remove owner mapping */ 13647c478bd9Sstevel@tonic-gate 13657c478bd9Sstevel@tonic-gate /* 13667c478bd9Sstevel@tonic-gate * If lock is not held, then current owner mappings were 13677c478bd9Sstevel@tonic-gate * unloaded above and we can give the lock to the new owner 13687c478bd9Sstevel@tonic-gate */ 13697c478bd9Sstevel@tonic-gate if (LOCK(lp) == 0) { 13707c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, 13717c478bd9Sstevel@tonic-gate "Free lock (%p): Giving mapping to new owner %d\n", 13727c478bd9Sstevel@tonic-gate (void *)lp, ddi_get_pid())); 13737c478bd9Sstevel@tonic-gate return (give_mapping(lp, sdp, rw)); 13747c478bd9Sstevel@tonic-gate } 13757c478bd9Sstevel@tonic-gate 13767c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, " lock held, sleeping\n")); 13777c478bd9Sstevel@tonic-gate 13787c478bd9Sstevel@tonic-gate /* 13797c478bd9Sstevel@tonic-gate * A non-owning process tried to write (presumably to the lockpage, 13807c478bd9Sstevel@tonic-gate * but it doesn't matter) but the lock is held; we need to sleep for 13817c478bd9Sstevel@tonic-gate * the lock while there is an owner. 13827c478bd9Sstevel@tonic-gate */ 13837c478bd9Sstevel@tonic-gate 13847c478bd9Sstevel@tonic-gate lp->sleepers++; 13857c478bd9Sstevel@tonic-gate while ((owner = lp->owner) != NULL) { 13867c478bd9Sstevel@tonic-gate int rval; 13877c478bd9Sstevel@tonic-gate 13887c478bd9Sstevel@tonic-gate if ((lp->timeout == 0) || (owner->flag & SY_NOTIMEOUT)) { 13897c478bd9Sstevel@tonic-gate /* 13907c478bd9Sstevel@tonic-gate * No timeout has been specified for this lock; 13917c478bd9Sstevel@tonic-gate * we'll simply sleep on the condition variable. 13927c478bd9Sstevel@tonic-gate */ 13937c478bd9Sstevel@tonic-gate rval = cv_wait_sig(&lp->locksleep, &lp->mutex); 13947c478bd9Sstevel@tonic-gate } else { 13957c478bd9Sstevel@tonic-gate /* 13967c478bd9Sstevel@tonic-gate * A timeout _has_ been specified for this lock. We need 13977c478bd9Sstevel@tonic-gate * to wake up and possibly steal this lock if the owner 13987c478bd9Sstevel@tonic-gate * does not let it go. 
Note that all sleepers on a lock 13997c478bd9Sstevel@tonic-gate * with a timeout wait; the sleeper with the earliest 14007c478bd9Sstevel@tonic-gate * timeout will wakeup, and potentially steal the lock 14017c478bd9Sstevel@tonic-gate * Stealing the lock will cause a broadcast on the 14027c478bd9Sstevel@tonic-gate * locksleep cv and thus kick the other timed waiters 14037c478bd9Sstevel@tonic-gate * and cause everyone to restart in a new timedwait 14047c478bd9Sstevel@tonic-gate */ 1405*d3d50737SRafael Vanoni rval = cv_reltimedwait_sig(&lp->locksleep, 1406*d3d50737SRafael Vanoni &lp->mutex, lp->timeout, TR_CLOCK_TICK); 14077c478bd9Sstevel@tonic-gate } 14087c478bd9Sstevel@tonic-gate 14097c478bd9Sstevel@tonic-gate /* 14107c478bd9Sstevel@tonic-gate * Timeout and still old owner - steal lock 14117c478bd9Sstevel@tonic-gate * Force-Release lock and give old owner a trashpage mapping 14127c478bd9Sstevel@tonic-gate */ 14137c478bd9Sstevel@tonic-gate if ((rval == -1) && (lp->owner == owner)) { 14147c478bd9Sstevel@tonic-gate /* 14157c478bd9Sstevel@tonic-gate * if any errors in lock_giveup, go back and sleep/retry 14167c478bd9Sstevel@tonic-gate * If successful, will break out of loop 14177c478bd9Sstevel@tonic-gate */ 14187c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, "Process %d timed out on lock %d\n", 14197c478bd9Sstevel@tonic-gate ddi_get_pid(), lp->cookie); 14207c478bd9Sstevel@tonic-gate (void) lock_giveup(lp, 1); 14217c478bd9Sstevel@tonic-gate } else if (rval == 0) { /* signal pending */ 14227c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, 14237c478bd9Sstevel@tonic-gate "Process %d signalled while waiting on lock %d\n", 14247c478bd9Sstevel@tonic-gate ddi_get_pid(), lp->cookie); 14257c478bd9Sstevel@tonic-gate lp->sleepers--; 14267c478bd9Sstevel@tonic-gate return (FC_MAKE_ERR(EINTR)); 14277c478bd9Sstevel@tonic-gate } 14287c478bd9Sstevel@tonic-gate } 14297c478bd9Sstevel@tonic-gate 14307c478bd9Sstevel@tonic-gate lp->sleepers--; 14317c478bd9Sstevel@tonic-gate /* 14327c478bd9Sstevel@tonic-gate * Give mapping to this process and save a fault later 14337c478bd9Sstevel@tonic-gate */ 14347c478bd9Sstevel@tonic-gate return (give_mapping(lp, sdp, rw)); 14357c478bd9Sstevel@tonic-gate } 14367c478bd9Sstevel@tonic-gate 14377c478bd9Sstevel@tonic-gate /* 14387c478bd9Sstevel@tonic-gate * Utility: give a valid mapping to lock and unlock pages to current process. 
14397c478bd9Sstevel@tonic-gate * Caller responsible for unloading old owner's mappings 14407c478bd9Sstevel@tonic-gate */ 14417c478bd9Sstevel@tonic-gate 14427c478bd9Sstevel@tonic-gate static int 14437c478bd9Sstevel@tonic-gate give_mapping(SegLock *lp, SegProc *sdp, uint_t rw) 14447c478bd9Sstevel@tonic-gate { 14457c478bd9Sstevel@tonic-gate int err = 0; 14467c478bd9Sstevel@tonic-gate 14477c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&lp->mutex)); 14487c478bd9Sstevel@tonic-gate ASSERT(!((lp->owner == NULL) && (LOCK(lp) != 0))); 14497c478bd9Sstevel@tonic-gate /* give_mapping is always called with sdp in current process context */ 14507c478bd9Sstevel@tonic-gate ASSERT(ID(sdp) == CURPROC_ID); 14517c478bd9Sstevel@tonic-gate 14527c478bd9Sstevel@tonic-gate /* remap any old trash mappings */ 14537c478bd9Sstevel@tonic-gate if (sdp->flag & TRASHPAGE) { 14547c478bd9Sstevel@tonic-gate /* current owner should not have a trash mapping */ 14557c478bd9Sstevel@tonic-gate ASSERT(sdp != lp->owner); 14567c478bd9Sstevel@tonic-gate 14577c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, 14587c478bd9Sstevel@tonic-gate "new owner %d remapping old trash mapping\n", 14597c478bd9Sstevel@tonic-gate ddi_get_pid())); 14607c478bd9Sstevel@tonic-gate if ((err = devmap_umem_remap(sdp->unlockseg, winlock_dip, 14617c478bd9Sstevel@tonic-gate lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT, 0, 0)) != 0) { 14627c478bd9Sstevel@tonic-gate /* 14637c478bd9Sstevel@tonic-gate * unable to remap old trash page, 14647c478bd9Sstevel@tonic-gate * abort before changing owner 14657c478bd9Sstevel@tonic-gate */ 14667c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, 14677c478bd9Sstevel@tonic-gate "aborting: error in umem_remap %d\n", err)); 14687c478bd9Sstevel@tonic-gate return (err); 14697c478bd9Sstevel@tonic-gate } 14707c478bd9Sstevel@tonic-gate sdp->flag &= ~TRASHPAGE; 14717c478bd9Sstevel@tonic-gate } 14727c478bd9Sstevel@tonic-gate 14737c478bd9Sstevel@tonic-gate /* we have a new owner now */ 14747c478bd9Sstevel@tonic-gate lp->owner = sdp; 14757c478bd9Sstevel@tonic-gate 14767c478bd9Sstevel@tonic-gate if ((err = devmap_load(sdp->lockseg, lp->cookie, PAGESIZE, 14777c478bd9Sstevel@tonic-gate DEVMAP_ACCESS, rw)) != 0) { 14787c478bd9Sstevel@tonic-gate return (err); 14797c478bd9Sstevel@tonic-gate } 14807c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, "new owner %d gets lock mapping", ddi_get_pid())); 14817c478bd9Sstevel@tonic-gate 14827c478bd9Sstevel@tonic-gate if (lp->sleepers) { 14837c478bd9Sstevel@tonic-gate /* Force unload unlock mapping if there are waiters */ 14847c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, 14857c478bd9Sstevel@tonic-gate " lock has %d sleepers => remove unlock mapping\n", 14867c478bd9Sstevel@tonic-gate lp->sleepers)); 14877c478bd9Sstevel@tonic-gate err = devmap_unload(sdp->unlockseg, lp->cookie, PAGESIZE); 14887c478bd9Sstevel@tonic-gate } else { 14897c478bd9Sstevel@tonic-gate /* 14907c478bd9Sstevel@tonic-gate * while here, give new owner a valid mapping to unlock 14917c478bd9Sstevel@tonic-gate * page so we don't get called again. 
14927c478bd9Sstevel@tonic-gate */ 14937c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, " and unlock mapping\n")); 14947c478bd9Sstevel@tonic-gate err = devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE, 14957c478bd9Sstevel@tonic-gate DEVMAP_ACCESS, PROT_WRITE); 14967c478bd9Sstevel@tonic-gate } 14977c478bd9Sstevel@tonic-gate return (err); 14987c478bd9Sstevel@tonic-gate } 14997c478bd9Sstevel@tonic-gate 15007c478bd9Sstevel@tonic-gate /* 15017c478bd9Sstevel@tonic-gate * Unload owner's mappings, release the lock and wakeup any sleepers 15027c478bd9Sstevel@tonic-gate * If trash, then the old owner is given a trash mapping 15037c478bd9Sstevel@tonic-gate * => old owner held lock too long and caused a timeout 15047c478bd9Sstevel@tonic-gate */ 15057c478bd9Sstevel@tonic-gate static int 15067c478bd9Sstevel@tonic-gate lock_giveup(SegLock *lp, int trash) 15077c478bd9Sstevel@tonic-gate { 15087c478bd9Sstevel@tonic-gate SegProc *owner = lp->owner; 15097c478bd9Sstevel@tonic-gate 15107c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, "winlock_giveup: lp=%p, owner=%p, trash %d\n", 15117c478bd9Sstevel@tonic-gate (void *)lp, (void *)ID(lp->owner), trash)); 15127c478bd9Sstevel@tonic-gate 15137c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&lp->mutex)); 15147c478bd9Sstevel@tonic-gate ASSERT(owner != NULL); 15157c478bd9Sstevel@tonic-gate 15167c478bd9Sstevel@tonic-gate /* 15177c478bd9Sstevel@tonic-gate * owner loses lockpage/unlockpage mappings and gains a 15187c478bd9Sstevel@tonic-gate * trashpage mapping, if needed. 15197c478bd9Sstevel@tonic-gate */ 15207c478bd9Sstevel@tonic-gate if (!trash) { 15217c478bd9Sstevel@tonic-gate /* 15227c478bd9Sstevel@tonic-gate * We do not handle errors in devmap_unload in the !trash case, 15237c478bd9Sstevel@tonic-gate * as the process is attempting to unmap/exit or otherwise 15247c478bd9Sstevel@tonic-gate * release the lock. Errors in unloading the mapping are not 15257c478bd9Sstevel@tonic-gate * going to affect that (unmap does not take error return). 15267c478bd9Sstevel@tonic-gate */ 15277c478bd9Sstevel@tonic-gate (void) devmap_unload(owner->lockseg, lp->cookie, PAGESIZE); 15287c478bd9Sstevel@tonic-gate (void) devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE); 15297c478bd9Sstevel@tonic-gate } else { 15307c478bd9Sstevel@tonic-gate int err; 15317c478bd9Sstevel@tonic-gate 15327c478bd9Sstevel@tonic-gate if (err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE)) { 15337c478bd9Sstevel@tonic-gate /* error unloading lockseg mapping. 
abort giveup */ 15347c478bd9Sstevel@tonic-gate return (err); 15357c478bd9Sstevel@tonic-gate } 15367c478bd9Sstevel@tonic-gate 15377c478bd9Sstevel@tonic-gate /* 15387c478bd9Sstevel@tonic-gate * old owner gets mapping to trash page so it can continue 15397c478bd9Sstevel@tonic-gate * devmap_umem_remap does a hat_unload (and does it holding 15407c478bd9Sstevel@tonic-gate * the right locks), so no need to devmap_unload on unlockseg 15417c478bd9Sstevel@tonic-gate */ 15427c478bd9Sstevel@tonic-gate if ((err = devmap_umem_remap(owner->unlockseg, winlock_dip, 15437c478bd9Sstevel@tonic-gate trashpage_cookie, 0, PAGESIZE, WINLOCK_PROT, 0, 0)) != 0) { 15447c478bd9Sstevel@tonic-gate /* error remapping to trash page, abort giveup */ 15457c478bd9Sstevel@tonic-gate return (err); 15467c478bd9Sstevel@tonic-gate } 15477c478bd9Sstevel@tonic-gate owner->flag |= TRASHPAGE; 15487c478bd9Sstevel@tonic-gate /* 15497c478bd9Sstevel@tonic-gate * Preload mapping to trash page by calling devmap_load 15507c478bd9Sstevel@tonic-gate * However, devmap_load can only be called on the faulting 15517c478bd9Sstevel@tonic-gate * process context and not on the owner's process context 15527c478bd9Sstevel@tonic-gate * we preload only if we happen to be in owner process context 15537c478bd9Sstevel@tonic-gate * Other processes will fault on the unlock mapping 15547c478bd9Sstevel@tonic-gate * and be given a trash mapping at that time. 15557c478bd9Sstevel@tonic-gate */ 15567c478bd9Sstevel@tonic-gate if (ID(owner) == CURPROC_ID) { 155719397407SSherry Moore (void) devmap_load(owner->unlockseg, lp->cookie, 155819397407SSherry Moore PAGESIZE, DEVMAP_ACCESS, PROT_WRITE); 15597c478bd9Sstevel@tonic-gate } 15607c478bd9Sstevel@tonic-gate } 15617c478bd9Sstevel@tonic-gate 15627c478bd9Sstevel@tonic-gate lp->owner = NULL; 15637c478bd9Sstevel@tonic-gate 15647c478bd9Sstevel@tonic-gate /* Clear the lock value in underlying page so new owner can grab it */ 15657c478bd9Sstevel@tonic-gate LOCK(lp) = 0; 15667c478bd9Sstevel@tonic-gate 15677c478bd9Sstevel@tonic-gate if (lp->sleepers) { 15687c478bd9Sstevel@tonic-gate DEBUGF(4, (CE_CONT, " waking up, lp=%p\n", (void *)lp)); 15697c478bd9Sstevel@tonic-gate cv_broadcast(&lp->locksleep); 15707c478bd9Sstevel@tonic-gate } 15717c478bd9Sstevel@tonic-gate return (0); 15727c478bd9Sstevel@tonic-gate } 15737c478bd9Sstevel@tonic-gate 15747c478bd9Sstevel@tonic-gate /* 15757c478bd9Sstevel@tonic-gate * destroy all allocated memory. 15767c478bd9Sstevel@tonic-gate */ 15777c478bd9Sstevel@tonic-gate 15787c478bd9Sstevel@tonic-gate static void 15797c478bd9Sstevel@tonic-gate lock_destroyall(void) 15807c478bd9Sstevel@tonic-gate { 15817c478bd9Sstevel@tonic-gate SegLock *lp, *lpnext; 15827c478bd9Sstevel@tonic-gate 15837c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&winlock_mutex)); 15847c478bd9Sstevel@tonic-gate ASSERT(lock_list == NULL); 15857c478bd9Sstevel@tonic-gate 15867c478bd9Sstevel@tonic-gate DEBUGF(1, (CE_CONT, "Lock list empty. 
Releasing free list\n")); 15877c478bd9Sstevel@tonic-gate for (lp = lock_free_list; lp != NULL; lp = lpnext) { 15887c478bd9Sstevel@tonic-gate mutex_enter(&lp->mutex); 15897c478bd9Sstevel@tonic-gate lpnext = lp->next; 15907c478bd9Sstevel@tonic-gate ASSERT(lp->clients == NULL); 15917c478bd9Sstevel@tonic-gate ASSERT(lp->owner == NULL); 15927c478bd9Sstevel@tonic-gate ASSERT(lp->alloccount == 0); 15937c478bd9Sstevel@tonic-gate mutex_destroy(&lp->mutex); 15947c478bd9Sstevel@tonic-gate cv_destroy(&lp->locksleep); 15957c478bd9Sstevel@tonic-gate kmem_free(lp, sizeof (SegLock)); 15967c478bd9Sstevel@tonic-gate } 15977c478bd9Sstevel@tonic-gate lock_free_list = NULL; 15987c478bd9Sstevel@tonic-gate next_lock = 0; 15997c478bd9Sstevel@tonic-gate } 16007c478bd9Sstevel@tonic-gate 16017c478bd9Sstevel@tonic-gate 16027c478bd9Sstevel@tonic-gate /* RFE: create mdb walkers instead of dump routines? */ 16037c478bd9Sstevel@tonic-gate static void 16047c478bd9Sstevel@tonic-gate seglock_dump_all(void) 16057c478bd9Sstevel@tonic-gate { 16067c478bd9Sstevel@tonic-gate SegLock *lp; 16077c478bd9Sstevel@tonic-gate 16087c478bd9Sstevel@tonic-gate mutex_enter(&winlock_mutex); 16097c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "ID\tKEY\tNALLOC\tATTCH\tOWNED\tLOCK\tWAITER\n"); 16107c478bd9Sstevel@tonic-gate 16117c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "Lock List:\n"); 16127c478bd9Sstevel@tonic-gate for (lp = lock_list; lp != NULL; lp = lp->next) { 16137c478bd9Sstevel@tonic-gate mutex_enter(&lp->mutex); 16147c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "%d\t%d\t%u\t%c\t%c\t%c\t%d\n", 16157c478bd9Sstevel@tonic-gate lp->cookie, lp->key, lp->alloccount, 16167c478bd9Sstevel@tonic-gate lp->clients ? 'Y' : 'N', 16177c478bd9Sstevel@tonic-gate lp->owner ? 'Y' : 'N', 16187c478bd9Sstevel@tonic-gate lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N', 16197c478bd9Sstevel@tonic-gate lp->sleepers); 16207c478bd9Sstevel@tonic-gate mutex_exit(&lp->mutex); 16217c478bd9Sstevel@tonic-gate } 16227c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "Free Lock List:\n"); 16237c478bd9Sstevel@tonic-gate for (lp = lock_free_list; lp != NULL; lp = lp->next) { 16247c478bd9Sstevel@tonic-gate mutex_enter(&lp->mutex); 16257c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, "%d\t%d\t%u\t%c\t%c\t%c\t%d\n", 16267c478bd9Sstevel@tonic-gate lp->cookie, lp->key, lp->alloccount, 16277c478bd9Sstevel@tonic-gate lp->clients ? 'Y' : 'N', 16287c478bd9Sstevel@tonic-gate lp->owner ? 'Y' : 'N', 16297c478bd9Sstevel@tonic-gate lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N', 16307c478bd9Sstevel@tonic-gate lp->sleepers); 16317c478bd9Sstevel@tonic-gate mutex_exit(&lp->mutex); 16327c478bd9Sstevel@tonic-gate } 16337c478bd9Sstevel@tonic-gate 16347c478bd9Sstevel@tonic-gate #ifdef DEBUG 16357c478bd9Sstevel@tonic-gate if (lock_debug < 3) { 16367c478bd9Sstevel@tonic-gate mutex_exit(&winlock_mutex); 16377c478bd9Sstevel@tonic-gate return; 16387c478bd9Sstevel@tonic-gate } 16397c478bd9Sstevel@tonic-gate 16407c478bd9Sstevel@tonic-gate for (lp = lock_list; lp != NULL; lp = lp->next) { 16417c478bd9Sstevel@tonic-gate SegProc *sdp; 16427c478bd9Sstevel@tonic-gate 16437c478bd9Sstevel@tonic-gate mutex_enter(&lp->mutex); 16447c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, 16457c478bd9Sstevel@tonic-gate "lock %p, key=%d, cookie=%d, nalloc=%u, lock=%d, wait=%d\n", 16467c478bd9Sstevel@tonic-gate (void *)lp, lp->key, lp->cookie, lp->alloccount, 16477c478bd9Sstevel@tonic-gate lp->lockptr != 0 ? 
LOCK(lp) : -1, lp->sleepers); 16487c478bd9Sstevel@tonic-gate 16497c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, 16507c478bd9Sstevel@tonic-gate "style=%d, lockptr=%p, timeout=%ld, clients=%p, owner=%p\n", 16517c478bd9Sstevel@tonic-gate lp->style, (void *)lp->lockptr, lp->timeout, 16527c478bd9Sstevel@tonic-gate (void *)lp->clients, (void *)lp->owner); 16537c478bd9Sstevel@tonic-gate 16547c478bd9Sstevel@tonic-gate 16557c478bd9Sstevel@tonic-gate for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) { 16567c478bd9Sstevel@tonic-gate cmn_err(CE_CONT, " client %p%s, lp=%p, flag=%x, " 16577c478bd9Sstevel@tonic-gate "process tag=%p, lockseg=%p, unlockseg=%p\n", 16587c478bd9Sstevel@tonic-gate (void *)sdp, sdp == lp->owner ? " (owner)" : "", 16597c478bd9Sstevel@tonic-gate (void *)sdp->lp, sdp->flag, (void *)ID(sdp), 16607c478bd9Sstevel@tonic-gate (void *)sdp->lockseg, (void *)sdp->unlockseg); 16617c478bd9Sstevel@tonic-gate } 16627c478bd9Sstevel@tonic-gate mutex_exit(&lp->mutex); 16637c478bd9Sstevel@tonic-gate } 16647c478bd9Sstevel@tonic-gate #endif 16657c478bd9Sstevel@tonic-gate mutex_exit(&winlock_mutex); 16667c478bd9Sstevel@tonic-gate } 16677c478bd9Sstevel@tonic-gate 16687c478bd9Sstevel@tonic-gate #include <sys/modctl.h> 16697c478bd9Sstevel@tonic-gate 16707c478bd9Sstevel@tonic-gate static struct modldrv modldrv = { 16717c478bd9Sstevel@tonic-gate &mod_driverops, /* Type of module. This one is a driver */ 167219397407SSherry Moore "Winlock Driver", /* Name of the module */ 16737c478bd9Sstevel@tonic-gate &winlock_ops, /* driver ops */ 16747c478bd9Sstevel@tonic-gate }; 16757c478bd9Sstevel@tonic-gate 16767c478bd9Sstevel@tonic-gate static struct modlinkage modlinkage = { 16777c478bd9Sstevel@tonic-gate MODREV_1, 16787c478bd9Sstevel@tonic-gate (void *)&modldrv, 16797c478bd9Sstevel@tonic-gate 0, 16807c478bd9Sstevel@tonic-gate 0, 16817c478bd9Sstevel@tonic-gate 0 16827c478bd9Sstevel@tonic-gate }; 16837c478bd9Sstevel@tonic-gate 16847c478bd9Sstevel@tonic-gate int 16857c478bd9Sstevel@tonic-gate _init(void) 16867c478bd9Sstevel@tonic-gate { 16877c478bd9Sstevel@tonic-gate int e; 16887c478bd9Sstevel@tonic-gate 16897c478bd9Sstevel@tonic-gate mutex_init(&winlock_mutex, NULL, MUTEX_DEFAULT, NULL); 16907c478bd9Sstevel@tonic-gate e = mod_install(&modlinkage); 16917c478bd9Sstevel@tonic-gate if (e) { 16927c478bd9Sstevel@tonic-gate mutex_destroy(&winlock_mutex); 16937c478bd9Sstevel@tonic-gate } 16947c478bd9Sstevel@tonic-gate return (e); 16957c478bd9Sstevel@tonic-gate } 16967c478bd9Sstevel@tonic-gate 16977c478bd9Sstevel@tonic-gate 16987c478bd9Sstevel@tonic-gate int 16997c478bd9Sstevel@tonic-gate _info(struct modinfo *modinfop) 17007c478bd9Sstevel@tonic-gate { 17017c478bd9Sstevel@tonic-gate return (mod_info(&modlinkage, modinfop)); 17027c478bd9Sstevel@tonic-gate } 17037c478bd9Sstevel@tonic-gate 17047c478bd9Sstevel@tonic-gate int 17057c478bd9Sstevel@tonic-gate _fini(void) 17067c478bd9Sstevel@tonic-gate { 17077c478bd9Sstevel@tonic-gate int e; 17087c478bd9Sstevel@tonic-gate 17097c478bd9Sstevel@tonic-gate e = mod_remove(&modlinkage); 17107c478bd9Sstevel@tonic-gate if (e == 0) { 17117c478bd9Sstevel@tonic-gate mutex_destroy(&winlock_mutex); 17127c478bd9Sstevel@tonic-gate } 17137c478bd9Sstevel@tonic-gate return (e); 17147c478bd9Sstevel@tonic-gate } 1715
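
/*
 * Illustrative user-level usage (not part of the driver): a minimal sketch
 * of how a client might allocate a lock context and set its timeout via the
 * ioctls implemented above.  The device path and the WINLOCKSETTIMEOUT and
 * WINLOCKFREE command names are assumptions, presumed to come from
 * <sys/winlockio.h>; only WINLOCKALLOC and the sy_* structure fields appear
 * in this file.  The lock and unlock pages would then be mmap()ed using the
 * returned sy_ident as the mapping offset, and locking/unlocking happen via
 * those mappings as described in the driver's top-of-file comment.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/winlockio.h>
 *	#include <fcntl.h>
 *
 *	int
 *	grab_lock_context(uint_t *identp)
 *	{
 *		struct winlockalloc wla;
 *		struct winlocktimeout wlt;
 *		int fd;
 *
 *		if ((fd = open("/dev/winlock", O_RDWR)) < 0)
 *			return (-1);
 *
 *		wla.sy_key = 0;			(0 creates a new context)
 *		if (ioctl(fd, WINLOCKALLOC, &wla) < 0)
 *			return (-1);
 *
 *		wlt.sy_ident = wla.sy_ident;
 *		wlt.sy_timeout = 100;		(milliseconds before a waiter may break the lock)
 *		wlt.sy_flags = 0;
 *		if (ioctl(fd, WINLOCKSETTIMEOUT, &wlt) < 0)
 *			return (-1);
 *
 *		*identp = wla.sy_ident;
 *		return (fd);
 *	}
 *
 * When the context is no longer needed, the client would unmap the pages and
 * release its reference, e.g. ioctl(fd, WINLOCKFREE, &wla.sy_ident).
 */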