/*-
 * Copyright (c) 2014 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers.
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list.
 */
static struct mtx sf_buf_lock;
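
/*
 * Illustrative note (comment only, nothing compiled): an sf_buf stays in
 * the hash table even after its last reference is dropped, so a later
 * sf_buf_alloc() for the same page can revive the cached mapping instead
 * of building a new one.  A minimal lookup sketch, assuming "m" is a
 * wired page supplied by the caller:
 *
 *	struct sf_head *bucket = &sf_buf_active[SF_BUF_HASH(m)];
 *	struct sf_buf *sf;
 *
 *	LIST_FOREACH(sf, bucket, list_entry)
 *		if (sf->m == m)
 *			break;	(active or cached mapping found)
 */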

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	KASSERT(sf_bufs, ("%s: malloc failure", __func__));
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return ((struct sf_buf *)m);
#endif

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#if defined(SMP) && defined(SFBUF_CPUSET)
			sf_buf_shootdown(sf, flags);
#endif
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	sf_buf_map(sf, flags);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  However, a freed sf_buf
 * retains its virtual-to-physical mapping until it is recycled or
 * reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_unmap(sf)) {
			sf->m = NULL;
			LIST_REMOVE(sf, list_entry);
		}
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}
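
/*
 * A minimal consumer sketch (illustrative only, not compiled here):
 * sendfile(2)-style code wires a page, maps it through an sf_buf, uses
 * the KVA, and drops its reference.  sf_buf_kva(9) is the accessor from
 * <sys/sf_buf.h>; "m" (a wired page) and "dst" are assumed to exist:
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf != NULL) {		(NULL if interrupted by a signal)
 *		bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
 *		sf_buf_free(sf);
 *	}
 */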

#ifdef SFBUF_PROCESS_PAGE
/*
 * Run the callback function on the sf_buf, if any, that holds the
 * given page.
 */
boolean_t
sf_buf_process_page(vm_page_t m, void (*cb)(struct sf_buf *))
{
	struct sf_head *hash_list;
	struct sf_buf *sf;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			cb(sf);
			mtx_unlock(&sf_buf_lock);
			return (TRUE);
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (FALSE);
}
#endif	/* SFBUF_PROCESS_PAGE */
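
/*
 * Example caller (illustrative only): machine-dependent code can use
 * sf_buf_process_page() to learn whether a page is still mapped by an
 * sf_buf and to act on it; the callback runs with sf_buf_lock held.
 * "example_cb" and "sf_hits" are hypothetical:
 *
 *	static int sf_hits;
 *
 *	static void
 *	example_cb(struct sf_buf *sf __unused)
 *	{
 *		sf_hits++;
 *	}
 *
 *	...
 *	if (sf_buf_process_page(m, example_cb))
 *		(the page was found in an active sf_buf)
 */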