/*-
 * Copyright (c) 2014 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void	sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
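
/*
 * Usage sketch (illustrative only; the caller below is an assumption,
 * not part of this file): a consumer maps a page into KVA, uses the
 * mapping, then drops its reference.  sf_buf_kva() and the SFB_* flags
 * are declared in <sys/sf_buf.h>.
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf == NULL)
 *		return (EINTR);		(the sleep was interrupted)
 *	bcopy(src, (char *)sf_buf_kva(sf), PAGE_SIZE);
 *	sf_buf_free(sf);
 */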

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return ((struct sf_buf *)m);
#endif

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#if defined(SMP) && defined(SFBUF_CPUSET)
			sf_buf_shootdown(sf, flags);
#endif
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	sf_buf_map(sf, flags);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}
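
/*
 * Pinning sketch (illustrative only; the caller is an assumption, not
 * part of this file): the KASSERT in sf_buf_alloc() requires that a
 * thread requesting an SFB_CPUPRIVATE mapping be pinned to its CPU,
 * because such a mapping is only guaranteed to be valid on the CPU
 * that created it:
 *
 *	sched_pin();
 *	sf = sf_buf_alloc(m, SFB_CPUPRIVATE | SFB_NOWAIT);
 *	if (sf != NULL) {
 *		... use sf_buf_kva(sf) on this CPU only ...
 *		sf_buf_free(sf);
 *	}
 *	sched_unpin();
 */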

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  However, a freed
 * sf_buf retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_unmap(sf)) {
			sf->m = NULL;
			LIST_REMOVE(sf, list_entry);
		}
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Add a reference to an already-allocated sf_buf.
 */
void
sf_buf_ref(struct sf_buf *sf)
{

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
	if (SFBUF_OPTIONAL_DIRECT_MAP)
		return;
#endif

	mtx_lock(&sf_buf_lock);
	KASSERT(sf->ref_count > 0, ("%s: sf %p not allocated", __func__, sf));
	sf->ref_count++;
	mtx_unlock(&sf_buf_lock);
}

#ifdef SFBUF_PROCESS_PAGE
/*
 * Run the callback function on the sf_buf that holds the given page.
 */
boolean_t
sf_buf_process_page(vm_page_t m, void (*cb)(struct sf_buf *))
{
	struct sf_head *hash_list;
	struct sf_buf *sf;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			cb(sf);
			mtx_unlock(&sf_buf_lock);
			return (TRUE);
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (FALSE);
}
#endif	/* SFBUF_PROCESS_PAGE */
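
/*
 * Callback sketch (illustrative only; "frobber" is a hypothetical
 * example, not part of this file): when SFBUF_PROCESS_PAGE is defined,
 * sf_buf_process_page() runs a callback on the sf_buf that currently
 * holds a given page, with sf_buf_lock held across the lookup and the
 * callback:
 *
 *	static void
 *	frobber(struct sf_buf *sf)
 *	{
 *		... operate on sf_buf_kva(sf) ...
 *	}
 *
 *	if (!sf_buf_process_page(m, frobber))
 *		... no active sf_buf holds the page ...
 */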