/*-
 * Copyright (c) 2014 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

static int nsfbufs;
static int nsfbufspeak;
static int nsfbufsused;

SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");

static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)
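/*
 * (Added commentary, not from the upstream file.)  SF_BUF_HASH() keys
 * on the page's index within the global vm_page_array, masked by
 * sf_buf_hashmask (a power of two minus one, from hashinit(9)), so a
 * given vm_page always lands in the same bucket.  A worked example:
 * with sf_buf_hashmask == 0xff, the pages at indices 0x104 and 0x204
 * both hash to bucket 0x04 and share one collision chain.
 */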
static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	if (PMAP_HAS_DMAP)
		return;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}

/*
 * Get an sf_buf from the freelist.  May block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	if (PMAP_HAS_DMAP)
		return ((struct sf_buf *)m);

	KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0,
	    ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned"));
	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
#if defined(SMP) && defined(SFBUF_CPUSET)
			sf_buf_shootdown(sf, flags);
#endif
			goto done;
		}
	}
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;
		sf_buf_alloc_want++;
		SFSTAT_INC(sf_allocwait);
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	sf_buf_map(sf, flags);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}

/*
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero.  A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	if (PMAP_HAS_DMAP)
		return;

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		if (sf_buf_unmap(sf)) {
			sf->m = NULL;
			LIST_REMOVE(sf, list_entry);
		}
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

void
sf_buf_ref(struct sf_buf *sf)
{

	if (PMAP_HAS_DMAP)
		return;

	mtx_lock(&sf_buf_lock);
	KASSERT(sf->ref_count > 0, ("%s: sf %p not allocated", __func__, sf));
	sf->ref_count++;
	mtx_unlock(&sf_buf_lock);
}
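/*
 * Example usage (an illustrative sketch added here, not part of the
 * upstream file; "dst" is a hypothetical destination buffer).  A
 * consumer such as sendfile(2) maps a wired page, accesses it through
 * sf_buf_kva(9), and drops its reference when done:
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf == NULL)
 *		return (EINTR);
 *	bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
 *	sf_buf_free(sf);
 *
 * sf_buf_alloc() returns NULL here only if the sleep was interrupted
 * by a signal (SFB_CATCH) or no buffer was free (SFB_NOWAIT).  On
 * direct-map platforms (PMAP_HAS_DMAP) the calls are cheap wrappers:
 * the page itself is returned and no per-buffer mapping is created or
 * torn down.
 */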
#ifdef SFBUF_PROCESS_PAGE
/*
 * Run the callback function on the sf_buf that holds the given page.
 */
boolean_t
sf_buf_process_page(vm_page_t m, void (*cb)(struct sf_buf *))
{
	struct sf_head *hash_list;
	struct sf_buf *sf;

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			cb(sf);
			mtx_unlock(&sf_buf_lock);
			return (TRUE);
		}
	}
	mtx_unlock(&sf_buf_lock);
	return (FALSE);
}
#endif	/* SFBUF_PROCESS_PAGE */
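/*
 * Illustrative sketch (an assumption, not from the upstream file):
 * sf_buf_process_page() lets machine-dependent code visit the active
 * mapping of a page without taking a new reference, e.g. to maintain
 * cache coherency.  Note the callback runs with sf_buf_lock held, so
 * it must not sleep.  A hypothetical ARM-style consumer:
 *
 *	static void
 *	example_wbinv_cb(struct sf_buf *sf)
 *	{
 *		cpu_dcache_wbinv_range(sf_buf_kva(sf), PAGE_SIZE);
 *	}
 *
 *	if (!sf_buf_process_page(m, example_wbinv_cb))
 *		; hypothetical fallback: the page had no sf_buf mapping
 */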