1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1990, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #ifndef _SYS_SOCKETVAR_H_
33 #define _SYS_SOCKETVAR_H_
34
35 /*
36 * Socket generation count type. Also used in xinpcb, xtcpcb, xunpcb.
37 */
38 typedef uint64_t so_gen_t;
39
40 #if defined(_KERNEL) || defined(_WANT_SOCKET)
41 #include <sys/queue.h> /* for TAILQ macros */
42 #include <sys/selinfo.h> /* for struct selinfo */
43 #include <sys/_lock.h>
44 #include <sys/_mutex.h>
45 #include <sys/osd.h>
46 #include <sys/_sx.h>
47 #include <sys/sockbuf.h>
48 #include <sys/_task.h>
49 #ifdef _KERNEL
50 #include <sys/caprights.h>
51 #include <sys/sockopt.h>
52 #else
53 #include <stdbool.h>
54 #endif
55
56 struct vnet;
57
58 /*
59 * Kernel structure per socket.
60 * Contains send and receive buffer queues,
61 * handle on protocol and pointer to protocol
62 * private data and error information.
63 */
64 typedef int so_upcall_t(struct socket *, void *, int);
65 typedef void so_dtor_t(struct socket *);
66
67 struct socket;
68
/*
 * Which accept queue, if any, a not-yet-accepted connection sits on.
 * SQ_NONE means the socket is not queued on a listening socket.
 */
enum socket_qstate {
	SQ_NONE = 0,		/* not on any accept queue */
	SQ_INCOMP = 0x0800,	/* on sol_incomp */
	SQ_COMP = 0x1000,	/* on sol_comp */
};
74
75
/*
 * State for one direction of a socket splice: data received on 'src' is
 * forwarded to 'dst' by worker context, up to 'max' bytes.  A spliced
 * socket points here via so_splice (sink side) / so_splice_back (source
 * side).
 */
struct so_splice {
	struct socket *src;	/* socket data is read from */
	struct socket *dst;	/* socket data is written to */
	off_t max;		/* maximum bytes to splice, or -1 */
	struct mtx mtx;		/* protects 'state' transitions */
	unsigned int wq_index;	/* worker queue this splice is bound to */
	enum so_splice_state {
		SPLICE_INIT,	/* embryonic state, don't queue work yet */
		SPLICE_IDLE,	/* waiting for work to arrive */
		SPLICE_QUEUED,	/* a wakeup has queued some work */
		SPLICE_RUNNING,	/* currently transferring data */
		SPLICE_CLOSING,	/* waiting for work to drain */
		SPLICE_CLOSED,	/* unsplicing, terminal state */
		SPLICE_EXCEPTION, /* I/O error or limit, implicit unsplice */
	} state;
	/* presumably enforces an optional splice timeout — confirm vs. uipc_socket.c */
	struct timeout_task timeout;
	STAILQ_ENTRY(so_splice) next;	/* linkage on a work queue */
};
94
/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (cr) locked by SOCK_RECVBUF_LOCK(so)
 * (cs) locked by SOCK_SENDBUF_LOCK(so)
 * (e) locked by SOLISTEN_LOCK() of corresponding listening socket.
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 * (ir,is) locked by recv or send I/O locks.
 * (k) locked by KTLS workqueue mutex
 * NOTE(review): so_sigio below is keyed [sg], which this table does not
 * define — presumably the SIGIO subsystem lock; confirm and document.
 */
TAILQ_HEAD(accept_queue, socket);
/*
 * Kernel structure per socket (see comment above the typedefs earlier in
 * this header).  The trailing anonymous union discriminates between a
 * regular data-flow socket and a listening socket; SOLISTENING() tells
 * which arm is currently valid.
 */
struct socket {
	struct mtx so_lock;
	volatile u_int so_count;	/* (b / refcount) */
	struct selinfo so_rdsel;	/* (b/cr) for so_rcv/so_comp */
	struct selinfo so_wrsel;	/* (b/cs) for so_snd */
	int so_options;		/* (b) from socket call, see socket.h */
	short so_type;		/* (a) generic type, see socket.h */
	short so_state;		/* (b) internal state flags SS_* */
	void *so_pcb;		/* protocol control block */
	struct vnet *so_vnet;		/* (a) network stack instance */
	struct protosw *so_proto;	/* (a) protocol handle */
	short so_linger;	/* time to linger close(2) */
	short so_timeo;		/* (g) connection timeout */
	u_short so_error;	/* (f) error affecting connection */
	u_short so_rerror;	/* (f) error affecting connection */
	struct sigio *so_sigio;	/* [sg] information for async I/O or
				   out of band data (SIGURG) */
	struct ucred *so_cred;	/* (a) user credentials */
	struct label *so_label;	/* (b) MAC label for socket */
	/* NB: generation count must not be first. */
	so_gen_t so_gencnt;	/* (h) generation count */
	void *so_emuldata;	/* (b) private data for emulators */
	so_dtor_t *so_dtor;	/* (b) optional destructor */
	struct osd osd;		/* Object Specific extensions */
	/*
	 * so_fibnum, so_user_cookie and friends can be used to attach
	 * some user-specified metadata to a socket, which then can be
	 * used by the kernel for various actions.
	 * so_user_cookie is used by ipfw/dummynet.
	 */
	int so_fibnum;		/* routing domain for this socket */
	uint32_t so_user_cookie;

	int so_ts_clock;	/* type of the clock used for timestamps */
	uint32_t so_max_pacing_rate;	/* (f) TX rate limit in bytes/s */
	struct so_splice *so_splice;	/* (b) splice state for sink */
	struct so_splice *so_splice_back; /* (b) splice state for source */
	off_t so_splice_sent;	/* (ir) splice bytes sent so far */

	/*
	 * Mutexes to prevent interleaving of socket I/O.  These have to be
	 * outside of the socket buffers in order to interlock with listen(2).
	 */
	struct sx so_snd_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_snd_mtx;

	struct sx so_rcv_sx __aligned(CACHE_LINE_SIZE);
	struct mtx so_rcv_mtx;

	union {
		/* Regular (data flow) socket. */
		struct {
			/* (cr, cs) Receive and send buffers. */
			struct sockbuf so_rcv, so_snd;

			/* (e) Our place on accept queue. */
			TAILQ_ENTRY(socket) so_list;
			struct socket *so_listen;	/* (b) */
			enum socket_qstate so_qstate;	/* (b) */
			/* (b) cached MAC label for peer */
			struct label *so_peerlabel;
			u_long so_oobmark;	/* chars to oob mark */

			/* (k) Our place on KTLS RX work queue. */
			STAILQ_ENTRY(socket) so_ktls_rx_list;
		};
		/*
		 * Listening socket, where accepts occur, is so_listen in all
		 * subsidiary sockets.  If so_listen is NULL, socket is not
		 * related to an accept.  For a listening socket itself
		 * sol_incomp queues partially completed connections, while
		 * sol_comp is a queue of connections ready to be accepted.
		 * If a connection is aborted and it has so_listen set, then
		 * it has to be pulled out of either sol_incomp or sol_comp.
		 * We allow connections to queue up based on current queue
		 * lengths and limit on number of queued connections for this
		 * socket.
		 */
		struct {
			/* (e) queue of partial unaccepted connections */
			struct accept_queue sol_incomp;
			/* (e) queue of complete unaccepted connections */
			struct accept_queue sol_comp;
			u_int sol_qlen;		/* (e) sol_comp length */
			u_int sol_incqlen;	/* (e) sol_incomp length */
			u_int sol_qlimit;	/* (e) queue limit */

			/* accept_filter(9) optional data */
			struct accept_filter *sol_accept_filter;
			void *sol_accept_filter_arg;	/* saved filter args */
			char *sol_accept_filter_str;	/* saved user args */

			/* Optional upcall, for kernel socket. */
			so_upcall_t *sol_upcall;	/* (e) */
			void *sol_upcallarg;		/* (e) */

			/* Socket buffer parameters, to be copied to
			 * dataflow sockets, accepted from this one. */
			int sol_sbrcv_lowat;
			int sol_sbsnd_lowat;
			u_int sol_sbrcv_hiwat;
			u_int sol_sbsnd_hiwat;
			short sol_sbrcv_flags;
			short sol_sbsnd_flags;
			sbintime_t sol_sbrcv_timeo;
			sbintime_t sol_sbsnd_timeo;

			/* Information tracking listen queue overflows. */
			struct timeval sol_lastover;	/* (e) */
			int sol_overcount;		/* (e) */
		};
	};
};
222 #endif /* defined(_KERNEL) || defined(_WANT_SOCKET) */
223
224 /*
225 * Socket state bits.
226 *
227 * Historically, these bits were all kept in the so_state field.
228 * They are now split into separate, lock-specific fields.
229 * so_state maintains basic socket state protected by the socket lock.
230 * so_qstate holds information about the socket accept queues.
231 * Each socket buffer also has a state field holding information
232 * relevant to that socket buffer (can't send, rcv).
233 * Many fields will be read without locks to improve performance and avoid
234 * lock order issues. However, this approach must be used with caution.
235 */
236 #define SS_ISCONNECTED 0x0002 /* socket connected to a peer */
237 #define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */
238 #define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */
239 #define SS_NBIO 0x0100 /* non-blocking ops */
240 #define SS_ASYNC 0x0200 /* async i/o notify */
241 /* was SS_ISCONFIRMING 0x0400 */
242 #define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */
243
244 #ifdef _KERNEL
245
/* Per-socket mutex: protects fields keyed (b) in the locking table above. */
#define	SOCK_MTX(so)		(&(so)->so_lock)
#define	SOCK_LOCK(so)		mtx_lock(&(so)->so_lock)
#define	SOCK_OWNED(so)		mtx_owned(&(so)->so_lock)
#define	SOCK_UNLOCK(so)		mtx_unlock(&(so)->so_lock)
#define	SOCK_LOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_OWNED)
#define	SOCK_UNLOCK_ASSERT(so)	mtx_assert(&(so)->so_lock, MA_NOTOWNED)

/* True once listen(2) has marked the socket as accepting connections. */
#define	SOLISTENING(sol)	(((sol)->so_options & SO_ACCEPTCONN) != 0)
/*
 * Listening sockets share so_lock with regular sockets; these wrappers
 * additionally assert that the socket really is in the listening state,
 * since only then is the listening arm of the union valid.
 */
#define	SOLISTEN_LOCK(sol)	do {					\
	mtx_lock(&(sol)->so_lock);					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_TRYLOCK(sol)	mtx_trylock(&(sol)->so_lock)
#define	SOLISTEN_UNLOCK(sol)	do {					\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
	mtx_unlock(&(sol)->so_lock);					\
} while (0)
#define	SOLISTEN_LOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_OWNED);				\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
#define	SOLISTEN_UNLOCK_ASSERT(sol)	do {				\
	mtx_assert(&(sol)->so_lock, MA_NOTOWNED);			\
	KASSERT(SOLISTENING(sol),					\
	    ("%s: %p not listening", __func__, (sol)));			\
} while (0)
275
/*
 * Socket buffer locks.  These are strongly preferred over SOCKBUF_LOCK(sb)
 * macros, as we are moving towards protocol specific socket buffers.
 */
/* Receive-direction buffer lock: protects fields keyed (cr). */
#define	SOCK_RECVBUF_MTX(so)						\
	(&(so)->so_rcv_mtx)
#define	SOCK_RECVBUF_LOCK(so)						\
	mtx_lock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_RECVBUF_MTX(so))
#define	SOCK_RECVBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_OWNED)
#define	SOCK_RECVBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_RECVBUF_MTX(so), MA_NOTOWNED)

/* Send-direction buffer lock: protects fields keyed (cs). */
#define	SOCK_SENDBUF_MTX(so)						\
	(&(so)->so_snd_mtx)
#define	SOCK_SENDBUF_LOCK(so)						\
	mtx_lock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_UNLOCK(so)						\
	mtx_unlock(SOCK_SENDBUF_MTX(so))
#define	SOCK_SENDBUF_LOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_OWNED)
#define	SOCK_SENDBUF_UNLOCK_ASSERT(so)					\
	mtx_assert(SOCK_SENDBUF_MTX(so), MA_NOTOWNED)

/* Direction-generic wrappers; 'which' selects SO_RCV or SO_SND. */
#define	SOCK_BUF_LOCK(so, which)					\
	mtx_lock(soeventmtx(so, which))
#define	SOCK_BUF_UNLOCK(so, which)					\
	mtx_unlock(soeventmtx(so, which))
#define	SOCK_BUF_LOCK_ASSERT(so, which)					\
	mtx_assert(soeventmtx(so, which), MA_OWNED)
#define	SOCK_BUF_UNLOCK_ASSERT(so, which)				\
	mtx_assert(soeventmtx(so, which), MA_NOTOWNED)
310
311 static inline struct sockbuf *
sobuf(struct socket * so,const sb_which which)312 sobuf(struct socket *so, const sb_which which)
313 {
314 return (which == SO_RCV ? &so->so_rcv : &so->so_snd);
315 }
316
317 static inline struct mtx *
soeventmtx(struct socket * so,const sb_which which)318 soeventmtx(struct socket *so, const sb_which which)
319 {
320 return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so));
321 }
322
323 /*
324 * Macros for sockets and socket buffering.
325 */
326
327
/*
 * Is 'so' the sink (isspliced) or the source (issplicedback) end of an
 * active splice?  The macro parameter is now fully parenthesized so an
 * expression argument expands correctly (CERT PRE01-C).
 */
#define	isspliced(so)		((so)->so_splice != NULL &&		\
				    (so)->so_splice->src != NULL)
#define	issplicedback(so)	((so)->so_splice_back != NULL &&	\
				    (so)->so_splice_back->dst != NULL)
332 /*
333 * Flags to soiolock().
334 */
335 #define SBL_WAIT 0x00000001 /* Wait if not immediately available. */
336 #define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */
337 #define SBL_VALID (SBL_WAIT | SBL_NOINTR)
338
339 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
340
/*
 * Per-direction I/O serialization, implemented with the so_snd_sx /
 * so_rcv_sx locks declared in struct socket.  These serialize whole
 * send/receive operations (see soiolock()); SBL_* flags control
 * sleeping behavior.
 */
#define	SOCK_IO_SEND_LOCK(so, flags)					\
	soiolock((so), &(so)->so_snd_sx, (flags))
#define	SOCK_IO_SEND_UNLOCK(so)						\
	soiounlock(&(so)->so_snd_sx)
#define	SOCK_IO_SEND_ASSERT_LOCKED(so)					\
	sx_assert(&(so)->so_snd_sx, SA_LOCKED)
#define	SOCK_IO_RECV_LOCK(so, flags)					\
	soiolock((so), &(so)->so_rcv_sx, (flags))
#define	SOCK_IO_RECV_UNLOCK(so)						\
	soiounlock(&(so)->so_rcv_sx)
#define	SOCK_IO_RECV_ASSERT_LOCKED(so)					\
	sx_assert(&(so)->so_rcv_sx, SA_LOCKED)

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so)						\
	((so)->so_proto->pr_flags & PR_ATOMIC)
357
/*
 * Can we read something from so?  soreadabledata() is true when enough
 * data is buffered (sb_lowat watermark reached) or a pending error is
 * waiting to be reported; _soreadable() additionally reports true at
 * EOF (SBS_CANTRCVMORE).
 */
#define	soreadabledata(so)						\
	(sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat ||		\
	(so)->so_error || (so)->so_rerror)
#define	_soreadable(so)							\
	(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))
364
365 static inline bool
soreadable(struct socket * so)366 soreadable(struct socket *so)
367 {
368 if (isspliced(so))
369 return (false);
370 return (_soreadable(so));
371 }
372
/*
 * Can we write something to so?  True when there is watermark-sized
 * space in the send buffer and the socket is connected (or the protocol
 * does not require a connection), or when a write would immediately
 * report EOF (SBS_CANTSENDMORE) or a pending error.
 */
#define	sowriteable(so)							\
	((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&		\
	(((so)->so_state&SS_ISCONNECTED) ||				\
	((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) ||		\
	((so)->so_snd.sb_state & SBS_CANTSENDMORE) ||			\
	(so)->so_error)
380
/*
 * soref()/sorele() ref-count the socket structure.
 * soref() may be called without owning socket lock, but in that case a
 * caller must own something that holds socket, and so_count must be not 0.
 * Note that you must still explicitly close the socket, but the last ref
 * count will free the structure.
 */
#define	soref(so)	refcount_acquire(&(so)->so_count)
/*
 * sorele() must be called without the socket lock held: the fast path
 * drops a non-final reference locklessly, and only the final release
 * takes SOCK_LOCK() to call sorele_locked().
 */
#define	sorele(so) do {							\
	SOCK_UNLOCK_ASSERT(so);						\
	if (!refcount_release_if_not_last(&(so)->so_count)) {		\
		SOCK_LOCK(so);						\
		sorele_locked(so);					\
	}								\
} while (0)
396
397 /*
398 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
399 * avoid a non-atomic test-and-wakeup. However, sowakeup is
400 * responsible for releasing the lock if it is called. We unlock only
401 * if we don't call into sowakeup. If any code is introduced that
402 * directly invokes the underlying sowakeup() primitives, it must
403 * maintain the same semantics.
404 */
405 #define sorwakeup(so) do { \
406 SOCK_RECVBUF_LOCK(so); \
407 sorwakeup_locked(so); \
408 } while (0)
409
410 #define sowwakeup(so) do { \
411 SOCK_SENDBUF_LOCK(so); \
412 sowwakeup_locked(so); \
413 } while (0)
414
/*
 * An accept_filter(9): a loadable module that defers waking accept(2)
 * on a listening socket until the filter's condition is satisfied.
 * Registered filters are kept on a global list via accf_next.
 */
struct accept_filter {
	/* unique name used to select the filter via setsockopt(2) */
	char	accf_name[16];
	/* called on socket events to decide whether to release the connection */
	int	(*accf_callback)
		(struct socket *so, void *arg, int waitflag);
	/* instantiate per-socket filter state from the user-supplied string */
	void *	(*accf_create)
		(struct socket *so, char *arg);
	/* tear down per-socket filter state */
	void	(*accf_destroy)
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;
};
425
426 #define ACCEPT_FILTER_DEFINE(modname, filtname, cb, create, destroy, ver) \
427 static struct accept_filter modname##_filter = { \
428 .accf_name = filtname, \
429 .accf_callback = cb, \
430 .accf_create = create, \
431 .accf_destroy = destroy, \
432 }; \
433 static moduledata_t modname##_mod = { \
434 .name = __XSTRING(modname), \
435 .evhand = accept_filt_generic_mod_event, \
436 .priv = &modname##_filter, \
437 }; \
438 DECLARE_MODULE(modname, modname##_mod, SI_SUB_DRIVERS, \
439 SI_ORDER_MIDDLE); \
440 MODULE_VERSION(modname, ver)
441
442 #ifdef MALLOC_DECLARE
443 MALLOC_DECLARE(M_ACCF);
444 MALLOC_DECLARE(M_PCB);
445 MALLOC_DECLARE(M_SONAME);
446 #endif
447
448 /*
449 * Socket specific helper hook point identifiers
450 * Do not leave holes in the sequence, hook registration is a loop.
451 */
452 #define HHOOK_SOCKET_OPT 0
453 #define HHOOK_SOCKET_CREATE 1
454 #define HHOOK_SOCKET_RCV 2
455 #define HHOOK_SOCKET_SND 3
456 #define HHOOK_FILT_SOREAD 4
457 #define HHOOK_FILT_SOWRITE 5
458 #define HHOOK_SOCKET_CLOSE 6
459 #define HHOOK_SOCKET_NEWCONN 7
460 #define HHOOK_SOCKET_LAST HHOOK_SOCKET_NEWCONN
461
/* Argument bundle passed to socket helper hook (HHOOK_SOCKET_*) handlers. */
struct socket_hhook_data {
	struct socket	*so;
	struct mbuf	*m;
	void		*hctx;		/* hook point specific data */
	int		status;
};
468
469 extern int maxsockets;
470 extern u_long sb_max;
471 extern so_gen_t so_gencnt;
472
473 struct file;
474 struct filecaps;
475 struct filedesc;
476 struct mbuf;
477 struct sockaddr;
478 struct ucred;
479 struct uio;
480 enum shutdown_how;
481
482 /* Return values for socket upcalls. */
483 #define SU_OK 0
484 #define SU_ISCONNECTED 1
485
486 /*
487 * From uipc_socket and friends
488 */
489 int getsockaddr(struct sockaddr **namp, const struct sockaddr *uaddr,
490 size_t len);
491 int getsock_cap(struct thread *td, int fd, cap_rights_t *rightsp,
492 struct file **fpp, struct filecaps *havecaps);
493 int getsock(struct thread *td, int fd, cap_rights_t *rightsp,
494 struct file **fpp);
495 void soabort(struct socket *so);
496 int soaccept(struct socket *so, struct sockaddr *sa);
497 int sopeeraddr(struct socket *so, struct sockaddr *sa);
498 int sosockaddr(struct socket *so, struct sockaddr *sa);
499 void soaio_enqueue(struct task *task);
500 void soaio_rcv(void *context, int pending);
501 void soaio_snd(void *context, int pending);
502 int socheckuid(struct socket *so, uid_t uid);
503 int sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
504 int sobindat(int fd, struct socket *so, struct sockaddr *nam,
505 struct thread *td);
506 int soclose(struct socket *so);
507 int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
508 int soconnectat(int fd, struct socket *so, struct sockaddr *nam,
509 struct thread *td);
510 int soconnect2(struct socket *so1, struct socket *so2);
511 int socreate(int dom, struct socket **aso, int type, int proto,
512 struct ucred *cred, struct thread *td);
513 int sodisconnect(struct socket *so);
514 void sodtor_set(struct socket *, so_dtor_t *);
515 struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
516 void sohasoutofband(struct socket *so);
517 int solisten(struct socket *so, int backlog, struct thread *td);
518 void solisten_proto(struct socket *so, int backlog);
519 void solisten_proto_abort(struct socket *so);
520 int solisten_proto_check(struct socket *so);
521 bool solisten_enqueue(struct socket *, int);
522 int solisten_dequeue(struct socket *, struct socket **, int);
523 struct socket *
524 solisten_clone(struct socket *);
525 struct socket *
526 sonewconn(struct socket *head, int connstatus);
527 struct socket *
528 sopeeloff(struct socket *);
529 int sopoll_generic(struct socket *so, int events, struct thread *td);
530 int soaio_queue_generic(struct socket *so, struct kaiocb *job);
531 int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
532 struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
533 int soreceive_stream(struct socket *so, struct sockaddr **paddr,
534 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
535 int *flagsp);
536 int soreceive_dgram(struct socket *so, struct sockaddr **paddr,
537 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
538 int *flagsp);
539 int soreceive_generic(struct socket *so, struct sockaddr **paddr,
540 struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
541 int *flagsp);
542 void sorele_locked(struct socket *so);
543 void sodealloc(struct socket *);
544 int soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
545 void sorflush(struct socket *so);
546 int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
547 struct mbuf *top, struct mbuf *control, int flags,
548 struct thread *td);
549 int sousrsend(struct socket *so, struct sockaddr *addr, struct uio *uio,
550 struct mbuf *control, int flags, struct proc *);
551 int sosend_dgram(struct socket *so, struct sockaddr *addr,
552 struct uio *uio, struct mbuf *top, struct mbuf *control,
553 int flags, struct thread *td);
554 int sosend_generic(struct socket *so, struct sockaddr *addr,
555 struct uio *uio, struct mbuf *top, struct mbuf *control,
556 int flags, struct thread *td);
557 int sosetfib(struct socket *so, int fibnum);
558 int soshutdown(struct socket *so, enum shutdown_how);
559 void soupcall_clear(struct socket *, sb_which);
560 void soupcall_set(struct socket *, sb_which, so_upcall_t, void *);
561 void solisten_upcall_set(struct socket *, so_upcall_t, void *);
562 void sorwakeup_locked(struct socket *);
563 void sowwakeup_locked(struct socket *);
564 void sowakeup_aio(struct socket *, sb_which);
565 void solisten_wakeup(struct socket *);
566 int selsocket(struct socket *so, int events, struct timeval *tv,
567 struct thread *td);
568 void soisconnected(struct socket *so);
569 void soisconnecting(struct socket *so);
570 void soisdisconnected(struct socket *so);
571 void soisdisconnecting(struct socket *so);
572 void socantrcvmore(struct socket *so);
573 void socantrcvmore_locked(struct socket *so);
574 void socantsendmore(struct socket *so);
575 void socantsendmore_locked(struct socket *so);
576 void soroverflow(struct socket *so);
577 void soroverflow_locked(struct socket *so);
578 int soiolock(struct socket *so, struct sx *sx, int flags);
579 void soiounlock(struct sx *sx);
580
581 /*
582 * Socket splicing routines.
583 */
584 void so_splice_dispatch(struct so_splice *sp);
585
586 /*
587 * Accept filter functions (duh).
588 */
589 int accept_filt_add(struct accept_filter *filt);
590 int accept_filt_del(char *name);
591 struct accept_filter *accept_filt_get(char *name);
592 #ifdef ACCEPT_FILTER_MOD
593 #ifdef SYSCTL_DECL
594 SYSCTL_DECL(_net_inet_accf);
595 #endif
596 int accept_filt_generic_mod_event(module_t mod, int event, void *data);
597 #endif
598
599 int pr_listen_notsupp(struct socket *so, int backlog, struct thread *td);
600
601 #endif /* _KERNEL */
602
/*
 * Structure to export socket from kernel to utilities, via sysctl(3).
 * The spare fields suggest the layout is consumed by userland as a
 * stable ABI; do not reorder or resize existing members.
 */
struct xsocket {
	ksize_t		xso_len;	/* length of this structure */
	kvaddr_t	xso_so;		/* kernel address of struct socket */
	kvaddr_t	so_pcb;		/* kernel address of struct inpcb */
	uint64_t	so_oobmark;
	kvaddr_t	so_splice_so;	/* kernel address of spliced socket */
	int64_t		so_spare64[7];	/* reserved for future use */
	int32_t		xso_protocol;
	int32_t		xso_family;
	uint32_t	so_qlen;
	uint32_t	so_incqlen;
	uint32_t	so_qlimit;
	pid_t		so_pgid;
	uid_t		so_uid;
	int32_t		so_fibnum;
	int32_t		so_spare32[7];	/* reserved for future use */
	int16_t		so_type;
	int16_t		so_options;
	int16_t		so_linger;
	int16_t		so_state;
	int16_t		so_timeo;
	uint16_t	so_error;
	/* Snapshot of one socket buffer, filled by sbtoxsockbuf(). */
	struct xsockbuf {
		uint32_t	sb_cc;
		uint32_t	sb_hiwat;
		uint32_t	sb_mbcnt;
		uint32_t	sb_spare0;	/* was sb_mcnt */
		uint32_t	sb_spare1;	/* was sb_ccnt */
		uint32_t	sb_mbmax;
		int32_t		sb_lowat;
		int32_t		sb_timeo;
		int16_t		sb_flags;
	} so_rcv, so_snd;
};
640
641 #ifdef _KERNEL
642 void sotoxsocket(struct socket *so, struct xsocket *xso);
643 void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
644 #endif
645
646 /*
647 * Socket buffer state bits. Exported via libprocstat(3).
648 */
649 #define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */
650 #define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */
651 #define SBS_RCVATMARK 0x0040 /* at mark on input */
652
653 #endif /* !_SYS_SOCKETVAR_H_ */
654