]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kpi_socket.c
xnu-1228.5.18.tar.gz
[apple/xnu.git] / bsd / kern / kpi_socket.c
1 /*
2 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define __KPI__
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <sys/socketvar.h>
35 #include <sys/param.h>
36 #include <sys/proc.h>
37 #include <sys/errno.h>
38 #include <sys/malloc.h>
39 #include <sys/protosw.h>
40 #include <sys/domain.h>
41 #include <sys/mbuf.h>
42 #include <sys/fcntl.h>
43 #include <sys/filio.h>
44 #include <sys/uio_internal.h>
45 #include <kern/lock.h>
46
47 extern int soclose_locked(struct socket *so);
48 extern void soclose_wait_locked(struct socket *so);
49
50 errno_t sock_send_internal(
51 socket_t sock,
52 const struct msghdr *msg,
53 mbuf_t data,
54 int flags,
55 size_t *sentlen);
56
57 typedef void (*so_upcall)(struct socket *, caddr_t , int );
58
59
/*
 * Accept a pending connection on a listening socket and return the new,
 * connected socket in *new_sock.
 *
 * sock      - listening socket (must have SO_ACCEPTCONN set)
 * from      - optional buffer that receives the peer address
 * fromlen   - size of 'from' in bytes; copy is truncated to fit
 * flags     - only MSG_DONTWAIT is supported (non-blocking accept)
 * callback  - optional upcall installed on the new socket's receive
 *             (and, under CONFIG_SOWUPCALL, send) buffer
 * cookie    - opaque argument handed to the upcall
 * new_sock  - out: the accepted socket
 *
 * Returns 0 or an errno value (EINVAL, ENOTSUP, EWOULDBLOCK, ...).
 */
errno_t
sock_accept(
	socket_t sock,
	struct sockaddr *from,
	int fromlen,
	int flags,
	sock_upcall callback,
	void* cookie,
	socket_t *new_sock)
{
	struct sockaddr *sa;
	struct socket *new_so;
	lck_mtx_t *mutex_held;
	int dosocklock;
	errno_t error = 0;

	if (sock == NULL || new_sock == NULL) return EINVAL;
	socket_lock(sock, 1);
	if ((sock->so_options & SO_ACCEPTCONN) == 0) {
		/* not a listening socket */
		socket_unlock(sock, 1);
		return EINVAL;
	}
	if ((flags & ~(MSG_DONTWAIT)) != 0) {
		/* only MSG_DONTWAIT is understood here */
		socket_unlock(sock, 1);
		return ENOTSUP;
	}
	/* non-blocking and no completed connection queued: bail out now */
	if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
	    sock->so_comp.tqh_first == NULL) {
		socket_unlock(sock, 1);
		return EWOULDBLOCK;
	}

	/*
	 * Protocols with a per-socket lock need the accepted socket locked
	 * separately below (dosocklock); otherwise everything runs under
	 * the shared domain mutex.
	 */
	if (sock->so_proto->pr_getlock != NULL) {
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		dosocklock = 1;
	}
	else {
		mutex_held = sock->so_proto->pr_domain->dom_mtx;
		dosocklock = 0;
	}

	/* Sleep until a connection completes, an error posts, or a signal */
	while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
		if (sock->so_state & SS_CANTRCVMORE) {
			sock->so_error = ECONNABORTED;
			break;
		}
		error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH, "sock_accept", NULL);
		if (error) {
			socket_unlock(sock, 1);
			return (error);
		}
	}
	if (sock->so_error) {
		error = sock->so_error;
		sock->so_error = 0;
		socket_unlock(sock, 1);
		return (error);
	}

	/* Detach the first completed connection from the listen queue */
	new_so = TAILQ_FIRST(&sock->so_comp);
	TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
	sock->so_qlen--;

	/*
	 * Pass the pre-accepted socket to any interested socket filter(s).
	 * Upon failure, the socket would have been closed by the callee.
	 */
	if (new_so->so_filt != NULL) {
		/*
		 * Temporarily drop the listening socket's lock before we
		 * hand off control over to the socket filter(s), but keep
		 * a reference so that it won't go away. We'll grab it
		 * again once we're done with the filter(s).
		 */
		socket_unlock(sock, 0);
		if ((error = soacceptfilter(new_so)) != 0) {
			/* Drop reference on listening socket */
			sodereference(sock);
			return (error);
		}
		socket_lock(sock, 0);
	}

	if (dosocklock) {
		/* the new socket's lock must not already be held by us */
		lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
		    LCK_MTX_ASSERT_NOTOWNED);
		socket_lock(new_so, 1);
	}

	/* Finish the accept: detach from the listener, fetch the peer addr */
	new_so->so_state &= ~SS_COMP;
	new_so->so_head = NULL;
	(void) soacceptlock(new_so, &sa, 0);

	socket_unlock(sock, 1); /* release the head */

	/* Install the caller's upcall on the new socket, if requested */
	if (callback) {
		new_so->so_upcall = (so_upcall) callback;
		new_so->so_upcallarg = cookie;
		new_so->so_rcv.sb_flags |= SB_UPCALL;
#if CONFIG_SOWUPCALL
		new_so->so_snd.sb_flags |= SB_UPCALL;
#endif
	}

	/* Copy out as much of the peer address as the caller's buffer fits */
	if (sa && from)
	{
		if (fromlen > sa->sa_len) fromlen = sa->sa_len;
		memcpy(from, sa, fromlen);
	}
	if (sa) FREE(sa, M_SONAME);

	/*
	 * If the socket has been marked as inactive by soacceptfilter(),
	 * disallow further operations on it. We explicitly call shutdown
	 * on both data directions to ensure that SS_CANT{RCV,SEND}MORE
	 * states are set for the socket. This would also flush out data
	 * hanging off the receive list of this socket.
	 */
	if (new_so->so_flags & SOF_DEFUNCT) {
		(void) soshutdownlock(new_so, SHUT_RD);
		(void) soshutdownlock(new_so, SHUT_WR);
		(void) sodisconnectlocked(new_so);
	}

	*new_sock = new_so;
	if (dosocklock)
		socket_unlock(new_so, 1);
	return error;
}
189
190 errno_t
191 sock_bind(
192 socket_t sock,
193 const struct sockaddr *to)
194 {
195 if (sock == NULL || to == NULL) return EINVAL;
196
197 return sobind(sock, (struct sockaddr*)to);
198 }
199
/*
 * Connect 'sock' to the address 'to'.
 *
 * With MSG_DONTWAIT (or a socket in SS_NBIO mode) the call does not wait
 * for the handshake: it returns EALREADY if a connect is already in
 * progress, or EINPROGRESS once one has been started.  Otherwise it
 * sleeps until the connection completes, fails, or a signal arrives.
 */
errno_t
sock_connect(
	socket_t sock,
	const struct sockaddr *to,
	int flags)
{
	int error = 0;
	lck_mtx_t *mutex_held;

	if (sock == NULL || to == NULL) return EINVAL;

	socket_lock(sock, 1);

	/* non-blocking, and a connect is already pending */
	if ((sock->so_state & SS_ISCONNECTING) &&
	    ((sock->so_state & SS_NBIO) != 0 ||
	     (flags & MSG_DONTWAIT) != 0)) {
		socket_unlock(sock, 1);
		return EALREADY;
	}
	error = soconnectlock(sock, (struct sockaddr*)to, 0);
	if (!error) {
		/* started OK; non-blocking callers don't wait for completion */
		if ((sock->so_state & SS_ISCONNECTING) &&
		    ((sock->so_state & SS_NBIO) != 0 || (flags & MSG_DONTWAIT) != 0)) {
			socket_unlock(sock, 1);
			return EINPROGRESS;
		}

		/* sleep on whichever mutex actually protects this socket */
		if (sock->so_proto->pr_getlock != NULL)
			mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		else
			mutex_held = sock->so_proto->pr_domain->dom_mtx;

		while ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
			error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH,
			    "sock_connect", NULL);
			if (error)
				break;
		}

		/* connect finished (or failed): pick up any pending error */
		if (error == 0) {
			error = sock->so_error;
			sock->so_error = 0;
		}
	}
	else {
		/* soconnectlock() failed outright: clear in-progress state */
		sock->so_state &= ~SS_ISCONNECTING;
	}
	socket_unlock(sock, 1);
	return error;
}
250
251 errno_t
252 sock_connectwait(
253 socket_t sock,
254 const struct timeval *tv)
255 {
256 lck_mtx_t * mutex_held;
257 errno_t retval = 0;
258 struct timespec ts;
259
260 socket_lock(sock, 1);
261
262 // Check if we're already connected or if we've already errored out
263 if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error) {
264 if (sock->so_error) {
265 retval = sock->so_error;
266 sock->so_error = 0;
267 }
268 else {
269 if ((sock->so_state & SS_ISCONNECTED) != 0)
270 retval = 0;
271 else
272 retval = EINVAL;
273 }
274 goto done;
275 }
276
277 // copied translation from timeval to hertz from SO_RCVTIMEO handling
278 if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
279 tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
280 retval = EDOM;
281 goto done;
282 }
283
284 ts.tv_sec = tv->tv_sec;
285 ts.tv_nsec = (tv->tv_usec * NSEC_PER_USEC);
286 if ( (ts.tv_sec + (ts.tv_nsec/NSEC_PER_SEC))/100 > SHRT_MAX) {
287 retval = EDOM;
288 goto done;
289 }
290
291 if (sock->so_proto->pr_getlock != NULL)
292 mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
293 else
294 mutex_held = sock->so_proto->pr_domain->dom_mtx;
295
296 msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK, "sock_connectwait", &ts);
297
298 // Check if we're still waiting to connect
299 if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
300 retval = EINPROGRESS;
301 goto done;
302 }
303
304 if (sock->so_error) {
305 retval = sock->so_error;
306 sock->so_error = 0;
307 }
308
309 done:
310 socket_unlock(sock, 1);
311 return retval;
312 }
313
314 errno_t
315 sock_nointerrupt(
316 socket_t sock,
317 int on)
318 {
319 socket_lock(sock, 1);
320
321 if (on) {
322 sock->so_rcv.sb_flags |= SB_NOINTR; // This isn't safe
323 sock->so_snd.sb_flags |= SB_NOINTR; // This isn't safe
324 }
325 else {
326 sock->so_rcv.sb_flags &= ~SB_NOINTR; // This isn't safe
327 sock->so_snd.sb_flags &= ~SB_NOINTR; // This isn't safe
328 }
329
330 socket_unlock(sock, 1);
331
332 return 0;
333 }
334
335 errno_t
336 sock_getpeername(socket_t sock, struct sockaddr *peername, int peernamelen)
337 {
338 int error;
339 struct sockaddr *sa = NULL;
340
341 if (sock == NULL || peername == NULL || peernamelen < 0)
342 return (EINVAL);
343
344 socket_lock(sock, 1);
345 if (!(sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING))) {
346 socket_unlock(sock, 1);
347 return (ENOTCONN);
348 }
349 error = sock_getaddr(sock, &sa, 1);
350 socket_unlock(sock, 1);
351 if (error == 0) {
352 if (peernamelen > sa->sa_len)
353 peernamelen = sa->sa_len;
354 memcpy(peername, sa, peernamelen);
355 FREE(sa, M_SONAME);
356 }
357 return (error);
358 }
359
360 errno_t
361 sock_getsockname(socket_t sock, struct sockaddr *sockname, int socknamelen)
362 {
363 int error;
364 struct sockaddr *sa = NULL;
365
366 if (sock == NULL || sockname == NULL || socknamelen < 0)
367 return (EINVAL);
368
369 socket_lock(sock, 1);
370 error = sock_getaddr(sock, &sa, 0);
371 socket_unlock(sock, 1);
372 if (error == 0) {
373 if (socknamelen > sa->sa_len)
374 socknamelen = sa->sa_len;
375 memcpy(sockname, sa, socknamelen);
376 FREE(sa, M_SONAME);
377 }
378 return (error);
379 }
380
381 errno_t
382 sock_getaddr(socket_t sock, struct sockaddr **psa, int peer)
383 {
384 int error;
385
386 if (sock == NULL || psa == NULL)
387 return (EINVAL);
388
389 *psa = NULL;
390 error = peer ? sock->so_proto->pr_usrreqs->pru_peeraddr(sock, psa) :
391 sock->so_proto->pr_usrreqs->pru_sockaddr(sock, psa);
392
393 if (error == 0 && *psa == NULL) {
394 error = ENOMEM;
395 } else if (error != 0 && *psa != NULL) {
396 FREE(*psa, M_SONAME);
397 *psa = NULL;
398 }
399 return (error);
400 }
401
402 void
403 sock_freeaddr(struct sockaddr *sa)
404 {
405 if (sa != NULL)
406 FREE(sa, M_SONAME);
407 }
408
409 errno_t
410 sock_getsockopt(
411 socket_t sock,
412 int level,
413 int optname,
414 void *optval,
415 int *optlen)
416 {
417 int error = 0;
418 struct sockopt sopt;
419
420 if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
421 sopt.sopt_dir = SOPT_GET;
422 sopt.sopt_level = level;
423 sopt.sopt_name = optname;
424 sopt.sopt_val = CAST_USER_ADDR_T(optval);
425 sopt.sopt_valsize = *optlen;
426 sopt.sopt_p = NULL;
427 error = sogetopt(sock, &sopt); /* will lock socket */
428 if (error == 0) *optlen = sopt.sopt_valsize;
429 return error;
430 }
431
432 errno_t
433 sock_ioctl(
434 socket_t sock,
435 unsigned long request,
436 void *argp)
437 {
438 return soioctl(sock, request, argp, NULL); /* will lock socket */
439 }
440
441 errno_t
442 sock_setsockopt(
443 socket_t sock,
444 int level,
445 int optname,
446 const void *optval,
447 int optlen)
448 {
449 struct sockopt sopt;
450
451 if (sock == NULL || optval == NULL) return EINVAL;
452 sopt.sopt_dir = SOPT_SET;
453 sopt.sopt_level = level;
454 sopt.sopt_name = optname;
455 sopt.sopt_val = CAST_USER_ADDR_T(optval);
456 sopt.sopt_valsize = optlen;
457 sopt.sopt_p = NULL;
458 return sosetopt(sock, &sopt); /* will lock socket */
459 }
460
461 errno_t
462 sock_listen(
463 socket_t sock,
464 int backlog)
465 {
466 if (sock == NULL) return EINVAL;
467 return solisten(sock, backlog); /* will lock socket */
468 }
469
470 static errno_t
471 sock_receive_internal(
472 socket_t sock,
473 struct msghdr *msg,
474 mbuf_t *data,
475 int flags,
476 size_t *recvdlen)
477 {
478 uio_t auio;
479 struct mbuf *control = NULL;
480 int error = 0;
481 int length = 0;
482 struct sockaddr *fromsa;
483 char uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ];
484
485 if (sock == NULL) return EINVAL;
486
487 auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
488 0, UIO_SYSSPACE, UIO_READ,
489 &uio_buf[0], sizeof(uio_buf));
490 if (msg && data == NULL) {
491 int i;
492 struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;
493
494 for (i = 0; i < msg->msg_iovlen; i++) {
495 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
496 }
497 if (uio_resid(auio) < 0) return EINVAL;
498 }
499 else {
500 uio_setresid(auio, (uio_resid(auio) + *recvdlen));
501 }
502 length = uio_resid(auio);
503
504 if (recvdlen)
505 *recvdlen = 0;
506
507 if (msg && msg->msg_control) {
508 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
509 if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
510 control = m_get(M_NOWAIT, MT_CONTROL);
511 if (control == NULL) return ENOMEM;
512 memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
513 control->m_len = msg->msg_controllen;
514 }
515
516 /* let pru_soreceive handle the socket locking */
517 error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
518 data, control ? &control : NULL, &flags);
519 if (error) goto cleanup;
520
521 if (recvdlen)
522 *recvdlen = length - uio_resid(auio);
523 if (msg) {
524 msg->msg_flags = flags;
525
526 if (msg->msg_name)
527 {
528 int salen;
529 salen = msg->msg_namelen;
530 if (msg->msg_namelen > 0 && fromsa != 0)
531 {
532 salen = MIN(salen, fromsa->sa_len);
533 memcpy(msg->msg_name, fromsa,
534 msg->msg_namelen > fromsa->sa_len ? fromsa->sa_len : msg->msg_namelen);
535 }
536 }
537
538 if (msg->msg_control)
539 {
540 struct mbuf* m = control;
541 u_char* ctlbuf = msg->msg_control;
542 int clen = msg->msg_controllen;
543 msg->msg_controllen = 0;
544
545 while (m && clen > 0)
546 {
547 unsigned int tocopy;
548 if (clen >= m->m_len)
549 {
550 tocopy = m->m_len;
551 }
552 else
553 {
554 msg->msg_flags |= MSG_CTRUNC;
555 tocopy = clen;
556 }
557 memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
558 ctlbuf += tocopy;
559 clen -= tocopy;
560 m = m->m_next;
561 }
562 msg->msg_controllen = (u_int32_t)ctlbuf - (u_int32_t)msg->msg_control;
563 }
564 }
565
566 cleanup:
567 if (control) m_freem(control);
568 if (fromsa) FREE(fromsa, M_SONAME);
569 return error;
570 }
571
572 errno_t
573 sock_receive(
574 socket_t sock,
575 struct msghdr *msg,
576 int flags,
577 size_t *recvdlen)
578 {
579 if ((msg == NULL) ||
580 (msg->msg_iovlen < 1) ||
581 (msg->msg_iov[0].iov_len == 0) ||
582 (msg->msg_iov[0].iov_base == NULL))
583 return EINVAL;
584 return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
585 }
586
587 errno_t
588 sock_receivembuf(
589 socket_t sock,
590 struct msghdr *msg,
591 mbuf_t *data,
592 int flags,
593 size_t *recvlen)
594 {
595 if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg &&
596 (msg->msg_iov != NULL || msg->msg_iovlen != 0)))
597 return EINVAL;
598 return sock_receive_internal(sock, msg, data, flags, recvlen);
599 }
600
601 errno_t
602 sock_send_internal(
603 socket_t sock,
604 const struct msghdr *msg,
605 mbuf_t data,
606 int flags,
607 size_t *sentlen)
608 {
609 uio_t auio = NULL;
610 struct mbuf *control = NULL;
611 int error = 0;
612 int datalen = 0;
613 char uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ];
614
615 if (sock == NULL) {
616 error = EINVAL;
617 goto errorout;
618 }
619
620 if (data == 0 && msg != NULL) {
621 struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;
622
623 auio = uio_createwithbuffer(msg->msg_iovlen, 0, UIO_SYSSPACE, UIO_WRITE,
624 &uio_buf[0], sizeof(uio_buf));
625 if (tempp != NULL)
626 {
627 int i;
628
629 for (i = 0; i < msg->msg_iovlen; i++) {
630 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
631 }
632
633 if (uio_resid(auio) < 0) {
634 error = EINVAL;
635 goto errorout;
636 }
637 }
638 }
639
640 if (sentlen)
641 *sentlen = 0;
642
643 if (auio)
644 datalen = uio_resid(auio);
645 else
646 datalen = data->m_pkthdr.len;
647
648 if (msg && msg->msg_control)
649 {
650 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
651 if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
652 control = m_get(M_NOWAIT, MT_CONTROL);
653 if (control == NULL) {
654 error = ENOMEM;
655 goto errorout;
656 }
657 memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
658 control->m_len = msg->msg_controllen;
659 }
660
661 error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg != NULL ?
662 (struct sockaddr*)msg->msg_name : NULL, auio, data, control, flags);
663
664 /*
665 * Residual data is possible in the case of IO vectors but not
666 * in the mbuf case since the latter is treated as atomic send.
667 * If pru_sosend() consumed a portion of the iovecs data and
668 * the error returned is transient, treat it as success; this
669 * is consistent with sendit() behavior.
670 */
671 if (auio != NULL && uio_resid(auio) != datalen &&
672 (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
673 error = 0;
674
675 if (error == 0 && sentlen != NULL) {
676 if (auio != NULL)
677 *sentlen = datalen - uio_resid(auio);
678 else
679 *sentlen = datalen;
680 }
681
682 return error;
683
684 /*
685 * In cases where we detect an error before returning, we need to
686 * free the mbuf chain if there is one. sosend (and pru_sosend) will
687 * free the mbuf chain if they encounter an error.
688 */
689 errorout:
690 if (control)
691 m_freem(control);
692 if (data)
693 m_freem(data);
694 if (sentlen)
695 *sentlen = 0;
696 return error;
697 }
698
699 errno_t
700 sock_send(
701 socket_t sock,
702 const struct msghdr *msg,
703 int flags,
704 size_t *sentlen)
705 {
706 if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1)
707 return EINVAL;
708 return sock_send_internal(sock, msg, NULL, flags, sentlen);
709 }
710
711 errno_t
712 sock_sendmbuf(
713 socket_t sock,
714 const struct msghdr *msg,
715 mbuf_t data,
716 int flags,
717 size_t *sentlen)
718 {
719 if (data == NULL || (msg &&
720 (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
721 if (data)
722 m_freem(data);
723 return EINVAL;
724 }
725 return sock_send_internal(sock, msg, data, flags, sentlen);
726 }
727
728 errno_t
729 sock_shutdown(
730 socket_t sock,
731 int how)
732 {
733 if (sock == NULL) return EINVAL;
734 return soshutdown(sock, how);
735 }
736
737
738 errno_t
739 sock_socket(
740 int domain,
741 int type,
742 int protocol,
743 sock_upcall callback,
744 void* context,
745 socket_t *new_so)
746 {
747 int error = 0;
748 if (new_so == NULL) return EINVAL;
749 /* socreate will create an initial so_count */
750 error = socreate(domain, new_so, type, protocol);
751 if (error == 0 && callback)
752 {
753 (*new_so)->so_rcv.sb_flags |= SB_UPCALL;
754 #if CONFIG_SOWUPCALL
755 (*new_so)->so_snd.sb_flags |= SB_UPCALL;
756 #endif
757 (*new_so)->so_upcall = (so_upcall)callback;
758 (*new_so)->so_upcallarg = context;
759 }
760 return error;
761 }
762
763 void
764 sock_close(
765 socket_t sock)
766 {
767 if (sock == NULL) return;
768 soclose(sock);
769 }
770
771 /* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04)*/
772 void
773 sock_retain(
774 socket_t sock)
775 {
776 if (sock == NULL) return;
777 socket_lock(sock, 1);
778 sock->so_retaincnt++;
779 sock->so_usecount++; /* add extra reference for holding the socket */
780 socket_unlock(sock, 1);
781 }
782
/* Do we want this to be APPLE_PRIVATE API? */
/*
 * Drop a reference taken with sock_retain().  When the last retain is
 * released and only the extra use count plus the creation reference
 * remain (so_usecount == 2), the socket is closed here; otherwise just
 * the extra use count is dropped.
 */
void
sock_release(socket_t sock)
{
	if (sock == NULL)
		return;
	socket_lock(sock, 1);

	/* wait for any in-flight upcall to finish before tearing down */
	if (sock->so_flags & SOF_UPCALLINUSE)
		soclose_wait_locked(sock);

	sock->so_retaincnt--;
	if (sock->so_retaincnt < 0)
		panic("sock_release: negative retain count for sock=%p "
		    "cnt=%x\n", sock, sock->so_retaincnt);
	if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2)) {
		/* close socket only if the FD is not holding it */
		soclose_locked(sock);
	} else {
		/* remove extra reference holding the socket */
		sock->so_usecount--;
	}
	socket_unlock(sock, 1);
}
807
808 errno_t
809 sock_setpriv(
810 socket_t sock,
811 int on)
812 {
813 if (sock == NULL) return EINVAL;
814 socket_lock(sock, 1);
815 if (on)
816 {
817 sock->so_state |= SS_PRIV;
818 }
819 else
820 {
821 sock->so_state &= ~SS_PRIV;
822 }
823 socket_unlock(sock, 1);
824 return 0;
825 }
826
827 int
828 sock_isconnected(
829 socket_t sock)
830 {
831 int retval;
832 socket_lock(sock, 1);
833 retval = (sock->so_state & SS_ISCONNECTED) != 0;
834 socket_unlock(sock, 1);
835 return (retval);
836 }
837
838 int
839 sock_isnonblocking(
840 socket_t sock)
841 {
842 int retval;
843 socket_lock(sock, 1);
844 retval = (sock->so_state & SS_NBIO) != 0;
845 socket_unlock(sock, 1);
846 return (retval);
847 }
848
849 errno_t
850 sock_gettype(
851 socket_t sock,
852 int *outDomain,
853 int *outType,
854 int *outProtocol)
855 {
856 socket_lock(sock, 1);
857 if (outDomain)
858 *outDomain = sock->so_proto->pr_domain->dom_family;
859 if (outType)
860 *outType = sock->so_type;
861 if (outProtocol)
862 *outProtocol = sock->so_proto->pr_protocol;
863 socket_unlock(sock, 1);
864 return 0;
865 }
866
867 /*
868 * Return the listening socket of a pre-accepted socket. It returns the
869 * listener (so_head) value of a given socket. This is intended to be
870 * called by a socket filter during a filter attach (sf_attach) callback.
871 * The value returned by this routine is safe to be used only in the
872 * context of that callback, because we hold the listener's lock across
873 * the sflt_initsock() call.
874 */
875 socket_t
876 sock_getlistener(socket_t sock)
877 {
878 return (sock->so_head);
879 }