]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kpi_socket.c
357e1f40436a4dda94e33de060be4ba3c2266085
[apple/xnu.git] / bsd / kern / kpi_socket.c
1 /*
2 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define __KPI__
30 #include <sys/kernel.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <sys/socketvar.h>
34 #include <sys/param.h>
35 #include <sys/proc.h>
36 #include <sys/errno.h>
37 #include <sys/malloc.h>
38 #include <sys/protosw.h>
39 #include <sys/domain.h>
40 #include <sys/mbuf.h>
41 #include <sys/fcntl.h>
42 #include <sys/filio.h>
43 #include <sys/uio_internal.h>
44 #include <kern/lock.h>
45
46 extern void *memcpy(void *, const void *, size_t);
47 extern int soclose_locked(struct socket *so);
48
49 errno_t sock_send_internal(
50 socket_t sock,
51 const struct msghdr *msg,
52 mbuf_t data,
53 int flags,
54 size_t *sentlen);
55
56
57
58 errno_t
59 sock_accept(
60 socket_t sock,
61 struct sockaddr *from,
62 int fromlen,
63 int flags,
64 sock_upcall callback,
65 void* cookie,
66 socket_t *new_sock)
67 {
68 struct sockaddr *sa;
69 struct socket *new_so;
70 lck_mtx_t *mutex_held;
71 int dosocklock;
72 errno_t error = 0;
73
74 if (sock == NULL || new_sock == NULL) return EINVAL;
75 socket_lock(sock, 1);
76 if ((sock->so_options & SO_ACCEPTCONN) == 0) {
77 socket_unlock(sock, 1);
78 return EINVAL;
79 }
80 if ((flags & ~(MSG_DONTWAIT)) != 0) {
81 socket_unlock(sock, 1);
82 return ENOTSUP;
83 }
84 if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
85 sock->so_comp.tqh_first == NULL) {
86 socket_unlock(sock, 1);
87 return EWOULDBLOCK;
88 }
89
90 if (sock->so_proto->pr_getlock != NULL) {
91 mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
92 dosocklock = 1;
93 }
94 else {
95 mutex_held = sock->so_proto->pr_domain->dom_mtx;
96 dosocklock = 0;
97 }
98
99 while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
100 if (sock->so_state & SS_CANTRCVMORE) {
101 sock->so_error = ECONNABORTED;
102 break;
103 }
104 error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH, "sock_accept", 0);
105 if (error) {
106 socket_unlock(sock, 1);
107 return (error);
108 }
109 }
110 if (sock->so_error) {
111 error = sock->so_error;
112 sock->so_error = 0;
113 socket_unlock(sock, 1);
114 return (error);
115 }
116
117 new_so = TAILQ_FIRST(&sock->so_comp);
118 TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
119 sock->so_qlen--;
120 socket_unlock(sock, 1); /* release the head */
121
122 if (dosocklock) {
123 lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
124 LCK_MTX_ASSERT_NOTOWNED);
125 socket_lock(new_so, 1);
126 }
127
128 new_so->so_state &= ~SS_COMP;
129 new_so->so_head = NULL;
130 soacceptlock(new_so, &sa, 0);
131
132 if (callback) {
133 new_so->so_upcall = callback;
134 new_so->so_upcallarg = cookie;
135 new_so->so_rcv.sb_flags |= SB_UPCALL;
136 }
137
138 if (sa && from)
139 {
140 if (fromlen > sa->sa_len) fromlen = sa->sa_len;
141 memcpy(from, sa, fromlen);
142 }
143 if (sa) FREE(sa, M_SONAME);
144 *new_sock = new_so;
145 if (dosocklock)
146 socket_unlock(new_so, 1);
147 return error;
148 }
149
150 errno_t
151 sock_bind(
152 socket_t sock,
153 const struct sockaddr *to)
154 {
155 if (sock == NULL || to == NULL) return EINVAL;
156
157 return sobind(sock, (struct sockaddr*)to);
158 }
159
/*
 * sock_connect
 *
 * Initiate a connection to 'to'.  If the socket is non-blocking (or
 * MSG_DONTWAIT is passed), returns EALREADY if a connect is already in
 * flight, or EINPROGRESS once the connect has been started.  Otherwise
 * sleeps (interruptibly) until the connection completes or fails.
 */
errno_t
sock_connect(
	socket_t sock,
	const struct sockaddr *to,
	int flags)
{
	int error = 0;
	lck_mtx_t *mutex_held;

	if (sock == NULL || to == NULL) return EINVAL;

	socket_lock(sock, 1);

	/* non-blocking and already mid-connect: report EALREADY */
	if ((sock->so_state & SS_ISCONNECTING) &&
		((sock->so_state & SS_NBIO) != 0 ||
		 (flags & MSG_DONTWAIT) != 0)) {
		socket_unlock(sock, 1);
		return EALREADY;
	}
	error = soconnectlock(sock, (struct sockaddr*)to, 0);
	if (!error) {
		/* non-blocking: connect started, don't wait for completion */
		if ((sock->so_state & SS_ISCONNECTING) &&
			((sock->so_state & SS_NBIO) != 0 || (flags & MSG_DONTWAIT) != 0)) {
			socket_unlock(sock, 1);
			return EINPROGRESS;
		}

		/* pick the mutex msleep must atomically drop/reacquire */
		if (sock->so_proto->pr_getlock != NULL)
			mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		else
			mutex_held = sock->so_proto->pr_domain->dom_mtx;

		/* blocking: sleep until the connect completes or errors out */
		while ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
			error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH,
					"sock_connect", 0);
			if (error)
				break;
		}

		/* report any asynchronous connect failure */
		if (error == 0) {
			error = sock->so_error;
			sock->so_error = 0;
		}
	}
	else {
		/* connect attempt failed outright; clear the in-progress state */
		sock->so_state &= ~SS_ISCONNECTING;
	}
	socket_unlock(sock, 1);
	return error;
}
210
211 errno_t
212 sock_connectwait(
213 socket_t sock,
214 const struct timeval *tv)
215 {
216 lck_mtx_t * mutex_held;
217 errno_t retval = 0;
218 struct timespec ts;
219
220 socket_lock(sock, 1);
221
222 // Check if we're already connected or if we've already errored out
223 if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error) {
224 if (sock->so_error) {
225 retval = sock->so_error;
226 sock->so_error = 0;
227 }
228 else {
229 if ((sock->so_state & SS_ISCONNECTED) != 0)
230 retval = 0;
231 else
232 retval = EINVAL;
233 }
234 goto done;
235 }
236
237 // copied translation from timeval to hertz from SO_RCVTIMEO handling
238 if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
239 tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
240 retval = EDOM;
241 goto done;
242 }
243
244 ts.tv_sec = tv->tv_sec;
245 ts.tv_nsec = (tv->tv_usec * NSEC_PER_USEC);
246 if ( (ts.tv_sec + (ts.tv_nsec/NSEC_PER_SEC))/100 > SHRT_MAX) {
247 retval = EDOM;
248 goto done;
249 }
250
251 if (sock->so_proto->pr_getlock != NULL)
252 mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
253 else
254 mutex_held = sock->so_proto->pr_domain->dom_mtx;
255
256 msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK, "sock_connectwait", &ts);
257
258 // Check if we're still waiting to connect
259 if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
260 retval = EINPROGRESS;
261 goto done;
262 }
263
264 if (sock->so_error) {
265 retval = sock->so_error;
266 sock->so_error = 0;
267 }
268
269 done:
270 socket_unlock(sock, 1);
271 return retval;
272 }
273
274 errno_t
275 sock_nointerrupt(
276 socket_t sock,
277 int on)
278 {
279 socket_lock(sock, 1);
280
281 if (on) {
282 sock->so_rcv.sb_flags |= SB_NOINTR; // This isn't safe
283 sock->so_snd.sb_flags |= SB_NOINTR; // This isn't safe
284 }
285 else {
286 sock->so_rcv.sb_flags &= ~SB_NOINTR; // This isn't safe
287 sock->so_snd.sb_flags &= ~SB_NOINTR; // This isn't safe
288 }
289
290 socket_unlock(sock, 1);
291
292 return 0;
293 }
294
295 errno_t
296 sock_getpeername(
297 socket_t sock,
298 struct sockaddr *peername,
299 int peernamelen)
300 {
301 int error = 0;
302 struct sockaddr *sa = NULL;
303
304 if (sock == NULL || peername == NULL || peernamelen < 0) return EINVAL;
305 socket_lock(sock, 1);
306 if ((sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
307 socket_unlock(sock, 1);
308 return ENOTCONN;
309 }
310 error = sock->so_proto->pr_usrreqs->pru_peeraddr(sock, &sa);
311 if (!error)
312 {
313 if (peernamelen > sa->sa_len) peernamelen = sa->sa_len;
314 memcpy(peername, sa, peernamelen);
315 }
316 if (sa) FREE(sa, M_SONAME);
317 socket_unlock(sock, 1);
318 return error;
319 }
320
321 errno_t
322 sock_getsockname(
323 socket_t sock,
324 struct sockaddr *sockname,
325 int socknamelen)
326 {
327 int error = 0;
328 struct sockaddr *sa = NULL;
329
330 if (sock == NULL || sockname == NULL || socknamelen < 0) return EINVAL;
331 socket_lock(sock, 1);
332 error = sock->so_proto->pr_usrreqs->pru_sockaddr(sock, &sa);
333 if (!error)
334 {
335 if (socknamelen > sa->sa_len) socknamelen = sa->sa_len;
336 memcpy(sockname, sa, socknamelen);
337 }
338 if (sa) FREE(sa, M_SONAME);
339 socket_unlock(sock, 1);
340 return error;
341 }
342
343 errno_t
344 sock_getsockopt(
345 socket_t sock,
346 int level,
347 int optname,
348 void *optval,
349 int *optlen)
350 {
351 int error = 0;
352 struct sockopt sopt;
353
354 if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
355 sopt.sopt_dir = SOPT_GET;
356 sopt.sopt_level = level;
357 sopt.sopt_name = optname;
358 sopt.sopt_val = CAST_USER_ADDR_T(optval);
359 sopt.sopt_valsize = *optlen;
360 sopt.sopt_p = NULL;
361 error = sogetopt(sock, &sopt); /* will lock socket */
362 if (error == 0) *optlen = sopt.sopt_valsize;
363 return error;
364 }
365
366 errno_t
367 sock_ioctl(
368 socket_t sock,
369 unsigned long request,
370 void *argp)
371 {
372 return soioctl(sock, request, argp, NULL); /* will lock socket */
373 }
374
375 errno_t
376 sock_setsockopt(
377 socket_t sock,
378 int level,
379 int optname,
380 const void *optval,
381 int optlen)
382 {
383 struct sockopt sopt;
384
385 if (sock == NULL || optval == NULL) return EINVAL;
386 sopt.sopt_dir = SOPT_SET;
387 sopt.sopt_level = level;
388 sopt.sopt_name = optname;
389 sopt.sopt_val = CAST_USER_ADDR_T(optval);
390 sopt.sopt_valsize = optlen;
391 sopt.sopt_p = NULL;
392 return sosetopt(sock, &sopt); /* will lock socket */
393 }
394
395 errno_t
396 sock_listen(
397 socket_t sock,
398 int backlog)
399 {
400 if (sock == NULL) return EINVAL;
401 return solisten(sock, backlog); /* will lock socket */
402 }
403
404 static errno_t
405 sock_receive_internal(
406 socket_t sock,
407 struct msghdr *msg,
408 mbuf_t *data,
409 int flags,
410 size_t *recvdlen)
411 {
412 uio_t auio;
413 struct mbuf *control = NULL;
414 int error = 0;
415 int length = 0;
416 struct sockaddr *fromsa;
417 char uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ];
418
419 if (sock == NULL) return EINVAL;
420
421 auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
422 0, UIO_SYSSPACE, UIO_READ,
423 &uio_buf[0], sizeof(uio_buf));
424 if (msg && data == NULL) {
425 int i;
426 struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;
427
428 for (i = 0; i < msg->msg_iovlen; i++) {
429 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
430 }
431 if (uio_resid(auio) < 0) return EINVAL;
432 }
433 else {
434 uio_setresid(auio, (uio_resid(auio) + *recvdlen));
435 }
436 length = uio_resid(auio);
437
438 if (recvdlen)
439 *recvdlen = 0;
440
441 if (msg && msg->msg_control) {
442 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
443 if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
444 control = m_get(M_NOWAIT, MT_CONTROL);
445 if (control == NULL) return ENOMEM;
446 memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
447 control->m_len = msg->msg_controllen;
448 }
449
450 /* let pru_soreceive handle the socket locking */
451 error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
452 data, control ? &control : NULL, &flags);
453 if (error) goto cleanup;
454
455 if (recvdlen)
456 *recvdlen = length - uio_resid(auio);
457 if (msg) {
458 msg->msg_flags = flags;
459
460 if (msg->msg_name)
461 {
462 int salen;
463 salen = msg->msg_namelen;
464 if (msg->msg_namelen > 0 && fromsa != 0)
465 {
466 salen = MIN(salen, fromsa->sa_len);
467 memcpy(msg->msg_name, fromsa,
468 msg->msg_namelen > fromsa->sa_len ? fromsa->sa_len : msg->msg_namelen);
469 }
470 }
471
472 if (msg->msg_control)
473 {
474 struct mbuf* m = control;
475 u_char* ctlbuf = msg->msg_control;
476 int clen = msg->msg_controllen;
477 msg->msg_controllen = 0;
478
479 while (m && clen > 0)
480 {
481 unsigned int tocopy;
482 if (clen >= m->m_len)
483 {
484 tocopy = m->m_len;
485 }
486 else
487 {
488 msg->msg_flags |= MSG_CTRUNC;
489 tocopy = clen;
490 }
491 memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
492 ctlbuf += tocopy;
493 clen -= tocopy;
494 m = m->m_next;
495 }
496 msg->msg_controllen = (u_int32_t)ctlbuf - (u_int32_t)msg->msg_control;
497 }
498 }
499
500 cleanup:
501 if (control) m_freem(control);
502 if (fromsa) FREE(fromsa, M_SONAME);
503 return error;
504 }
505
506 errno_t
507 sock_receive(
508 socket_t sock,
509 struct msghdr *msg,
510 int flags,
511 size_t *recvdlen)
512 {
513 if ((msg == NULL) ||
514 (msg->msg_iovlen < 1) ||
515 (msg->msg_iov[0].iov_len == 0) ||
516 (msg->msg_iov[0].iov_base == NULL))
517 return EINVAL;
518 return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
519 }
520
521 errno_t
522 sock_receivembuf(
523 socket_t sock,
524 struct msghdr *msg,
525 mbuf_t *data,
526 int flags,
527 size_t *recvlen)
528 {
529 if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg &&
530 (msg->msg_iov != NULL || msg->msg_iovlen != 0)))
531 return EINVAL;
532 return sock_receive_internal(sock, msg, data, flags, recvlen);
533 }
534
535 errno_t
536 sock_send_internal(
537 socket_t sock,
538 const struct msghdr *msg,
539 mbuf_t data,
540 int flags,
541 size_t *sentlen)
542 {
543 uio_t auio = NULL;
544 struct mbuf *control = NULL;
545 int error = 0;
546 int datalen = 0;
547 char uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ];
548
549 if (sock == NULL) {
550 error = EINVAL;
551 goto errorout;
552 }
553
554 if (data == 0 && msg != NULL) {
555 struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;
556
557 auio = uio_createwithbuffer(msg->msg_iovlen, 0, UIO_SYSSPACE, UIO_WRITE,
558 &uio_buf[0], sizeof(uio_buf));
559 if (tempp != NULL)
560 {
561 int i;
562
563 for (i = 0; i < msg->msg_iovlen; i++) {
564 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
565 }
566
567 if (uio_resid(auio) < 0) {
568 error = EINVAL;
569 goto errorout;
570 }
571 }
572 }
573
574 if (sentlen)
575 *sentlen = 0;
576
577 if (auio)
578 datalen = uio_resid(auio);
579 else
580 datalen = data->m_pkthdr.len;
581
582 if (msg && msg->msg_control)
583 {
584 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
585 if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
586 control = m_get(M_NOWAIT, MT_CONTROL);
587 if (control == NULL) {
588 error = ENOMEM;
589 goto errorout;
590 }
591 memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
592 control->m_len = msg->msg_controllen;
593 }
594
595 error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg != NULL ?
596 (struct sockaddr*)msg->msg_name : NULL, auio, data, control, flags);
597
598 /*
599 * Residual data is possible in the case of IO vectors but not
600 * in the mbuf case since the latter is treated as atomic send.
601 * If pru_sosend() consumed a portion of the iovecs data and
602 * the error returned is transient, treat it as success; this
603 * is consistent with sendit() behavior.
604 */
605 if (auio != NULL && uio_resid(auio) != datalen &&
606 (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
607 error = 0;
608
609 if (error == 0 && sentlen != NULL) {
610 if (auio != NULL)
611 *sentlen = datalen - uio_resid(auio);
612 else
613 *sentlen = datalen;
614 }
615
616 return error;
617
618 /*
619 * In cases where we detect an error before returning, we need to
620 * free the mbuf chain if there is one. sosend (and pru_sosend) will
621 * free the mbuf chain if they encounter an error.
622 */
623 errorout:
624 if (control)
625 m_freem(control);
626 if (data)
627 m_freem(data);
628 if (sentlen)
629 *sentlen = 0;
630 return error;
631 }
632
633 errno_t
634 sock_send(
635 socket_t sock,
636 const struct msghdr *msg,
637 int flags,
638 size_t *sentlen)
639 {
640 if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1)
641 return EINVAL;
642 return sock_send_internal(sock, msg, NULL, flags, sentlen);
643 }
644
645 errno_t
646 sock_sendmbuf(
647 socket_t sock,
648 const struct msghdr *msg,
649 mbuf_t data,
650 int flags,
651 size_t *sentlen)
652 {
653 if (data == NULL || (msg &&
654 (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
655 if (data)
656 m_freem(data);
657 return EINVAL;
658 }
659 return sock_send_internal(sock, msg, data, flags, sentlen);
660 }
661
662 errno_t
663 sock_shutdown(
664 socket_t sock,
665 int how)
666 {
667 if (sock == NULL) return EINVAL;
668 return soshutdown(sock, how);
669 }
670
671 typedef void (*so_upcall)(struct socket *sock, void* arg, int waitf);
672
673 errno_t
674 sock_socket(
675 int domain,
676 int type,
677 int protocol,
678 sock_upcall callback,
679 void* context,
680 socket_t *new_so)
681 {
682 int error = 0;
683 if (new_so == NULL) return EINVAL;
684 /* socreate will create an initial so_count */
685 error = socreate(domain, new_so, type, protocol);
686 if (error == 0 && callback)
687 {
688 (*new_so)->so_rcv.sb_flags |= SB_UPCALL;
689 (*new_so)->so_upcall = (so_upcall)callback;
690 (*new_so)->so_upcallarg = context;
691 }
692 return error;
693 }
694
695 void
696 sock_close(
697 socket_t sock)
698 {
699 if (sock == NULL) return;
700 soclose(sock);
701 }
702
703 /* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04)*/
704 void
705 sock_retain(
706 socket_t sock)
707 {
708 if (sock == NULL) return;
709 socket_lock(sock, 1);
710 sock->so_retaincnt++;
711 sock->so_usecount++; /* add extra reference for holding the socket */
712 socket_unlock(sock, 1);
713 }
714
715 /* Do we want this to be APPLE_PRIVATE API? */
716 void
717 sock_release(
718 socket_t sock)
719 {
720 if (sock == NULL) return;
721 socket_lock(sock, 1);
722 sock->so_retaincnt--;
723 if (sock->so_retaincnt < 0)
724 panic("sock_release: negative retain count for sock=%x cnt=%x\n",
725 sock, sock->so_retaincnt);
726 if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2))
727 soclose_locked(sock); /* close socket only if the FD is not holding it */
728 else
729 sock->so_usecount--; /* remove extra reference holding the socket */
730 socket_unlock(sock, 1);
731 }
732
733 errno_t
734 sock_setpriv(
735 socket_t sock,
736 int on)
737 {
738 if (sock == NULL) return EINVAL;
739 socket_lock(sock, 1);
740 if (on)
741 {
742 sock->so_state |= SS_PRIV;
743 }
744 else
745 {
746 sock->so_state &= ~SS_PRIV;
747 }
748 socket_unlock(sock, 1);
749 return 0;
750 }
751
752 int
753 sock_isconnected(
754 socket_t sock)
755 {
756 int retval;
757 socket_lock(sock, 1);
758 retval = (sock->so_state & SS_ISCONNECTED) != 0;
759 socket_unlock(sock, 1);
760 return (retval);
761 }
762
763 int
764 sock_isnonblocking(
765 socket_t sock)
766 {
767 int retval;
768 socket_lock(sock, 1);
769 retval = (sock->so_state & SS_NBIO) != 0;
770 socket_unlock(sock, 1);
771 return (retval);
772 }
773
774 errno_t
775 sock_gettype(
776 socket_t sock,
777 int *outDomain,
778 int *outType,
779 int *outProtocol)
780 {
781 socket_lock(sock, 1);
782 if (outDomain)
783 *outDomain = sock->so_proto->pr_domain->dom_family;
784 if (outType)
785 *outType = sock->so_type;
786 if (outProtocol)
787 *outProtocol = sock->so_proto->pr_protocol;
788 socket_unlock(sock, 1);
789 return 0;
790 }