]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kpi_socket.c
xnu-792.25.20.tar.gz
[apple/xnu.git] / bsd / kern / kpi_socket.c
1 /*
2 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #define __KPI__
24 #include <sys/kernel.h>
25 #include <sys/types.h>
26 #include <sys/socket.h>
27 #include <sys/socketvar.h>
28 #include <sys/param.h>
29 #include <sys/proc.h>
30 #include <sys/errno.h>
31 #include <sys/malloc.h>
32 #include <sys/protosw.h>
33 #include <sys/domain.h>
34 #include <sys/mbuf.h>
35 #include <sys/fcntl.h>
36 #include <sys/filio.h>
37 #include <sys/uio_internal.h>
38 #include <kern/lock.h>
39
40 extern void *memcpy(void *, const void *, size_t);
41 extern int soclose_locked(struct socket *so);
42
43 errno_t sock_send_internal(
44 socket_t sock,
45 const struct msghdr *msg,
46 mbuf_t data,
47 int flags,
48 size_t *sentlen);
49
50
51
/*
 * sock_accept
 *
 * KPI version of accept(2): dequeue the first completed connection from
 * a listening socket and return it through *new_sock.  If 'from' is
 * non-NULL the peer address is copied into it, truncated to 'fromlen'.
 * Only MSG_DONTWAIT is honored in 'flags'; with it set (or SS_NBIO on
 * the socket) an empty completion queue yields EWOULDBLOCK instead of
 * sleeping.  An optional receive upcall (callback/cookie) is installed
 * on the accepted socket before it is returned.
 */
errno_t
sock_accept(
	socket_t sock,
	struct sockaddr *from,
	int fromlen,
	int flags,
	sock_upcall callback,
	void* cookie,
	socket_t *new_sock)
{
	struct sockaddr *sa;
	struct socket *new_so;
	lck_mtx_t *mutex_held;
	int dosocklock;
	errno_t error = 0;
	
	if (sock == NULL || new_sock == NULL) return EINVAL;
	socket_lock(sock, 1);
	/* the socket must actually be listening */
	if ((sock->so_options & SO_ACCEPTCONN) == 0) {
		socket_unlock(sock, 1);
		return EINVAL;
	}
	/* MSG_DONTWAIT is the only flag supported here */
	if ((flags & ~(MSG_DONTWAIT)) != 0) {
		socket_unlock(sock, 1);
		return ENOTSUP;
	}
	/* non-blocking and nothing queued: fail immediately */
	if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
		sock->so_comp.tqh_first == NULL) {
		socket_unlock(sock, 1);
		return EWOULDBLOCK;
	}
	
	/*
	 * With a per-socket lock (pr_getlock) the accepted socket has its
	 * own mutex and must be locked separately below; otherwise both
	 * sockets share the domain mutex and no extra locking is needed.
	 */
	if (sock->so_proto->pr_getlock != NULL) {
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		dosocklock = 1;
	}
	else {
		mutex_held = sock->so_proto->pr_domain->dom_mtx;
		dosocklock = 0;
	}
	
	/* interruptible sleep until a completed connection arrives */
	while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
		if (sock->so_state & SS_CANTRCVMORE) {
			sock->so_error = ECONNABORTED;
			break;
		}
		error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH, "sock_accept", 0);
		if (error) {
			socket_unlock(sock, 1);
			return (error);
		}
	}
	if (sock->so_error) {
		error = sock->so_error;
		sock->so_error = 0;
		socket_unlock(sock, 1);
		return (error);
	}
	
	/* dequeue the connection, then drop the listening socket's lock */
	new_so = TAILQ_FIRST(&sock->so_comp);
	TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
	sock->so_qlen--;
	socket_unlock(sock, 1); /* release the head */
	
	if (dosocklock) {
		lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
			LCK_MTX_ASSERT_NOTOWNED);
		socket_lock(new_so, 1);
	}
	
	new_so->so_state &= ~SS_COMP;
	new_so->so_head = NULL;
	/* soacceptlock() allocates 'sa' (peer address) — freed below */
	soacceptlock(new_so, &sa, 0);
	
	if (callback) {
		new_so->so_upcall = callback;
		new_so->so_upcallarg = cookie;
		new_so->so_rcv.sb_flags |= SB_UPCALL;
	}
	
	/* copy out the peer address, truncating to the caller's buffer */
	if (sa && from)
	{
		if (fromlen > sa->sa_len) fromlen = sa->sa_len;
		memcpy(from, sa, fromlen);
	}
	if (sa) FREE(sa, M_SONAME);
	*new_sock = new_so;
	if (dosocklock)
		socket_unlock(new_so, 1);
	return error;
}
143
144 errno_t
145 sock_bind(
146 socket_t sock,
147 const struct sockaddr *to)
148 {
149 if (sock == NULL || to == NULL) return EINVAL;
150
151 return sobind(sock, (struct sockaddr*)to);
152 }
153
/*
 * sock_connect
 *
 * KPI version of connect(2).  Initiates a connection to 'to'.  For a
 * non-blocking call (MSG_DONTWAIT in 'flags', or SS_NBIO on the socket)
 * returns EALREADY if a connect is already in flight, or EINPROGRESS
 * once the connect has been started.  Otherwise sleeps (interruptibly)
 * until the connection completes or fails, returning the socket error.
 */
errno_t
sock_connect(
	socket_t sock,
	const struct sockaddr *to,
	int flags)
{
	int error = 0;
	lck_mtx_t *mutex_held;
	
	if (sock == NULL || to == NULL) return EINVAL;
	
	socket_lock(sock, 1);
	
	/* a non-blocking caller with a connect already pending gets EALREADY */
	if ((sock->so_state & SS_ISCONNECTING) &&
		((sock->so_state & SS_NBIO) != 0 ||
		 (flags & MSG_DONTWAIT) != 0)) {
		socket_unlock(sock, 1);
		return EALREADY;
	}
	error = soconnectlock(sock, (struct sockaddr*)to, 0);
	if (!error) {
		/* connect started; non-blocking callers return immediately */
		if ((sock->so_state & SS_ISCONNECTING) &&
			((sock->so_state & SS_NBIO) != 0 || (flags & MSG_DONTWAIT) != 0)) {
			socket_unlock(sock, 1);
			return EINPROGRESS;
		}
		
		if (sock->so_proto->pr_getlock != NULL)
			mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		else
			mutex_held = sock->so_proto->pr_domain->dom_mtx;
		
		/* blocking path: sleep until connected or an error is posted */
		while ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
			error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH,
					"sock_connect", 0);
			if (error)
				break;
		}
		
		if (error == 0) {
			error = sock->so_error;
			sock->so_error = 0;
		}
	}
	else {
		/* connect attempt failed outright: clear the connecting state */
		sock->so_state &= ~SS_ISCONNECTING;
	}
	socket_unlock(sock, 1);
	return error;
}
204
205 errno_t
206 sock_connectwait(
207 socket_t sock,
208 const struct timeval *tv)
209 {
210 lck_mtx_t * mutex_held;
211 errno_t retval = 0;
212 struct timespec ts;
213
214 socket_lock(sock, 1);
215
216 // Check if we're already connected or if we've already errored out
217 if ((sock->so_state & SS_ISCONNECTING) == 0 || sock->so_error) {
218 if (sock->so_error) {
219 retval = sock->so_error;
220 sock->so_error = 0;
221 }
222 else {
223 if ((sock->so_state & SS_ISCONNECTED) != 0)
224 retval = 0;
225 else
226 retval = EINVAL;
227 }
228 goto done;
229 }
230
231 // copied translation from timeval to hertz from SO_RCVTIMEO handling
232 if (tv->tv_sec < 0 || tv->tv_sec > SHRT_MAX / hz ||
233 tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
234 retval = EDOM;
235 goto done;
236 }
237
238 ts.tv_sec = tv->tv_sec;
239 ts.tv_nsec = (tv->tv_usec * NSEC_PER_USEC);
240 if ( (ts.tv_sec + (ts.tv_nsec/NSEC_PER_SEC))/100 > SHRT_MAX) {
241 retval = EDOM;
242 goto done;
243 }
244
245 if (sock->so_proto->pr_getlock != NULL)
246 mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
247 else
248 mutex_held = sock->so_proto->pr_domain->dom_mtx;
249
250 msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK, "sock_connectwait", &ts);
251
252 // Check if we're still waiting to connect
253 if ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) {
254 retval = EINPROGRESS;
255 goto done;
256 }
257
258 if (sock->so_error) {
259 retval = sock->so_error;
260 sock->so_error = 0;
261 }
262
263 done:
264 socket_unlock(sock, 1);
265 return retval;
266 }
267
268 errno_t
269 sock_nointerrupt(
270 socket_t sock,
271 int on)
272 {
273 socket_lock(sock, 1);
274
275 if (on) {
276 sock->so_rcv.sb_flags |= SB_NOINTR; // This isn't safe
277 sock->so_snd.sb_flags |= SB_NOINTR; // This isn't safe
278 }
279 else {
280 sock->so_rcv.sb_flags &= ~SB_NOINTR; // This isn't safe
281 sock->so_snd.sb_flags &= ~SB_NOINTR; // This isn't safe
282 }
283
284 socket_unlock(sock, 1);
285
286 return 0;
287 }
288
289 errno_t
290 sock_getpeername(
291 socket_t sock,
292 struct sockaddr *peername,
293 int peernamelen)
294 {
295 int error = 0;
296 struct sockaddr *sa = NULL;
297
298 if (sock == NULL || peername == NULL || peernamelen < 0) return EINVAL;
299 socket_lock(sock, 1);
300 if ((sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
301 socket_unlock(sock, 1);
302 return ENOTCONN;
303 }
304 error = sock->so_proto->pr_usrreqs->pru_peeraddr(sock, &sa);
305 if (!error)
306 {
307 if (peernamelen > sa->sa_len) peernamelen = sa->sa_len;
308 memcpy(peername, sa, peernamelen);
309 }
310 if (sa) FREE(sa, M_SONAME);
311 socket_unlock(sock, 1);
312 return error;
313 }
314
315 errno_t
316 sock_getsockname(
317 socket_t sock,
318 struct sockaddr *sockname,
319 int socknamelen)
320 {
321 int error = 0;
322 struct sockaddr *sa = NULL;
323
324 if (sock == NULL || sockname == NULL || socknamelen < 0) return EINVAL;
325 socket_lock(sock, 1);
326 error = sock->so_proto->pr_usrreqs->pru_sockaddr(sock, &sa);
327 if (!error)
328 {
329 if (socknamelen > sa->sa_len) socknamelen = sa->sa_len;
330 memcpy(sockname, sa, socknamelen);
331 }
332 if (sa) FREE(sa, M_SONAME);
333 socket_unlock(sock, 1);
334 return error;
335 }
336
337 errno_t
338 sock_getsockopt(
339 socket_t sock,
340 int level,
341 int optname,
342 void *optval,
343 int *optlen)
344 {
345 int error = 0;
346 struct sockopt sopt;
347
348 if (sock == NULL || optval == NULL || optlen == NULL) return EINVAL;
349 sopt.sopt_dir = SOPT_GET;
350 sopt.sopt_level = level;
351 sopt.sopt_name = optname;
352 sopt.sopt_val = CAST_USER_ADDR_T(optval);
353 sopt.sopt_valsize = *optlen;
354 sopt.sopt_p = NULL;
355 error = sogetopt(sock, &sopt); /* will lock socket */
356 if (error == 0) *optlen = sopt.sopt_valsize;
357 return error;
358 }
359
360 errno_t
361 sock_ioctl(
362 socket_t sock,
363 unsigned long request,
364 void *argp)
365 {
366 return soioctl(sock, request, argp, NULL); /* will lock socket */
367 }
368
369 errno_t
370 sock_setsockopt(
371 socket_t sock,
372 int level,
373 int optname,
374 const void *optval,
375 int optlen)
376 {
377 struct sockopt sopt;
378
379 if (sock == NULL || optval == NULL) return EINVAL;
380 sopt.sopt_dir = SOPT_SET;
381 sopt.sopt_level = level;
382 sopt.sopt_name = optname;
383 sopt.sopt_val = CAST_USER_ADDR_T(optval);
384 sopt.sopt_valsize = optlen;
385 sopt.sopt_p = NULL;
386 return sosetopt(sock, &sopt); /* will lock socket */
387 }
388
389 errno_t
390 sock_listen(
391 socket_t sock,
392 int backlog)
393 {
394 if (sock == NULL) return EINVAL;
395 return solisten(sock, backlog); /* will lock socket */
396 }
397
398 static errno_t
399 sock_receive_internal(
400 socket_t sock,
401 struct msghdr *msg,
402 mbuf_t *data,
403 int flags,
404 size_t *recvdlen)
405 {
406 uio_t auio;
407 struct mbuf *control = NULL;
408 int error = 0;
409 int length = 0;
410 struct sockaddr *fromsa;
411 char uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ];
412
413 if (sock == NULL) return EINVAL;
414
415 auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0),
416 0, UIO_SYSSPACE, UIO_READ,
417 &uio_buf[0], sizeof(uio_buf));
418 if (msg && data == NULL) {
419 int i;
420 struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;
421
422 for (i = 0; i < msg->msg_iovlen; i++) {
423 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
424 }
425 if (uio_resid(auio) < 0) return EINVAL;
426 }
427 else {
428 uio_setresid(auio, (uio_resid(auio) + *recvdlen));
429 }
430 length = uio_resid(auio);
431
432 if (recvdlen)
433 *recvdlen = 0;
434
435 if (msg && msg->msg_control) {
436 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
437 if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
438 control = m_get(M_NOWAIT, MT_CONTROL);
439 if (control == NULL) return ENOMEM;
440 memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
441 control->m_len = msg->msg_controllen;
442 }
443
444 /* let pru_soreceive handle the socket locking */
445 error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio,
446 data, control ? &control : NULL, &flags);
447 if (error) goto cleanup;
448
449 if (recvdlen)
450 *recvdlen = length - uio_resid(auio);
451 if (msg) {
452 msg->msg_flags = flags;
453
454 if (msg->msg_name)
455 {
456 int salen;
457 salen = msg->msg_namelen;
458 if (msg->msg_namelen > 0 && fromsa != 0)
459 {
460 salen = MIN(salen, fromsa->sa_len);
461 memcpy(msg->msg_name, fromsa,
462 msg->msg_namelen > fromsa->sa_len ? fromsa->sa_len : msg->msg_namelen);
463 }
464 }
465
466 if (msg->msg_control)
467 {
468 struct mbuf* m = control;
469 u_char* ctlbuf = msg->msg_control;
470 int clen = msg->msg_controllen;
471 msg->msg_controllen = 0;
472
473 while (m && clen > 0)
474 {
475 unsigned int tocopy;
476 if (clen >= m->m_len)
477 {
478 tocopy = m->m_len;
479 }
480 else
481 {
482 msg->msg_flags |= MSG_CTRUNC;
483 tocopy = clen;
484 }
485 memcpy(ctlbuf, mtod(m, caddr_t), tocopy);
486 ctlbuf += tocopy;
487 clen -= tocopy;
488 m = m->m_next;
489 }
490 msg->msg_controllen = (u_int32_t)ctlbuf - (u_int32_t)msg->msg_control;
491 }
492 }
493
494 cleanup:
495 if (control) m_freem(control);
496 if (fromsa) FREE(fromsa, M_SONAME);
497 return error;
498 }
499
500 errno_t
501 sock_receive(
502 socket_t sock,
503 struct msghdr *msg,
504 int flags,
505 size_t *recvdlen)
506 {
507 if ((msg == NULL) ||
508 (msg->msg_iovlen < 1) ||
509 (msg->msg_iov[0].iov_len == 0) ||
510 (msg->msg_iov[0].iov_base == NULL))
511 return EINVAL;
512 return sock_receive_internal(sock, msg, NULL, flags, recvdlen);
513 }
514
515 errno_t
516 sock_receivembuf(
517 socket_t sock,
518 struct msghdr *msg,
519 mbuf_t *data,
520 int flags,
521 size_t *recvlen)
522 {
523 if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg &&
524 (msg->msg_iov != NULL || msg->msg_iovlen != 0)))
525 return EINVAL;
526 return sock_receive_internal(sock, msg, data, flags, recvlen);
527 }
528
529 errno_t
530 sock_send_internal(
531 socket_t sock,
532 const struct msghdr *msg,
533 mbuf_t data,
534 int flags,
535 size_t *sentlen)
536 {
537 uio_t auio = NULL;
538 struct mbuf *control = NULL;
539 int error = 0;
540 int datalen = 0;
541 char uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ];
542
543 if (sock == NULL) {
544 error = EINVAL;
545 goto errorout;
546 }
547
548 if (data == 0 && msg != NULL) {
549 struct iovec_32 *tempp = (struct iovec_32 *) msg->msg_iov;
550
551 auio = uio_createwithbuffer(msg->msg_iovlen, 0, UIO_SYSSPACE, UIO_WRITE,
552 &uio_buf[0], sizeof(uio_buf));
553 if (tempp != NULL)
554 {
555 int i;
556
557 for (i = 0; i < msg->msg_iovlen; i++) {
558 uio_addiov(auio, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len);
559 }
560
561 if (uio_resid(auio) < 0) {
562 error = EINVAL;
563 goto errorout;
564 }
565 }
566 }
567
568 if (sentlen)
569 *sentlen = 0;
570
571 if (auio)
572 datalen = uio_resid(auio);
573 else
574 datalen = data->m_pkthdr.len;
575
576 if (msg && msg->msg_control)
577 {
578 if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) return EINVAL;
579 if ((size_t)msg->msg_controllen > MLEN) return EINVAL;
580 control = m_get(M_NOWAIT, MT_CONTROL);
581 if (control == NULL) {
582 error = ENOMEM;
583 goto errorout;
584 }
585 memcpy(mtod(control, caddr_t), msg->msg_control, msg->msg_controllen);
586 control->m_len = msg->msg_controllen;
587 }
588
589 error = sock->so_proto->pr_usrreqs->pru_sosend(sock, msg != NULL ?
590 (struct sockaddr*)msg->msg_name : NULL, auio, data, control, flags);
591
592 /*
593 * Residual data is possible in the case of IO vectors but not
594 * in the mbuf case since the latter is treated as atomic send.
595 * If pru_sosend() consumed a portion of the iovecs data and
596 * the error returned is transient, treat it as success; this
597 * is consistent with sendit() behavior.
598 */
599 if (auio != NULL && uio_resid(auio) != datalen &&
600 (error == ERESTART || error == EINTR || error == EWOULDBLOCK))
601 error = 0;
602
603 if (error == 0 && sentlen != NULL) {
604 if (auio != NULL)
605 *sentlen = datalen - uio_resid(auio);
606 else
607 *sentlen = datalen;
608 }
609
610 return error;
611
612 /*
613 * In cases where we detect an error before returning, we need to
614 * free the mbuf chain if there is one. sosend (and pru_sosend) will
615 * free the mbuf chain if they encounter an error.
616 */
617 errorout:
618 if (control)
619 m_freem(control);
620 if (data)
621 m_freem(data);
622 if (sentlen)
623 *sentlen = 0;
624 return error;
625 }
626
627 errno_t
628 sock_send(
629 socket_t sock,
630 const struct msghdr *msg,
631 int flags,
632 size_t *sentlen)
633 {
634 if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1)
635 return EINVAL;
636 return sock_send_internal(sock, msg, NULL, flags, sentlen);
637 }
638
639 errno_t
640 sock_sendmbuf(
641 socket_t sock,
642 const struct msghdr *msg,
643 mbuf_t data,
644 int flags,
645 size_t *sentlen)
646 {
647 if (data == NULL || (msg &&
648 (msg->msg_iov != NULL || msg->msg_iovlen != 0))) {
649 if (data)
650 m_freem(data);
651 return EINVAL;
652 }
653 return sock_send_internal(sock, msg, data, flags, sentlen);
654 }
655
656 errno_t
657 sock_shutdown(
658 socket_t sock,
659 int how)
660 {
661 if (sock == NULL) return EINVAL;
662 return soshutdown(sock, how);
663 }
664
665 typedef void (*so_upcall)(struct socket *sock, void* arg, int waitf);
666
667 errno_t
668 sock_socket(
669 int domain,
670 int type,
671 int protocol,
672 sock_upcall callback,
673 void* context,
674 socket_t *new_so)
675 {
676 int error = 0;
677 if (new_so == NULL) return EINVAL;
678 /* socreate will create an initial so_count */
679 error = socreate(domain, new_so, type, protocol);
680 if (error == 0 && callback)
681 {
682 (*new_so)->so_rcv.sb_flags |= SB_UPCALL;
683 (*new_so)->so_upcall = (so_upcall)callback;
684 (*new_so)->so_upcallarg = context;
685 }
686 return error;
687 }
688
689 void
690 sock_close(
691 socket_t sock)
692 {
693 if (sock == NULL) return;
694 soclose(sock);
695 }
696
697 /* Do we want this to be APPLE_PRIVATE API?: YES (LD 12/23/04)*/
698 void
699 sock_retain(
700 socket_t sock)
701 {
702 if (sock == NULL) return;
703 socket_lock(sock, 1);
704 sock->so_retaincnt++;
705 sock->so_usecount++; /* add extra reference for holding the socket */
706 socket_unlock(sock, 1);
707 }
708
709 /* Do we want this to be APPLE_PRIVATE API? */
710 void
711 sock_release(
712 socket_t sock)
713 {
714 if (sock == NULL) return;
715 socket_lock(sock, 1);
716 sock->so_retaincnt--;
717 if (sock->so_retaincnt < 0)
718 panic("sock_release: negative retain count for sock=%x cnt=%x\n",
719 sock, sock->so_retaincnt);
720 if ((sock->so_retaincnt == 0) && (sock->so_usecount == 2))
721 soclose_locked(sock); /* close socket only if the FD is not holding it */
722 else
723 sock->so_usecount--; /* remove extra reference holding the socket */
724 socket_unlock(sock, 1);
725 }
726
727 errno_t
728 sock_setpriv(
729 socket_t sock,
730 int on)
731 {
732 if (sock == NULL) return EINVAL;
733 socket_lock(sock, 1);
734 if (on)
735 {
736 sock->so_state |= SS_PRIV;
737 }
738 else
739 {
740 sock->so_state &= ~SS_PRIV;
741 }
742 socket_unlock(sock, 1);
743 return 0;
744 }
745
746 int
747 sock_isconnected(
748 socket_t sock)
749 {
750 int retval;
751 socket_lock(sock, 1);
752 retval = (sock->so_state & SS_ISCONNECTED) != 0;
753 socket_unlock(sock, 1);
754 return (retval);
755 }
756
757 int
758 sock_isnonblocking(
759 socket_t sock)
760 {
761 int retval;
762 socket_lock(sock, 1);
763 retval = (sock->so_state & SS_NBIO) != 0;
764 socket_unlock(sock, 1);
765 return (retval);
766 }
767
768 errno_t
769 sock_gettype(
770 socket_t sock,
771 int *outDomain,
772 int *outType,
773 int *outProtocol)
774 {
775 socket_lock(sock, 1);
776 if (outDomain)
777 *outDomain = sock->so_proto->pr_domain->dom_family;
778 if (outType)
779 *outType = sock->so_type;
780 if (outProtocol)
781 *outProtocol = sock->so_proto->pr_protocol;
782 socket_unlock(sock, 1);
783 return 0;
784 }