+sock_setupcalls_locked(socket_t sock, sock_upcall rcallback, void *rcontext,
+ sock_upcall wcallback, void *wcontext, int locked)
+{
+ if (rcallback != NULL) {
+ sock->so_rcv.sb_flags |= SB_UPCALL;
+ if (locked) {
+ sock->so_rcv.sb_flags |= SB_UPCALL_LOCK;
+ }
+ sock->so_rcv.sb_upcall = rcallback;
+ sock->so_rcv.sb_upcallarg = rcontext;
+ } else {
+ sock->so_rcv.sb_flags &= ~(SB_UPCALL | SB_UPCALL_LOCK);
+ sock->so_rcv.sb_upcall = NULL;
+ sock->so_rcv.sb_upcallarg = NULL;
+ }
+
+ if (wcallback != NULL) {
+ sock->so_snd.sb_flags |= SB_UPCALL;
+ if (locked) {
+ sock->so_snd.sb_flags |= SB_UPCALL_LOCK;
+ }
+ sock->so_snd.sb_upcall = wcallback;
+ sock->so_snd.sb_upcallarg = wcontext;
+ } else {
+ sock->so_snd.sb_flags &= ~(SB_UPCALL | SB_UPCALL_LOCK);
+ sock->so_snd.sb_upcall = NULL;
+ sock->so_snd.sb_upcallarg = NULL;
+ }
+}
+
+errno_t
+sock_setupcall(socket_t sock, sock_upcall callback, void *context)
+{
+ if (sock == NULL) {
+ return EINVAL;
+ }
+
+ /*
+ * Note that we don't wait for any in progress upcall to complete.
+ * On embedded, sock_setupcall() causes both read and write
+ * callbacks to be set; on desktop, only read callback is set
+ * to maintain legacy KPI behavior.
+ *
+ * The newer sock_setupcalls() KPI should be used instead to set
+ * the read and write callbacks and their respective parameters.
+ */
+ socket_lock(sock, 1);
+#if (defined(__arm__) || defined(__arm64__))
+ sock_setupcalls_locked(sock, callback, context, callback, context, 0);
+#else /* (defined(__arm__) || defined(__arm64__)) */
+ sock_setupcalls_locked(sock, callback, context, NULL, NULL, 0);
+#endif /* (defined(__arm__) || defined(__arm64__)) */
+ socket_unlock(sock, 1);
+
+ return 0;
+}
+
+errno_t
+sock_setupcalls(socket_t sock, sock_upcall rcallback, void *rcontext,
+ sock_upcall wcallback, void *wcontext)
+{
+ if (sock == NULL) {
+ return EINVAL;
+ }
+
+ /*
+ * Note that we don't wait for any in progress upcall to complete.
+ */
+ socket_lock(sock, 1);
+ sock_setupcalls_locked(sock, rcallback, rcontext, wcallback, wcontext, 0);
+ socket_unlock(sock, 1);
+
+ return 0;
+}
+
+void
+sock_catchevents_locked(socket_t sock, sock_evupcall ecallback, void *econtext,
+ long emask)
+{
+ socket_lock_assert_owned(sock);
+
+ /*
+ * Note that we don't wait for any in progress upcall to complete.
+ */
+ if (ecallback != NULL) {
+ sock->so_event = ecallback;
+ sock->so_eventarg = econtext;
+ sock->so_eventmask = (uint32_t)emask;
+ } else {
+ sock->so_event = sonullevent;
+ sock->so_eventarg = NULL;
+ sock->so_eventmask = 0;
+ }
+}
+
+errno_t
+sock_catchevents(socket_t sock, sock_evupcall ecallback, void *econtext,
+ long emask)
+{
+ if (sock == NULL) {
+ return EINVAL;
+ }
+
+ socket_lock(sock, 1);
+ sock_catchevents_locked(sock, ecallback, econtext, emask);
+ socket_unlock(sock, 1);
+
+ return 0;
+}
+
/*
 * Returns non-zero if the socket belongs to the kernel.
 */
+int
+sock_iskernel(socket_t so)