2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1991, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)kern_descrip.c 8.8 (Berkeley) 2/14/95
69 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/kernel.h>
79 #include <sys/vnode_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/kauth.h>
82 #include <sys/file_internal.h>
83 #include <sys/guarded.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
88 #include <sys/ioctl.h>
89 #include <sys/fcntl.h>
90 #include <sys/fsctl.h>
91 #include <sys/malloc.h>
93 #include <sys/syslog.h>
94 #include <sys/unistd.h>
95 #include <sys/resourcevar.h>
96 #include <sys/aio_kern.h>
98 #include <kern/locks.h>
99 #include <sys/uio_internal.h>
100 #include <sys/codesign.h>
101 #include <sys/codedir_internal.h>
102 #include <sys/mount_internal.h>
103 #include <sys/kdebug.h>
104 #include <sys/sysproto.h>
105 #include <sys/pipe.h>
106 #include <sys/spawn.h>
107 #include <sys/cprotect.h>
108 #include <sys/ubc_internal.h>
110 #include <kern/kern_types.h>
111 #include <kern/kalloc.h>
112 #include <kern/waitq.h>
113 #include <kern/ipc_misc.h>
115 #include <vm/vm_protos.h>
116 #include <mach/mach_port.h>
118 #include <security/audit/audit.h>
120 #include <security/mac_framework.h>
124 #include <os/atomic_private.h>
125 #include <IOKit/IOBSD.h>
127 #define IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND 0x1
128 kern_return_t
ipc_object_copyin(ipc_space_t
, mach_port_name_t
,
129 mach_msg_type_name_t
, ipc_port_t
*, mach_port_context_t
, mach_msg_guard_flags_t
*, uint32_t);
130 void ipc_port_release_send(ipc_port_t
);
132 static void fileproc_drain(proc_t
, struct fileproc
*);
133 static int finishdup(proc_t p
,
134 struct filedesc
*fdp
, int old
, int new, int flags
, int32_t *retval
);
136 void fileport_releasefg(struct fileglob
*fg
);
138 /* flags for fp_close_and_unlock */
139 #define FD_DUP2RESV 1
141 /* We don't want these exported */
144 int unlink1(vfs_context_t
, vnode_t
, user_addr_t
, enum uio_seg
, int);
146 static void fdrelse(struct proc
* p
, int fd
);
148 extern void file_lock_init(void);
150 extern kauth_scope_t kauth_scope_fileop
;
152 /* Conflict wait queue for when selects collide (opaque type) */
153 extern struct waitq select_conflict_queue
;
155 #ifndef HFS_GET_BOOT_INFO
156 #define HFS_GET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00004)
159 #ifndef HFS_SET_BOOT_INFO
160 #define HFS_SET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00005)
163 #ifndef APFSIOC_REVERT_TO_SNAPSHOT
164 #define APFSIOC_REVERT_TO_SNAPSHOT _IOW('J', 1, u_int64_t)
167 #define f_flag fp_glob->fg_flag
168 #define f_type fp_glob->fg_ops->fo_type
169 #define f_cred fp_glob->fg_cred
170 #define f_ops fp_glob->fg_ops
171 #define f_offset fp_glob->fg_offset
172 #define f_data fp_glob->fg_data
173 #define CHECK_ADD_OVERFLOW_INT64L(x, y) \
174 (((((x) > 0) && ((y) > 0) && ((x) > LLONG_MAX - (y))) || \
175 (((x) < 0) && ((y) < 0) && ((x) < LLONG_MIN - (y)))) \
178 ZONE_DECLARE(fg_zone
, "fileglob",
179 sizeof(struct fileglob
), ZC_NOENCRYPT
| ZC_ZFREE_CLEARMEM
);
180 ZONE_DECLARE(fp_zone
, "fileproc",
181 sizeof(struct fileproc
), ZC_NOENCRYPT
| ZC_ZFREE_CLEARMEM
);
182 ZONE_DECLARE(fdp_zone
, "filedesc",
183 sizeof(struct filedesc
), ZC_NOENCRYPT
| ZC_ZFREE_CLEARMEM
);
186 * Descriptor management.
188 int nfiles
; /* actual number of open files */
190 * "uninitialized" ops -- ensure FILEGLOB_DTYPE(fg) always exists
192 static const struct fileops uninitops
;
194 os_refgrp_decl(, f_refgrp
, "files refcounts", NULL
);
195 lck_grp_attr_t
* file_lck_grp_attr
;
196 lck_grp_t
* file_lck_grp
;
197 lck_attr_t
* file_lck_attr
;
199 #pragma mark fileglobs
205 * Free a file structure.
208 fg_free(struct fileglob
*fg
)
210 os_atomic_dec(&nfiles
, relaxed
);
212 if (fg
->fg_vn_data
) {
213 fg_vn_data_free(fg
->fg_vn_data
);
214 fg
->fg_vn_data
= NULL
;
217 if (IS_VALID_CRED(fg
->fg_cred
)) {
218 kauth_cred_unref(&fg
->fg_cred
);
220 lck_mtx_destroy(&fg
->fg_lock
, file_lck_grp
);
223 mac_file_label_destroy(fg
);
230 fg_ref(proc_t p
, struct fileglob
*fg
)
232 #if DEBUG || DEVELOPMENT
233 proc_fdlock_assert(p
, LCK_MTX_ASSERT_OWNED
);
237 os_ref_retain_raw(&fg
->fg_count
, &f_refgrp
);
241 fg_drop_live(struct fileglob
*fg
)
243 os_ref_release_live_raw(&fg
->fg_count
, &f_refgrp
);
247 fg_drop(proc_t p
, struct fileglob
*fg
)
250 struct vfs_context context
;
257 /* Set up context with cred stashed in fg */
258 if (p
== current_proc()) {
259 context
.vc_thread
= current_thread();
261 context
.vc_thread
= NULL
;
263 context
.vc_ucred
= fg
->fg_cred
;
266 * POSIX record locking dictates that any close releases ALL
267 * locks owned by this process. This is handled by setting
268 * a flag in the unlock to free ONLY locks obeying POSIX
269 * semantics, and not to free BSD-style file locks.
270 * If the descriptor was in a message, POSIX-style locks
271 * aren't passed with the descriptor.
273 if (p
&& DTYPE_VNODE
== FILEGLOB_DTYPE(fg
) &&
274 (p
->p_ladvflag
& P_LADVLOCK
)) {
276 .l_whence
= SEEK_SET
,
280 vp
= (struct vnode
*)fg
->fg_data
;
281 if ((error
= vnode_getwithref(vp
)) == 0) {
282 (void)VNOP_ADVLOCK(vp
, (caddr_t
)p
, F_UNLCK
, &lf
, F_POSIX
, &context
, NULL
);
287 if (os_ref_release_raw(&fg
->fg_count
, &f_refgrp
) == 0) {
289 * Since we ensure that fg->fg_ops is always initialized,
290 * it is safe to invoke fo_close on the fg
292 error
= fo_close(fg
, &context
);
303 * Description: Return vnode associated with the file structure, if
304 * any. The lifetime of the returned vnode is bound to
305 * the lifetime of the file structure.
307 * Parameters: fg Pointer to fileglob to
313 fg_get_vnode(struct fileglob
*fg
)
315 if (FILEGLOB_DTYPE(fg
) == DTYPE_VNODE
) {
316 return (vnode_t
)fg
->fg_data
;
323 fg_sendable(struct fileglob
*fg
)
325 switch (FILEGLOB_DTYPE(fg
)) {
330 case DTYPE_NETPOLICY
:
331 return (fg
->fg_lflags
& FG_CONFINED
) == 0;
339 #pragma mark fileprocs
342 * check_file_seek_range
344 * Description: Checks if seek offsets are in the range of 0 to LLONG_MAX.
346 * Parameters: fl Flock structure.
347 * cur_file_offset Current offset in the file.
349 * Returns: 0 on Success.
350 * EOVERFLOW on overflow.
351 * EINVAL on offset less than zero.
355 check_file_seek_range(struct flock
*fl
, off_t cur_file_offset
)
357 if (fl
->l_whence
== SEEK_CUR
) {
358 /* Check if the start marker is beyond LLONG_MAX. */
359 if (CHECK_ADD_OVERFLOW_INT64L(fl
->l_start
, cur_file_offset
)) {
360 /* Check if start marker is negative */
361 if (fl
->l_start
< 0) {
366 /* Check if the start marker is negative. */
367 if (fl
->l_start
+ cur_file_offset
< 0) {
370 /* Check if end marker is beyond LLONG_MAX. */
371 if ((fl
->l_len
> 0) && (CHECK_ADD_OVERFLOW_INT64L(fl
->l_start
+
372 cur_file_offset
, fl
->l_len
- 1))) {
375 /* Check if the end marker is negative. */
376 if ((fl
->l_len
<= 0) && (fl
->l_start
+ cur_file_offset
+
380 } else if (fl
->l_whence
== SEEK_SET
) {
381 /* Check if the start marker is negative. */
382 if (fl
->l_start
< 0) {
385 /* Check if the end marker is beyond LLONG_MAX. */
386 if ((fl
->l_len
> 0) &&
387 CHECK_ADD_OVERFLOW_INT64L(fl
->l_start
, fl
->l_len
- 1)) {
390 /* Check if the end marker is negative. */
391 if ((fl
->l_len
< 0) && fl
->l_start
+ fl
->l_len
< 0) {
402 * Description: Initialize the file lock group and the uipc and flist locks
408 * Notes: Called at system startup from bsd_init().
413 /* allocate file lock group attribute and group */
414 file_lck_grp_attr
= lck_grp_attr_alloc_init();
416 file_lck_grp
= lck_grp_alloc_init("file", file_lck_grp_attr
);
418 /* Allocate file lock attribute */
419 file_lck_attr
= lck_attr_alloc_init();
424 proc_dirs_lock_shared(proc_t p
)
426 lck_rw_lock_shared(&p
->p_dirs_lock
);
430 proc_dirs_unlock_shared(proc_t p
)
432 lck_rw_unlock_shared(&p
->p_dirs_lock
);
436 proc_dirs_lock_exclusive(proc_t p
)
438 lck_rw_lock_exclusive(&p
->p_dirs_lock
);
442 proc_dirs_unlock_exclusive(proc_t p
)
444 lck_rw_unlock_exclusive(&p
->p_dirs_lock
);
448 * proc_fdlock, proc_fdlock_spin
450 * Description: Lock to control access to the per process struct fileproc
451 * and struct filedesc
453 * Parameters: p Process to take the lock on
457 * Notes: The lock is initialized in forkproc() and destroyed in
458 * reap_child_process().
461 proc_fdlock(proc_t p
)
463 lck_mtx_lock(&p
->p_fdmlock
);
467 proc_fdlock_spin(proc_t p
)
469 lck_mtx_lock_spin(&p
->p_fdmlock
);
473 proc_fdlock_assert(proc_t p
, int assertflags
)
475 lck_mtx_assert(&p
->p_fdmlock
, assertflags
);
482 * Description: Unlock the lock previously locked by a call to proc_fdlock()
484 * Parameters: p Process to drop the lock on
489 proc_fdunlock(proc_t p
)
491 lck_mtx_unlock(&p
->p_fdmlock
);
495 fdt_next(proc_t p
, int fd
, bool only_settled
)
497 struct fdt_iterator it
;
498 struct filedesc
*fdp
= p
->p_fd
;
500 int nfds
= min(fdp
->fd_lastfile
+ 1, fdp
->fd_nfiles
);
502 while (++fd
< nfds
) {
503 fp
= fdp
->fd_ofiles
[fd
];
504 if (fp
== NULL
|| fp
->fp_glob
== NULL
) {
507 if (only_settled
&& (fdp
->fd_ofileflags
[fd
] & UF_RESERVED
)) {
521 fdt_prev(proc_t p
, int fd
, bool only_settled
)
523 struct fdt_iterator it
;
524 struct filedesc
*fdp
= p
->p_fd
;
528 fp
= fdp
->fd_ofiles
[fd
];
529 if (fp
== NULL
|| fp
->fp_glob
== NULL
) {
532 if (only_settled
&& (fdp
->fd_ofileflags
[fd
] & UF_RESERVED
)) {
546 * System calls on descriptors.
553 * Description: Returns the per process maximum size of the descriptor table
555 * Parameters: p Process being queried
556 * retval Pointer to the call return area
561 * *retval (modified) Size of dtable
564 sys_getdtablesize(proc_t p
, __unused
struct getdtablesize_args
*uap
, int32_t *retval
)
566 *retval
= (int32_t)MIN(proc_limitgetcur(p
, RLIMIT_NOFILE
, TRUE
), maxfilesperproc
);
573 procfdtbl_reservefd(struct proc
* p
, int fd
)
575 p
->p_fd
->fd_ofiles
[fd
] = NULL
;
576 p
->p_fd
->fd_ofileflags
[fd
] |= UF_RESERVED
;
580 procfdtbl_releasefd(struct proc
* p
, int fd
, struct fileproc
* fp
)
583 p
->p_fd
->fd_ofiles
[fd
] = fp
;
585 p
->p_fd
->fd_ofileflags
[fd
] &= ~UF_RESERVED
;
586 if ((p
->p_fd
->fd_ofileflags
[fd
] & UF_RESVWAIT
) == UF_RESVWAIT
) {
587 p
->p_fd
->fd_ofileflags
[fd
] &= ~UF_RESVWAIT
;
593 procfdtbl_waitfd(struct proc
* p
, int fd
)
595 p
->p_fd
->fd_ofileflags
[fd
] |= UF_RESVWAIT
;
596 msleep(&p
->p_fd
, &p
->p_fdmlock
, PRIBIO
, "ftbl_waitfd", NULL
);
600 procfdtbl_clearfd(struct proc
* p
, int fd
)
604 waiting
= (p
->p_fd
->fd_ofileflags
[fd
] & UF_RESVWAIT
);
605 p
->p_fd
->fd_ofiles
[fd
] = NULL
;
606 p
->p_fd
->fd_ofileflags
[fd
] = 0;
607 if (waiting
== UF_RESVWAIT
) {
615 * Description: Inline utility function to free an fd in a filedesc
617 * Parameters: fdp Pointer to filedesc fd lies in
619 * reserv fd should be reserved
623 * Locks: Assumes proc_fdlock for process pointing to fdp is held by
627 fdrelse(struct proc
* p
, int fd
)
629 struct filedesc
*fdp
= p
->p_fd
;
632 if (fd
< fdp
->fd_freefile
) {
633 fdp
->fd_freefile
= fd
;
636 if (fd
> fdp
->fd_lastfile
) {
637 panic("fdrelse: fd_lastfile inconsistent");
640 procfdtbl_clearfd(p
, fd
);
642 while ((nfd
= fdp
->fd_lastfile
) > 0 &&
643 fdp
->fd_ofiles
[nfd
] == NULL
&&
644 !(fdp
->fd_ofileflags
[nfd
] & UF_RESERVED
)) {
645 /* JMM - What about files with lingering EV_VANISHED knotes? */
668 char uio_buf
[UIO_SIZEOF(1)];
669 struct vfs_context context
= *(vfs_context_current());
673 error
= fp_lookup(p
, fd
, &fp
, 0);
678 switch (FILEGLOB_DTYPE(fp
->fp_glob
)) {
687 if (rw
== UIO_WRITE
&& !(fp
->f_flag
& FWRITE
)) {
692 if (rw
== UIO_READ
&& !(fp
->f_flag
& FREAD
)) {
697 context
.vc_ucred
= fp
->fp_glob
->fg_cred
;
699 if (UIO_SEG_IS_USER_SPACE(segflg
)) {
700 spacetype
= proc_is64bit(p
) ? UIO_USERSPACE64
: UIO_USERSPACE32
;
702 spacetype
= UIO_SYSSPACE
;
705 auio
= uio_createwithbuffer(1, offset
, spacetype
, rw
, &uio_buf
[0], sizeof(uio_buf
));
707 uio_addiov(auio
, (user_addr_t
)base
, (user_size_t
)len
);
709 if (!(io_flg
& IO_APPEND
)) {
713 if (rw
== UIO_WRITE
) {
714 user_ssize_t orig_resid
= uio_resid(auio
);
715 error
= fo_write(fp
, auio
, flags
, &context
);
716 if (uio_resid(auio
) < orig_resid
) {
717 os_atomic_or(&fp
->fp_glob
->fg_flag
, FWASWRITTEN
, relaxed
);
720 error
= fo_read(fp
, auio
, flags
, &context
);
724 *aresid
= uio_resid(auio
);
725 } else if (uio_resid(auio
) && error
== 0) {
729 fp_drop(p
, fd
, fp
, 0);
738 * Description: Duplicate a file descriptor.
740 * Parameters: p Process performing the dup
741 * uap->fd The fd to dup
742 * retval Pointer to the call return area
748 * *retval (modified) The new descriptor
751 sys_dup(proc_t p
, struct dup_args
*uap
, int32_t *retval
)
753 struct filedesc
*fdp
= p
->p_fd
;
759 if ((error
= fp_lookup(p
, old
, &fp
, 1))) {
763 if (fp_isguarded(fp
, GUARD_DUP
)) {
764 error
= fp_guard_exception(p
, old
, fp
, kGUARD_EXC_DUP
);
765 (void) fp_drop(p
, old
, fp
, 1);
769 if ((error
= fdalloc(p
, 0, &new))) {
770 fp_drop(p
, old
, fp
, 1);
774 error
= finishdup(p
, fdp
, old
, new, 0, retval
);
775 fp_drop(p
, old
, fp
, 1);
778 if (ENTR_SHOULDTRACE
&& FILEGLOB_DTYPE(fp
->fp_glob
) == DTYPE_SOCKET
) {
779 KERNEL_ENERGYTRACE(kEnTrActKernSocket
, DBG_FUNC_START
,
780 new, 0, (int64_t)VM_KERNEL_ADDRPERM(fp
->f_data
));
789 * Description: Duplicate a file descriptor to a particular value.
791 * Parameters: p Process performing the dup
792 * uap->from The fd to dup
793 * uap->to The fd to dup it to
794 * retval Pointer to the call return area
800 * *retval (modified) The new descriptor
803 sys_dup2(proc_t p
, struct dup2_args
*uap
, int32_t *retval
)
805 return dup2(p
, uap
->from
, uap
->to
, retval
);
809 dup2(proc_t p
, int old
, int new, int *retval
)
811 struct filedesc
*fdp
= p
->p_fd
;
812 struct fileproc
*fp
, *nfp
;
814 rlim_t nofile
= proc_limitgetcur(p
, RLIMIT_NOFILE
, TRUE
);
819 if ((error
= fp_lookup(p
, old
, &fp
, 1))) {
823 if (fp_isguarded(fp
, GUARD_DUP
)) {
824 error
= fp_guard_exception(p
, old
, fp
, kGUARD_EXC_DUP
);
825 (void) fp_drop(p
, old
, fp
, 1);
830 (rlim_t
)new >= nofile
||
831 new >= maxfilesperproc
) {
832 fp_drop(p
, old
, fp
, 1);
837 fp_drop(p
, old
, fp
, 1);
842 if (new < 0 || new >= fdp
->fd_nfiles
) {
843 if ((error
= fdalloc(p
, new, &i
))) {
844 fp_drop(p
, old
, fp
, 1);
854 if ((fdp
->fd_ofileflags
[new] & UF_RESERVED
) == UF_RESERVED
) {
855 fp_drop(p
, old
, fp
, 1);
856 procfdtbl_waitfd(p
, new);
858 proc_fdlock_assert(p
, LCK_MTX_ASSERT_OWNED
);
863 if ((nfp
= fdp
->fd_ofiles
[new]) != NULL
) {
864 if (fp_isguarded(nfp
, GUARD_CLOSE
)) {
865 fp_drop(p
, old
, fp
, 1);
866 error
= fp_guard_exception(p
,
867 new, nfp
, kGUARD_EXC_CLOSE
);
871 (void)fp_close_and_unlock(p
, new, nfp
, FD_DUP2RESV
);
873 assert(fdp
->fd_ofileflags
[new] & UF_RESERVED
);
876 if (fdp
->fd_ofiles
[new] != NULL
) {
877 panic("dup2: no ref on fileproc %d", new);
880 procfdtbl_reservefd(p
, new);
884 if (fdp
->fd_ofiles
[new] != 0) {
885 panic("dup2: overwriting fd_ofiles with new %d", new);
887 if ((fdp
->fd_ofileflags
[new] & UF_RESERVED
) == 0) {
888 panic("dup2: unreserved fileflags with new %d", new);
891 error
= finishdup(p
, fdp
, old
, new, 0, retval
);
892 fp_drop(p
, old
, fp
, 1);
902 * Description: The file control system call.
904 * Parameters: p Process performing the fcntl
905 * uap->fd The fd to operate against
906 * uap->cmd The command to perform
907 * uap->arg Pointer to the command argument
908 * retval Pointer to the call return area
911 * !0 Errno (see fcntl_nocancel)
914 * *retval (modified) fcntl return value (if any)
916 * Notes: This system call differs from fcntl_nocancel() in that it
917 * tests for cancellation prior to performing a potentially
918 * blocking operation.
921 sys_fcntl(proc_t p
, struct fcntl_args
*uap
, int32_t *retval
)
923 __pthread_testcancel(1);
924 return sys_fcntl_nocancel(p
, (struct fcntl_nocancel_args
*)uap
, retval
);
927 #define ACCOUNT_OPENFROM_ENTITLEMENT \
928 "com.apple.private.vfs.role-account-openfrom"
933 * Description: A non-cancel-testing file control system call.
935 * Parameters: p Process performing the fcntl
936 * uap->fd The fd to operate against
937 * uap->cmd The command to perform
938 * uap->arg Pointer to the command argument
939 * retval Pointer to the call return area
943 * fp_lookup:EBADF Bad file descriptor
955 * vnode_getwithref:???
963 * vnode_getwithref:???
970 * vnode_getwithref:???
972 * [F_SETSIZE,F_RDADVISE]
976 * vnode_getwithref:???
977 * [F_RDAHEAD,F_NOCACHE]
979 * vnode_getwithref:???
983 * *retval (modified) fcntl return value (if any)
986 sys_fcntl_nocancel(proc_t p
, struct fcntl_nocancel_args
*uap
, int32_t *retval
)
989 struct filedesc
*fdp
= p
->p_fd
;
992 struct vnode
*vp
= NULLVP
; /* for AUDIT_ARG() at end */
993 unsigned int oflags
, nflags
;
994 int i
, tmp
, error
, error2
, flg
= 0;
995 struct flock fl
= {};
996 struct flocktimeout fltimeout
;
997 struct timespec
*timeout
= NULL
;
998 struct vfs_context context
;
1006 int has_entitlement
= 0;
1008 AUDIT_ARG(fd
, uap
->fd
);
1009 AUDIT_ARG(cmd
, uap
->cmd
);
1011 nofile
= proc_limitgetcur(p
, RLIMIT_NOFILE
, TRUE
);
1014 if ((error
= fp_lookup(p
, fd
, &fp
, 1))) {
1018 context
.vc_thread
= current_thread();
1019 context
.vc_ucred
= fp
->f_cred
;
1021 is64bit
= proc_is64bit(p
);
1026 * Since the arg parameter is defined as a long but may be
1027 * either a long or a pointer we must take care to handle
1028 * sign extension issues. Our sys call munger will sign
1029 * extend a long when we are called from a 32-bit process.
1030 * Since we can never have an address greater than 32-bits
1031 * from a 32-bit process we lop off the top 32-bits to avoid
1032 * getting the wrong address
1034 argp
= CAST_USER_ADDR_T((uint32_t)uap
->arg
);
1038 error
= mac_file_check_fcntl(proc_ucred(p
), fp
->fp_glob
, uap
->cmd
,
1045 pop
= &fdp
->fd_ofileflags
[fd
];
1049 case F_DUPFD_CLOEXEC
:
1050 if (fp_isguarded(fp
, GUARD_DUP
)) {
1051 error
= fp_guard_exception(p
, fd
, fp
, kGUARD_EXC_DUP
);
1054 newmin
= CAST_DOWN_EXPLICIT(int, uap
->arg
); /* arg is an int, so we won't lose bits */
1055 AUDIT_ARG(value32
, newmin
);
1056 if ((rlim_t
)newmin
>= nofile
||
1057 newmin
>= maxfilesperproc
) {
1061 if ((error
= fdalloc(p
, newmin
, &i
))) {
1064 error
= finishdup(p
, fdp
, fd
, i
,
1065 uap
->cmd
== F_DUPFD_CLOEXEC
? UF_EXCLOSE
: 0, retval
);
1069 *retval
= (*pop
& UF_EXCLOSE
)? FD_CLOEXEC
: 0;
1074 AUDIT_ARG(value32
, (uint32_t)uap
->arg
);
1075 if (uap
->arg
& FD_CLOEXEC
) {
1078 if (fp_isguarded(fp
, 0)) {
1079 error
= fp_guard_exception(p
,
1080 fd
, fp
, kGUARD_EXC_NOCLOEXEC
);
1083 *pop
&= ~UF_EXCLOSE
;
1089 *retval
= OFLAGS(fp
->f_flag
);
1094 // FIXME (rdar://54898652)
1096 // this code is broken if fnctl(F_SETFL), ioctl() are
1097 // called concurrently for the same fileglob.
1099 tmp
= CAST_DOWN_EXPLICIT(int, uap
->arg
); /* arg is an int, so we won't lose bits */
1100 AUDIT_ARG(value32
, tmp
);
1102 os_atomic_rmw_loop(&fp
->f_flag
, oflags
, nflags
, relaxed
, {
1103 nflags
= oflags
& ~FCNTLFLAGS
;
1104 nflags
|= FFLAGS(tmp
) & FCNTLFLAGS
;
1106 tmp
= nflags
& FNONBLOCK
;
1107 error
= fo_ioctl(fp
, FIONBIO
, (caddr_t
)&tmp
, &context
);
1111 tmp
= nflags
& FASYNC
;
1112 error
= fo_ioctl(fp
, FIOASYNC
, (caddr_t
)&tmp
, &context
);
1116 os_atomic_andnot(&fp
->f_flag
, FNONBLOCK
, relaxed
);
1118 (void)fo_ioctl(fp
, FIONBIO
, (caddr_t
)&tmp
, &context
);
1122 if (fp
->f_type
== DTYPE_SOCKET
) {
1123 *retval
= ((struct socket
*)fp
->f_data
)->so_pgid
;
1127 error
= fo_ioctl(fp
, TIOCGPGRP
, (caddr_t
)retval
, &context
);
1132 tmp
= CAST_DOWN_EXPLICIT(pid_t
, uap
->arg
); /* arg is an int, so we won't lose bits */
1133 AUDIT_ARG(value32
, tmp
);
1134 if (fp
->f_type
== DTYPE_SOCKET
) {
1135 ((struct socket
*)fp
->f_data
)->so_pgid
= tmp
;
1139 if (fp
->f_type
== DTYPE_PIPE
) {
1140 error
= fo_ioctl(fp
, TIOCSPGRP
, (caddr_t
)&tmp
, &context
);
1147 proc_t p1
= proc_find(tmp
);
1152 tmp
= (int)p1
->p_pgrpid
;
1155 error
= fo_ioctl(fp
, TIOCSPGRP
, (caddr_t
)&tmp
, &context
);
1158 case F_SETNOSIGPIPE
:
1159 tmp
= CAST_DOWN_EXPLICIT(int, uap
->arg
);
1160 if (fp
->f_type
== DTYPE_SOCKET
) {
1162 error
= sock_setsockopt((struct socket
*)fp
->f_data
,
1163 SOL_SOCKET
, SO_NOSIGPIPE
, &tmp
, sizeof(tmp
));
1168 struct fileglob
*fg
= fp
->fp_glob
;
1170 lck_mtx_lock_spin(&fg
->fg_lock
);
1172 fg
->fg_lflags
|= FG_NOSIGPIPE
;
1174 fg
->fg_lflags
&= ~FG_NOSIGPIPE
;
1176 lck_mtx_unlock(&fg
->fg_lock
);
1181 case F_GETNOSIGPIPE
:
1182 if (fp
->f_type
== DTYPE_SOCKET
) {
1184 int retsize
= sizeof(*retval
);
1185 error
= sock_getsockopt((struct socket
*)fp
->f_data
,
1186 SOL_SOCKET
, SO_NOSIGPIPE
, retval
, &retsize
);
1191 *retval
= (fp
->fp_glob
->fg_lflags
& FG_NOSIGPIPE
) ?
1199 * If this is the only reference to this fglob in the process
1200 * and it's already marked as close-on-fork then mark it as
1201 * (immutably) "confined" i.e. any fd that points to it will
1202 * forever be close-on-fork, and attempts to use an IPC
1203 * mechanism to move the descriptor elsewhere will fail.
1205 if (CAST_DOWN_EXPLICIT(int, uap
->arg
)) {
1206 struct fileglob
*fg
= fp
->fp_glob
;
1208 lck_mtx_lock_spin(&fg
->fg_lock
);
1209 if (fg
->fg_lflags
& FG_CONFINED
) {
1211 } else if (1 != os_ref_get_count_raw(&fg
->fg_count
)) {
1212 error
= EAGAIN
; /* go close the dup .. */
1213 } else if (UF_FORKCLOSE
== (*pop
& UF_FORKCLOSE
)) {
1214 fg
->fg_lflags
|= FG_CONFINED
;
1217 error
= EBADF
; /* open without O_CLOFORK? */
1219 lck_mtx_unlock(&fg
->fg_lock
);
1222 * Other subsystems may have built on the immutability
1223 * of FG_CONFINED; clearing it may be tricky.
1225 error
= EPERM
; /* immutable */
1230 *retval
= (fp
->fp_glob
->fg_lflags
& FG_CONFINED
) ? 1 : 0;
1234 case F_SETLKWTIMEOUT
:
1236 case F_OFD_SETLKWTIMEOUT
:
1243 if (fp
->f_type
!= DTYPE_VNODE
) {
1247 vp
= (struct vnode
*)fp
->f_data
;
1250 offset
= fp
->f_offset
;
1253 /* Copy in the lock structure */
1254 if (F_SETLKWTIMEOUT
== uap
->cmd
||
1255 F_OFD_SETLKWTIMEOUT
== uap
->cmd
) {
1256 error
= copyin(argp
, (caddr_t
) &fltimeout
, sizeof(fltimeout
));
1261 timeout
= &fltimeout
.timeout
;
1263 error
= copyin(argp
, (caddr_t
)&fl
, sizeof(fl
));
1269 /* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
1270 /* and ending byte for EOVERFLOW in SEEK_SET */
1271 error
= check_file_seek_range(&fl
, offset
);
1276 if ((error
= vnode_getwithref(vp
))) {
1279 if (fl
.l_whence
== SEEK_CUR
) {
1280 fl
.l_start
+= offset
;
1284 error
= mac_file_check_lock(proc_ucred(p
), fp
->fp_glob
,
1287 (void)vnode_put(vp
);
1294 case F_OFD_SETLKWTIMEOUT
:
1296 switch (fl
.l_type
) {
1298 if ((fflag
& FREAD
) == 0) {
1302 error
= VNOP_ADVLOCK(vp
, (caddr_t
)fp
->fp_glob
,
1303 F_SETLK
, &fl
, flg
, &context
, timeout
);
1306 if ((fflag
& FWRITE
) == 0) {
1310 error
= VNOP_ADVLOCK(vp
, (caddr_t
)fp
->fp_glob
,
1311 F_SETLK
, &fl
, flg
, &context
, timeout
);
1314 error
= VNOP_ADVLOCK(vp
, (caddr_t
)fp
->fp_glob
,
1315 F_UNLCK
, &fl
, F_OFD_LOCK
, &context
,
1323 (F_RDLCK
== fl
.l_type
|| F_WRLCK
== fl
.l_type
)) {
1324 struct fileglob
*fg
= fp
->fp_glob
;
1327 * arrange F_UNLCK on last close (once
1328 * set, FG_HAS_OFDLOCK is immutable)
1330 if ((fg
->fg_lflags
& FG_HAS_OFDLOCK
) == 0) {
1331 lck_mtx_lock_spin(&fg
->fg_lock
);
1332 fg
->fg_lflags
|= FG_HAS_OFDLOCK
;
1333 lck_mtx_unlock(&fg
->fg_lock
);
1339 switch (fl
.l_type
) {
1341 if ((fflag
& FREAD
) == 0) {
1345 // XXX UInt32 unsafe for LP64 kernel
1346 os_atomic_or(&p
->p_ladvflag
, P_LADVLOCK
, relaxed
);
1347 error
= VNOP_ADVLOCK(vp
, (caddr_t
)p
,
1348 F_SETLK
, &fl
, flg
, &context
, timeout
);
1351 if ((fflag
& FWRITE
) == 0) {
1355 // XXX UInt32 unsafe for LP64 kernel
1356 os_atomic_or(&p
->p_ladvflag
, P_LADVLOCK
, relaxed
);
1357 error
= VNOP_ADVLOCK(vp
, (caddr_t
)p
,
1358 F_SETLK
, &fl
, flg
, &context
, timeout
);
1361 error
= VNOP_ADVLOCK(vp
, (caddr_t
)p
,
1362 F_UNLCK
, &fl
, F_POSIX
, &context
, timeout
);
1370 (void) vnode_put(vp
);
1376 case F_OFD_GETLKPID
:
1377 if (fp
->f_type
!= DTYPE_VNODE
) {
1381 vp
= (struct vnode
*)fp
->f_data
;
1383 offset
= fp
->f_offset
;
1386 /* Copy in the lock structure */
1387 error
= copyin(argp
, (caddr_t
)&fl
, sizeof(fl
));
1392 /* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
1393 /* and ending byte for EOVERFLOW in SEEK_SET */
1394 error
= check_file_seek_range(&fl
, offset
);
1399 if ((fl
.l_whence
== SEEK_SET
) && (fl
.l_start
< 0)) {
1404 switch (fl
.l_type
) {
1414 switch (fl
.l_whence
) {
1424 if ((error
= vnode_getwithref(vp
)) == 0) {
1425 if (fl
.l_whence
== SEEK_CUR
) {
1426 fl
.l_start
+= offset
;
1430 error
= mac_file_check_lock(proc_ucred(p
), fp
->fp_glob
,
1436 error
= VNOP_ADVLOCK(vp
, (caddr_t
)fp
->fp_glob
,
1437 F_GETLK
, &fl
, F_OFD_LOCK
, &context
, NULL
);
1439 case F_OFD_GETLKPID
:
1440 error
= VNOP_ADVLOCK(vp
, (caddr_t
)fp
->fp_glob
,
1441 F_GETLKPID
, &fl
, F_OFD_LOCK
, &context
, NULL
);
1444 error
= VNOP_ADVLOCK(vp
, (caddr_t
)p
,
1445 uap
->cmd
, &fl
, F_POSIX
, &context
, NULL
);
1449 (void)vnode_put(vp
);
1452 error
= copyout((caddr_t
)&fl
, argp
, sizeof(fl
));
1457 case F_PREALLOCATE
: {
1458 fstore_t alloc_struct
; /* structure for allocate command */
1459 u_int32_t alloc_flags
= 0;
1461 if (fp
->f_type
!= DTYPE_VNODE
) {
1466 vp
= (struct vnode
*)fp
->f_data
;
1469 /* make sure that we have write permission */
1470 if ((fp
->f_flag
& FWRITE
) == 0) {
1475 error
= copyin(argp
, (caddr_t
)&alloc_struct
, sizeof(alloc_struct
));
1480 /* now set the space allocated to 0 */
1481 alloc_struct
.fst_bytesalloc
= 0;
1484 * Do some simple parameter checking
1487 /* set up the flags */
1489 alloc_flags
|= PREALLOCATE
;
1491 if (alloc_struct
.fst_flags
& F_ALLOCATECONTIG
) {
1492 alloc_flags
|= ALLOCATECONTIG
;
1495 if (alloc_struct
.fst_flags
& F_ALLOCATEALL
) {
1496 alloc_flags
|= ALLOCATEALL
;
1500 * Do any position mode specific stuff. The only
1501 * position mode supported now is PEOFPOSMODE
1504 switch (alloc_struct
.fst_posmode
) {
1506 if (alloc_struct
.fst_offset
!= 0) {
1511 alloc_flags
|= ALLOCATEFROMPEOF
;
1515 if (alloc_struct
.fst_offset
<= 0) {
1520 alloc_flags
|= ALLOCATEFROMVOL
;
1528 if ((error
= vnode_getwithref(vp
)) == 0) {
1530 * call allocate to get the space
1532 error
= VNOP_ALLOCATE(vp
, alloc_struct
.fst_length
, alloc_flags
,
1533 &alloc_struct
.fst_bytesalloc
, alloc_struct
.fst_offset
,
1535 (void)vnode_put(vp
);
1537 error2
= copyout((caddr_t
)&alloc_struct
, argp
, sizeof(alloc_struct
));
1548 if (fp
->f_type
!= DTYPE_VNODE
) {
1553 vp
= (struct vnode
*)fp
->f_data
;
1556 /* need write permissions */
1557 if ((fp
->f_flag
& FWRITE
) == 0) {
1562 if ((error
= copyin(argp
, (caddr_t
)&args
, sizeof(args
)))) {
1566 if ((error
= vnode_getwithref(vp
))) {
1571 if ((error
= mac_vnode_check_write(&context
, fp
->fp_glob
->fg_cred
, vp
))) {
1572 (void)vnode_put(vp
);
1577 error
= VNOP_IOCTL(vp
, F_PUNCHHOLE
, (caddr_t
)&args
, 0, &context
);
1578 (void)vnode_put(vp
);
1582 case F_TRIM_ACTIVE_FILE
: {
1583 ftrimactivefile_t args
;
1585 if (priv_check_cred(kauth_cred_get(), PRIV_TRIM_ACTIVE_FILE
, 0)) {
1590 if (fp
->f_type
!= DTYPE_VNODE
) {
1595 vp
= (struct vnode
*)fp
->f_data
;
1598 /* need write permissions */
1599 if ((fp
->f_flag
& FWRITE
) == 0) {
1604 if ((error
= copyin(argp
, (caddr_t
)&args
, sizeof(args
)))) {
1608 if ((error
= vnode_getwithref(vp
))) {
1612 error
= VNOP_IOCTL(vp
, F_TRIM_ACTIVE_FILE
, (caddr_t
)&args
, 0, &context
);
1613 (void)vnode_put(vp
);
1617 case F_SPECULATIVE_READ
: {
1620 if (fp
->f_type
!= DTYPE_VNODE
) {
1625 vp
= (struct vnode
*)fp
->f_data
;
1628 if ((error
= copyin(argp
, (caddr_t
)&args
, sizeof(args
)))) {
1632 /* Discard invalid offsets or lengths */
1633 if ((args
.fsr_offset
< 0) || (args
.fsr_length
< 0)) {
1639 * Round the file offset down to a page-size boundary (or to 0).
1640 * The filesystem will need to round the length up to the end of the page boundary
1641 * or to the EOF of the file.
1643 uint64_t foff
= (((uint64_t)args
.fsr_offset
) & ~((uint64_t)PAGE_MASK
));
1644 uint64_t foff_delta
= args
.fsr_offset
- foff
;
1645 args
.fsr_offset
= (off_t
) foff
;
1648 * Now add in the delta to the supplied length. Since we may have adjusted the
1649 * offset, increase it by the amount that we adjusted.
1651 args
.fsr_length
+= foff_delta
;
1653 if ((error
= vnode_getwithref(vp
))) {
1656 error
= VNOP_IOCTL(vp
, F_SPECULATIVE_READ
, (caddr_t
)&args
, 0, &context
);
1657 (void)vnode_put(vp
);
1662 if (fp
->f_type
!= DTYPE_VNODE
) {
1666 vp
= (struct vnode
*)fp
->f_data
;
1669 error
= copyin(argp
, (caddr_t
)&offset
, sizeof(off_t
));
1673 AUDIT_ARG(value64
, offset
);
1675 error
= vnode_getwithref(vp
);
1681 error
= mac_vnode_check_truncate(&context
,
1682 fp
->fp_glob
->fg_cred
, vp
);
1684 (void)vnode_put(vp
);
1689 * Make sure that we are root. Growing a file
1690 * without zero filling the data is a security hole.
1692 if (!kauth_cred_issuser(kauth_cred_get())) {
1696 * Require privilege to change file size without zerofill,
1697 * else will change the file size and zerofill it.
1699 error
= priv_check_cred(kauth_cred_get(), PRIV_VFS_SETSIZE
, 0);
1701 error
= vnode_setsize(vp
, offset
, IO_NOZEROFILL
, &context
);
1703 error
= vnode_setsize(vp
, offset
, 0, &context
);
1708 mac_vnode_notify_truncate(&context
, fp
->fp_glob
->fg_cred
, vp
);
1713 (void)vnode_put(vp
);
1717 if (fp
->f_type
!= DTYPE_VNODE
) {
1722 os_atomic_andnot(&fp
->fp_glob
->fg_flag
, FNORDAHEAD
, relaxed
);
1724 os_atomic_or(&fp
->fp_glob
->fg_flag
, FNORDAHEAD
, relaxed
);
1729 if (fp
->f_type
!= DTYPE_VNODE
) {
1734 os_atomic_or(&fp
->fp_glob
->fg_flag
, FNOCACHE
, relaxed
);
1736 os_atomic_andnot(&fp
->fp_glob
->fg_flag
, FNOCACHE
, relaxed
);
1741 if (fp
->f_type
!= DTYPE_VNODE
) {
1746 os_atomic_or(&fp
->fp_glob
->fg_flag
, FNODIRECT
, relaxed
);
1748 os_atomic_andnot(&fp
->fp_glob
->fg_flag
, FNODIRECT
, relaxed
);
1752 case F_SINGLE_WRITER
:
1753 if (fp
->f_type
!= DTYPE_VNODE
) {
1758 os_atomic_or(&fp
->fp_glob
->fg_flag
, FSINGLE_WRITER
, relaxed
);
1760 os_atomic_andnot(&fp
->fp_glob
->fg_flag
, FSINGLE_WRITER
, relaxed
);
1764 case F_GLOBAL_NOCACHE
:
1765 if (fp
->f_type
!= DTYPE_VNODE
) {
1769 vp
= (struct vnode
*)fp
->f_data
;
1772 if ((error
= vnode_getwithref(vp
)) == 0) {
1773 *retval
= vnode_isnocache(vp
);
1776 vnode_setnocache(vp
);
1778 vnode_clearnocache(vp
);
1781 (void)vnode_put(vp
);
1785 case F_CHECK_OPENEVT
:
1786 if (fp
->f_type
!= DTYPE_VNODE
) {
1790 vp
= (struct vnode
*)fp
->f_data
;
1793 if ((error
= vnode_getwithref(vp
)) == 0) {
1794 *retval
= vnode_is_openevt(vp
);
1797 vnode_set_openevt(vp
);
1799 vnode_clear_openevt(vp
);
1802 (void)vnode_put(vp
);
1807 struct radvisory ra_struct
;
1809 if (fp
->f_type
!= DTYPE_VNODE
) {
1813 vp
= (struct vnode
*)fp
->f_data
;
1816 if ((error
= copyin(argp
, (caddr_t
)&ra_struct
, sizeof(ra_struct
)))) {
1819 if (ra_struct
.ra_offset
< 0 || ra_struct
.ra_count
< 0) {
1823 if ((error
= vnode_getwithref(vp
)) == 0) {
1824 error
= VNOP_IOCTL(vp
, F_RDADVISE
, (caddr_t
)&ra_struct
, 0, &context
);
1826 (void)vnode_put(vp
);
1833 if (fp
->f_type
!= DTYPE_VNODE
) {
1837 vp
= (struct vnode
*)fp
->f_data
;
1840 if ((error
= vnode_getwithref(vp
)) == 0) {
1841 error
= VNOP_FSYNC(vp
, MNT_NOWAIT
, &context
);
1843 (void)vnode_put(vp
);
1848 case F_LOG2PHYS_EXT
: {
1849 struct log2phys l2p_struct
= {}; /* structure for allocate command */
1852 off_t file_offset
= 0;
1856 if (uap
->cmd
== F_LOG2PHYS_EXT
) {
1857 error
= copyin(argp
, (caddr_t
)&l2p_struct
, sizeof(l2p_struct
));
1861 file_offset
= l2p_struct
.l2p_devoffset
;
1863 file_offset
= fp
->f_offset
;
1865 if (fp
->f_type
!= DTYPE_VNODE
) {
1869 vp
= (struct vnode
*)fp
->f_data
;
1871 if ((error
= vnode_getwithref(vp
))) {
1874 error
= VNOP_OFFTOBLK(vp
, file_offset
, &lbn
);
1876 (void)vnode_put(vp
);
1879 error
= VNOP_BLKTOOFF(vp
, lbn
, &offset
);
1881 (void)vnode_put(vp
);
1884 devBlockSize
= vfs_devblocksize(vnode_mount(vp
));
1885 if (uap
->cmd
== F_LOG2PHYS_EXT
) {
1886 if (l2p_struct
.l2p_contigbytes
< 0) {
1892 a_size
= (size_t)MIN((uint64_t)l2p_struct
.l2p_contigbytes
, SIZE_MAX
);
1894 a_size
= devBlockSize
;
1897 error
= VNOP_BLOCKMAP(vp
, offset
, a_size
, &bn
, &run
, NULL
, 0, &context
);
1899 (void)vnode_put(vp
);
1902 l2p_struct
.l2p_flags
= 0; /* for now */
1903 if (uap
->cmd
== F_LOG2PHYS_EXT
) {
1904 l2p_struct
.l2p_contigbytes
= run
- (file_offset
- offset
);
1906 l2p_struct
.l2p_contigbytes
= 0; /* for now */
1910 * The block number being -1 suggests that the file offset is not backed
1911 * by any real blocks on-disk. As a result, just let it be passed back up wholesale.
1914 /* Don't multiply it by the block size */
1915 l2p_struct
.l2p_devoffset
= bn
;
1917 l2p_struct
.l2p_devoffset
= bn
* devBlockSize
;
1918 l2p_struct
.l2p_devoffset
+= file_offset
- offset
;
1920 error
= copyout((caddr_t
)&l2p_struct
, argp
, sizeof(l2p_struct
));
1925 case F_GETPATH_NOFIRMLINK
: {
1929 if (fp
->f_type
!= DTYPE_VNODE
) {
1933 vp
= (struct vnode
*)fp
->f_data
;
1936 pathlen
= MAXPATHLEN
;
1937 MALLOC(pathbufp
, char *, pathlen
, M_TEMP
, M_WAITOK
);
1938 if (pathbufp
== NULL
) {
1942 if ((error
= vnode_getwithref(vp
)) == 0) {
1943 if (uap
->cmd
== F_GETPATH_NOFIRMLINK
) {
1944 error
= vn_getpath_ext(vp
, NULL
, pathbufp
, &pathlen
, VN_GETPATH_NO_FIRMLINK
);
1946 error
= vn_getpath(vp
, pathbufp
, &pathlen
);
1948 (void)vnode_put(vp
);
1951 error
= copyout((caddr_t
)pathbufp
, argp
, pathlen
);
1954 FREE(pathbufp
, M_TEMP
);
1958 case F_PATHPKG_CHECK
: {
1962 if (fp
->f_type
!= DTYPE_VNODE
) {
1966 vp
= (struct vnode
*)fp
->f_data
;
1969 pathlen
= MAXPATHLEN
;
1970 pathbufp
= zalloc(ZV_NAMEI
);
1972 if ((error
= copyinstr(argp
, pathbufp
, MAXPATHLEN
, &pathlen
)) == 0) {
1973 if ((error
= vnode_getwithref(vp
)) == 0) {
1974 AUDIT_ARG(text
, pathbufp
);
1975 error
= vn_path_package_check(vp
, pathbufp
, (int)pathlen
, retval
);
1977 (void)vnode_put(vp
);
1980 zfree(ZV_NAMEI
, pathbufp
);
1984 case F_CHKCLEAN
: // used by regression tests to see if all dirty pages got cleaned by fsync()
1985 case F_FULLFSYNC
: // fsync + flush the journal + DKIOCSYNCHRONIZE
1986 case F_BARRIERFSYNC
: // fsync + barrier
1987 case F_FREEZE_FS
: // freeze all other fs operations for the fs of this fd
1988 case F_THAW_FS
: { // thaw all frozen fs operations for the fs of this fd
1989 if (fp
->f_type
!= DTYPE_VNODE
) {
1993 vp
= (struct vnode
*)fp
->f_data
;
1996 if ((error
= vnode_getwithref(vp
)) == 0) {
1997 error
= VNOP_IOCTL(vp
, uap
->cmd
, (caddr_t
)NULL
, 0, &context
);
1999 (void)vnode_put(vp
);
2005 * SPI (private) for opening a file starting from a dir fd
2008 struct user_fopenfrom fopen
;
2009 struct vnode_attr va
;
2010 struct nameidata nd
;
2013 /* Check if this isn't a valid file descriptor */
2014 if ((fp
->f_type
!= DTYPE_VNODE
) ||
2015 (fp
->f_flag
& FREAD
) == 0) {
2019 vp
= (struct vnode
*)fp
->f_data
;
2022 if (vnode_getwithref(vp
)) {
2027 /* Only valid for directories */
2028 if (vp
->v_type
!= VDIR
) {
2035 * Only entitled apps may use the credentials of the thread
2036 * that opened the file descriptor.
2037 * Non-entitled threads will use their own context.
2039 if (IOTaskHasEntitlement(current_task(), ACCOUNT_OPENFROM_ENTITLEMENT
)) {
2040 has_entitlement
= 1;
2043 /* Get flags, mode and pathname arguments. */
2044 if (IS_64BIT_PROCESS(p
)) {
2045 error
= copyin(argp
, &fopen
, sizeof(fopen
));
2047 struct user32_fopenfrom fopen32
;
2049 error
= copyin(argp
, &fopen32
, sizeof(fopen32
));
2050 fopen
.o_flags
= fopen32
.o_flags
;
2051 fopen
.o_mode
= fopen32
.o_mode
;
2052 fopen
.o_pathname
= CAST_USER_ADDR_T(fopen32
.o_pathname
);
2058 AUDIT_ARG(fflags
, fopen
.o_flags
);
2059 AUDIT_ARG(mode
, fopen
.o_mode
);
2061 /* Mask off all but regular access permissions */
2062 cmode
= ((fopen
.o_mode
& ~fdp
->fd_cmask
) & ALLPERMS
) & ~S_ISTXT
;
2063 VATTR_SET(&va
, va_mode
, cmode
& ACCESSPERMS
);
2065 /* Start the lookup relative to the file descriptor's vnode. */
2066 NDINIT(&nd
, LOOKUP
, OP_OPEN
, USEDVP
| FOLLOW
| AUDITVNPATH1
, UIO_USERSPACE
,
2067 fopen
.o_pathname
, has_entitlement
? &context
: vfs_context_current());
2070 error
= open1(has_entitlement
? &context
: vfs_context_current(),
2071 &nd
, fopen
.o_flags
, &va
, fileproc_alloc_init
, NULL
, retval
);
2077 * SPI (private) for unlinking a file starting from a dir fd
2079 case F_UNLINKFROM
: {
2080 user_addr_t pathname
;
2082 /* Check if this isn't a valid file descriptor */
2083 if ((fp
->f_type
!= DTYPE_VNODE
) ||
2084 (fp
->f_flag
& FREAD
) == 0) {
2088 vp
= (struct vnode
*)fp
->f_data
;
2091 if (vnode_getwithref(vp
)) {
2096 /* Only valid for directories */
2097 if (vp
->v_type
!= VDIR
) {
2104 * Only entitled apps may use the credentials of the thread
2105 * that opened the file descriptor.
2106 * Non-entitled threads will use their own context.
2108 if (IOTaskHasEntitlement(current_task(), ACCOUNT_OPENFROM_ENTITLEMENT
)) {
2109 has_entitlement
= 1;
2112 /* Get flags, mode and pathname arguments. */
2113 if (IS_64BIT_PROCESS(p
)) {
2114 pathname
= (user_addr_t
)argp
;
2116 pathname
= CAST_USER_ADDR_T(argp
);
2119 /* Start the lookup relative to the file descriptor's vnode. */
2120 error
= unlink1(has_entitlement
? &context
: vfs_context_current(),
2121 vp
, pathname
, UIO_USERSPACE
, 0);
2129 case F_ADDFILESIGS_FOR_DYLD_SIM
:
2130 case F_ADDFILESIGS_RETURN
:
2131 case F_ADDFILESIGS_INFO
:
2133 struct cs_blob
*blob
= NULL
;
2134 struct user_fsignatures fs
;
2136 vm_offset_t kernel_blob_addr
;
2137 vm_size_t kernel_blob_size
;
2138 int blob_add_flags
= 0;
2139 const size_t sizeof_fs
= (uap
->cmd
== F_ADDFILESIGS_INFO
?
2140 offsetof(struct user_fsignatures
, fs_cdhash
/* first output element */) :
2141 offsetof(struct user_fsignatures
, fs_fsignatures_size
/* compat */));
2143 if (fp
->f_type
!= DTYPE_VNODE
) {
2147 vp
= (struct vnode
*)fp
->f_data
;
2150 if (uap
->cmd
== F_ADDFILESIGS_FOR_DYLD_SIM
) {
2151 blob_add_flags
|= MAC_VNODE_CHECK_DYLD_SIM
;
2152 if ((p
->p_csflags
& CS_KILL
) == 0) {
2154 p
->p_csflags
|= CS_KILL
;
2159 error
= vnode_getwithref(vp
);
2164 if (IS_64BIT_PROCESS(p
)) {
2165 error
= copyin(argp
, &fs
, sizeof_fs
);
2167 if (uap
->cmd
== F_ADDFILESIGS_INFO
) {
2173 struct user32_fsignatures fs32
;
2175 error
= copyin(argp
, &fs32
, sizeof(fs32
));
2176 fs
.fs_file_start
= fs32
.fs_file_start
;
2177 fs
.fs_blob_start
= CAST_USER_ADDR_T(fs32
.fs_blob_start
);
2178 fs
.fs_blob_size
= fs32
.fs_blob_size
;
2187 * First check if we have something loaded at this offset
2189 blob
= ubc_cs_blob_get(vp
, CPU_TYPE_ANY
, CPU_SUBTYPE_ANY
, fs
.fs_file_start
);
2191 /* If this is for dyld_sim revalidate the blob */
2192 if (uap
->cmd
== F_ADDFILESIGS_FOR_DYLD_SIM
) {
2193 error
= ubc_cs_blob_revalidate(vp
, blob
, NULL
, blob_add_flags
, proc_platform(p
));
2196 if (error
!= EAGAIN
) {
2206 * An arbitrary limit, to prevent someone from mapping in a 20GB blob. This should cover
2207 * our use cases for the immediate future, but note that at the time of this commit, some
2208 * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
2210 * We should consider how we can manage this more effectively; the above means that some
2211 * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
2212 * threshold considered ridiculous at the time of this change.
2214 #define CS_MAX_BLOB_SIZE (40ULL * 1024ULL * 1024ULL)
2215 if (fs
.fs_blob_size
> CS_MAX_BLOB_SIZE
) {
2221 kernel_blob_size
= CAST_DOWN(vm_size_t
, fs
.fs_blob_size
);
2222 kr
= ubc_cs_blob_allocate(&kernel_blob_addr
, &kernel_blob_size
);
2223 if (kr
!= KERN_SUCCESS
|| kernel_blob_size
< fs
.fs_blob_size
) {
2229 if (uap
->cmd
== F_ADDSIGS
) {
2230 error
= copyin(fs
.fs_blob_start
,
2231 (void *) kernel_blob_addr
,
2233 } else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM || F_ADDFILESIGS_INFO */
2236 error
= vn_rdwr(UIO_READ
,
2238 (caddr_t
) kernel_blob_addr
,
2239 (int)kernel_blob_size
,
2240 fs
.fs_file_start
+ fs
.fs_blob_start
,
2246 if ((error
== 0) && resid
) {
2247 /* kernel_blob_size rounded to a page size, but signature may be at end of file */
2248 memset((void *)(kernel_blob_addr
+ (kernel_blob_size
- resid
)), 0x0, resid
);
2253 ubc_cs_blob_deallocate(kernel_blob_addr
,
2260 error
= ubc_cs_blob_add(vp
,
2262 CPU_TYPE_ANY
, /* not for a specific architecture */
2271 /* ubc_blob_add() has consumed "kernel_blob_addr" if it is zeroed */
2273 if (kernel_blob_addr
) {
2274 ubc_cs_blob_deallocate(kernel_blob_addr
,
2280 #if CHECK_CS_VALIDATION_BITMAP
2281 ubc_cs_validation_bitmap_allocate( vp
);
2286 if (uap
->cmd
== F_ADDFILESIGS_RETURN
|| uap
->cmd
== F_ADDFILESIGS_FOR_DYLD_SIM
||
2287 uap
->cmd
== F_ADDFILESIGS_INFO
) {
2289 * The first element of the structure is a
2290 * off_t that happen to have the same size for
2291 * all archs. Lets overwrite that.
2293 off_t end_offset
= 0;
2295 end_offset
= blob
->csb_end_offset
;
2297 error
= copyout(&end_offset
, argp
, sizeof(end_offset
));
2305 if (uap
->cmd
== F_ADDFILESIGS_INFO
) {
2306 /* Return information. What we copy out depends on the size of the
2307 * passed in structure, to keep binary compatibility. */
2309 if (fs
.fs_fsignatures_size
>= sizeof(struct user_fsignatures
)) {
2310 // enough room for fs_cdhash[20]+fs_hash_type
2313 error
= copyout(blob
->csb_cdhash
,
2314 (vm_address_t
)argp
+ offsetof(struct user_fsignatures
, fs_cdhash
),
2315 USER_FSIGNATURES_CDHASH_LEN
);
2320 int hashtype
= cs_hash_type(blob
->csb_hashtype
);
2321 error
= copyout(&hashtype
,
2322 (vm_address_t
)argp
+ offsetof(struct user_fsignatures
, fs_hash_type
),
2332 (void) vnode_put(vp
);
2335 #if CONFIG_SUPPLEMENTAL_SIGNATURES
2336 case F_ADDFILESUPPL
:
2339 struct cs_blob
*blob
= NULL
;
2340 struct user_fsupplement fs
;
2342 struct fileproc
* orig_fp
= NULL
;
2344 vm_offset_t kernel_blob_addr
;
2345 vm_size_t kernel_blob_size
;
2347 if (!IS_64BIT_PROCESS(p
)) {
2349 goto out
; // drop fp and unlock fds
2352 if (fp
->f_type
!= DTYPE_VNODE
) {
2357 error
= copyin(argp
, &fs
, sizeof(fs
));
2362 orig_fd
= fs
.fs_orig_fd
;
2363 if ((error
= fp_lookup(p
, orig_fd
, &orig_fp
, 1))) {
2364 printf("CODE SIGNING: Failed to find original file for supplemental signature attachment\n");
2368 if (orig_fp
->f_type
!= DTYPE_VNODE
) {
2370 fp_drop(p
, orig_fd
, orig_fp
, 1);
2374 ivp
= (struct vnode
*)orig_fp
->f_data
;
2376 vp
= (struct vnode
*)fp
->f_data
;
2380 error
= vnode_getwithref(ivp
);
2382 fp_drop(p
, orig_fd
, orig_fp
, 0);
2383 goto outdrop
; //drop fp
2386 error
= vnode_getwithref(vp
);
2389 fp_drop(p
, orig_fd
, orig_fp
, 0);
2393 if (fs
.fs_blob_size
> CS_MAX_BLOB_SIZE
) {
2395 goto dropboth
; // drop iocounts on vp and ivp, drop orig_fp then drop fp via outdrop
2398 kernel_blob_size
= CAST_DOWN(vm_size_t
, fs
.fs_blob_size
);
2399 kr
= ubc_cs_blob_allocate(&kernel_blob_addr
, &kernel_blob_size
);
2400 if (kr
!= KERN_SUCCESS
) {
2406 error
= vn_rdwr(UIO_READ
, vp
,
2407 (caddr_t
)kernel_blob_addr
, (int)kernel_blob_size
,
2408 fs
.fs_file_start
+ fs
.fs_blob_start
,
2410 kauth_cred_get(), &resid
, p
);
2411 if ((error
== 0) && resid
) {
2412 /* kernel_blob_size rounded to a page size, but signature may be at end of file */
2413 memset((void *)(kernel_blob_addr
+ (kernel_blob_size
- resid
)), 0x0, resid
);
2417 ubc_cs_blob_deallocate(kernel_blob_addr
,
2422 error
= ubc_cs_blob_add_supplement(vp
, ivp
, fs
.fs_file_start
,
2423 &kernel_blob_addr
, kernel_blob_size
, &blob
);
2425 /* ubc_blob_add_supplement() has consumed kernel_blob_addr if it is zeroed */
2427 if (kernel_blob_addr
) {
2428 ubc_cs_blob_deallocate(kernel_blob_addr
,
2435 fp_drop(p
, orig_fd
, orig_fp
, 0);
2441 fp_drop(p
, orig_fd
, orig_fp
, 0);
2451 struct fileglob
*fg
;
2454 if (fp
->f_type
!= DTYPE_VNODE
) {
2461 if (IS_64BIT_PROCESS(p
)) {
2462 error
= copyin(argp
, &lv
, sizeof(lv
));
2464 struct user32_fchecklv lv32
= {};
2466 error
= copyin(argp
, &lv32
, sizeof(lv32
));
2467 lv
.lv_file_start
= lv32
.lv_file_start
;
2468 lv
.lv_error_message
= (void *)(uintptr_t)lv32
.lv_error_message
;
2469 lv
.lv_error_message_size
= lv32
.lv_error_message_size
;
2476 error
= mac_file_check_library_validation(p
, fg
, lv
.lv_file_start
,
2477 (user_long_t
)lv
.lv_error_message
, lv
.lv_error_message_size
);
2482 case F_GETSIGSINFO
: {
2483 struct cs_blob
*blob
= NULL
;
2484 fgetsigsinfo_t sigsinfo
= {};
2486 if (fp
->f_type
!= DTYPE_VNODE
) {
2490 vp
= (struct vnode
*)fp
->f_data
;
2493 error
= vnode_getwithref(vp
);
2498 error
= copyin(argp
, &sigsinfo
, sizeof(sigsinfo
));
2504 blob
= ubc_cs_blob_get(vp
, CPU_TYPE_ANY
, CPU_SUBTYPE_ANY
, sigsinfo
.fg_file_start
);
2510 switch (sigsinfo
.fg_info_request
) {
2511 case GETSIGSINFO_PLATFORM_BINARY
:
2512 sigsinfo
.fg_sig_is_platform
= blob
->csb_platform_binary
;
2513 error
= copyout(&sigsinfo
.fg_sig_is_platform
,
2514 (vm_address_t
)argp
+ offsetof(struct fgetsigsinfo
, fg_sig_is_platform
),
2515 sizeof(sigsinfo
.fg_sig_is_platform
));
2530 case F_GETPROTECTIONCLASS
: {
2531 if (fp
->f_type
!= DTYPE_VNODE
) {
2535 vp
= (struct vnode
*)fp
->f_data
;
2539 if (vnode_getwithref(vp
)) {
2544 struct vnode_attr va
;
2547 VATTR_WANTED(&va
, va_dataprotect_class
);
2548 error
= VNOP_GETATTR(vp
, &va
, &context
);
2550 if (VATTR_IS_SUPPORTED(&va
, va_dataprotect_class
)) {
2551 *retval
= va
.va_dataprotect_class
;
2561 case F_SETPROTECTIONCLASS
: {
2562 /* tmp must be a valid PROTECTION_CLASS_* */
2563 tmp
= CAST_DOWN_EXPLICIT(uint32_t, uap
->arg
);
2565 if (fp
->f_type
!= DTYPE_VNODE
) {
2569 vp
= (struct vnode
*)fp
->f_data
;
2573 if (vnode_getwithref(vp
)) {
2578 /* Only go forward if you have write access */
2579 vfs_context_t ctx
= vfs_context_current();
2580 if (vnode_authorize(vp
, NULLVP
, (KAUTH_VNODE_ACCESS
| KAUTH_VNODE_WRITE_DATA
), ctx
) != 0) {
2586 struct vnode_attr va
;
2589 VATTR_SET(&va
, va_dataprotect_class
, tmp
);
2591 error
= VNOP_SETATTR(vp
, &va
, ctx
);
2597 case F_TRANSCODEKEY
: {
2598 if (fp
->f_type
!= DTYPE_VNODE
) {
2603 vp
= (struct vnode
*)fp
->f_data
;
2606 if (vnode_getwithref(vp
)) {
2612 .len
= CP_MAX_WRAPPEDKEYSIZE
,
2615 MALLOC(k
.key
, char *, k
.len
, M_TEMP
, M_WAITOK
| M_ZERO
);
2617 error
= VNOP_IOCTL(vp
, F_TRANSCODEKEY
, (caddr_t
)&k
, 1, &context
);
2622 error
= copyout(k
.key
, argp
, k
.len
);
2626 FREE(k
.key
, M_TEMP
);
2631 case F_GETPROTECTIONLEVEL
: {
2632 if (fp
->f_type
!= DTYPE_VNODE
) {
2637 vp
= (struct vnode
*) fp
->f_data
;
2640 if (vnode_getwithref(vp
)) {
2645 error
= VNOP_IOCTL(vp
, F_GETPROTECTIONLEVEL
, (caddr_t
)retval
, 0, &context
);
2651 case F_GETDEFAULTPROTLEVEL
: {
2652 if (fp
->f_type
!= DTYPE_VNODE
) {
2657 vp
= (struct vnode
*) fp
->f_data
;
2660 if (vnode_getwithref(vp
)) {
2666 * if cp_get_major_vers fails, error will be set to proper errno
2667 * and cp_version will still be 0.
2670 error
= VNOP_IOCTL(vp
, F_GETDEFAULTPROTLEVEL
, (caddr_t
)retval
, 0, &context
);
2676 #endif /* CONFIG_PROTECT */
2678 case F_MOVEDATAEXTENTS
: {
2679 struct fileproc
*fp2
= NULL
;
2680 struct vnode
*src_vp
= NULLVP
;
2681 struct vnode
*dst_vp
= NULLVP
;
2682 /* We need to grab the 2nd FD out of the argments before moving on. */
2683 int fd2
= CAST_DOWN_EXPLICIT(int32_t, uap
->arg
);
2685 error
= priv_check_cred(kauth_cred_get(), PRIV_VFS_MOVE_DATA_EXTENTS
, 0);
2690 if (fp
->f_type
!= DTYPE_VNODE
) {
2696 * For now, special case HFS+ and APFS only, since this
2699 src_vp
= (struct vnode
*)fp
->f_data
;
2700 if (src_vp
->v_tag
!= VT_HFS
&& src_vp
->v_tag
!= VT_APFS
) {
2706 * Get the references before we start acquiring iocounts on the vnodes,
2707 * while we still hold the proc fd lock
2709 if ((error
= fp_lookup(p
, fd2
, &fp2
, 1))) {
2713 if (fp2
->f_type
!= DTYPE_VNODE
) {
2714 fp_drop(p
, fd2
, fp2
, 1);
2718 dst_vp
= (struct vnode
*)fp2
->f_data
;
2719 if (dst_vp
->v_tag
!= VT_HFS
&& dst_vp
->v_tag
!= VT_APFS
) {
2720 fp_drop(p
, fd2
, fp2
, 1);
2726 /* Re-do MAC checks against the new FD, pass in a fake argument */
2727 error
= mac_file_check_fcntl(proc_ucred(p
), fp2
->fp_glob
, uap
->cmd
, 0);
2729 fp_drop(p
, fd2
, fp2
, 1);
2733 /* Audit the 2nd FD */
2738 if (vnode_getwithref(src_vp
)) {
2739 fp_drop(p
, fd2
, fp2
, 0);
2743 if (vnode_getwithref(dst_vp
)) {
2745 fp_drop(p
, fd2
, fp2
, 0);
2751 * Basic asserts; validate they are not the same and that
2752 * both live on the same filesystem.
2754 if (dst_vp
== src_vp
) {
2757 fp_drop(p
, fd2
, fp2
, 0);
2762 if (dst_vp
->v_mount
!= src_vp
->v_mount
) {
2765 fp_drop(p
, fd2
, fp2
, 0);
2770 /* Now we have a legit pair of FDs. Go to work */
2772 /* Now check for write access to the target files */
2773 if (vnode_authorize(src_vp
, NULLVP
,
2774 (KAUTH_VNODE_ACCESS
| KAUTH_VNODE_WRITE_DATA
), &context
) != 0) {
2777 fp_drop(p
, fd2
, fp2
, 0);
2782 if (vnode_authorize(dst_vp
, NULLVP
,
2783 (KAUTH_VNODE_ACCESS
| KAUTH_VNODE_WRITE_DATA
), &context
) != 0) {
2786 fp_drop(p
, fd2
, fp2
, 0);
2791 /* Verify that both vps point to files and not directories */
2792 if (!vnode_isreg(src_vp
) || !vnode_isreg(dst_vp
)) {
2796 fp_drop(p
, fd2
, fp2
, 0);
2801 * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
2802 * We'll pass in our special bit indicating that the new behavior is expected
2805 error
= VNOP_EXCHANGE(src_vp
, dst_vp
, FSOPT_EXCHANGE_DATA_ONLY
, &context
);
2809 fp_drop(p
, fd2
, fp2
, 0);
2814 * SPI for making a file compressed.
2816 case F_MAKECOMPRESSED
: {
2817 uint32_t gcounter
= CAST_DOWN_EXPLICIT(uint32_t, uap
->arg
);
2819 if (fp
->f_type
!= DTYPE_VNODE
) {
2824 vp
= (struct vnode
*) fp
->f_data
;
2828 if (vnode_getwithref(vp
)) {
2834 if ((vnode_isreg(vp
) == 0) && (vnode_islnk(vp
) == 0)) {
2840 /* invoke ioctl to pass off to FS */
2841 /* Only go forward if you have write access */
2842 vfs_context_t ctx
= vfs_context_current();
2843 if (vnode_authorize(vp
, NULLVP
, (KAUTH_VNODE_ACCESS
| KAUTH_VNODE_WRITE_DATA
), ctx
) != 0) {
2849 error
= VNOP_IOCTL(vp
, uap
->cmd
, (caddr_t
)&gcounter
, 0, &context
);
2856 * SPI (private) for indicating to a filesystem that subsequent writes to
2857 * the open FD will be written to the Fastflow.
2859 case F_SET_GREEDY_MODE
:
2860 /* intentionally drop through to the same handler as F_SETSTATIC.
2861 * both fcntls should pass the argument and their selector into VNOP_IOCTL.
2865 * SPI (private) for indicating to a filesystem that subsequent writes to
2866 * the open FD will represent static content.
2868 case F_SETSTATICCONTENT
: {
2869 caddr_t ioctl_arg
= NULL
;
2872 ioctl_arg
= (caddr_t
) 1;
2875 if (fp
->f_type
!= DTYPE_VNODE
) {
2879 vp
= (struct vnode
*)fp
->f_data
;
2882 error
= vnode_getwithref(vp
);
2888 /* Only go forward if you have write access */
2889 vfs_context_t ctx
= vfs_context_current();
2890 if (vnode_authorize(vp
, NULLVP
, (KAUTH_VNODE_ACCESS
| KAUTH_VNODE_WRITE_DATA
), ctx
) != 0) {
2896 error
= VNOP_IOCTL(vp
, uap
->cmd
, ioctl_arg
, 0, &context
);
2897 (void)vnode_put(vp
);
2903 * SPI (private) for indicating to the lower level storage driver that the
2904 * subsequent writes should be of a particular IO type (burst, greedy, static),
2905 * or other flavors that may be necessary.
2912 /* extract 32 bits of flags from userland */
2913 param_ptr
= (caddr_t
) uap
->arg
;
2914 param
= (uint32_t) param_ptr
;
2916 /* If no argument is specified, error out */
2922 * Validate the different types of flags that can be specified:
2923 * all of them are mutually exclusive for now.
2926 case F_IOTYPE_ISOCHRONOUS
:
2935 if (fp
->f_type
!= DTYPE_VNODE
) {
2939 vp
= (struct vnode
*)fp
->f_data
;
2942 error
= vnode_getwithref(vp
);
2948 /* Only go forward if you have write access */
2949 vfs_context_t ctx
= vfs_context_current();
2950 if (vnode_authorize(vp
, NULLVP
, (KAUTH_VNODE_ACCESS
| KAUTH_VNODE_WRITE_DATA
), ctx
) != 0) {
2956 error
= VNOP_IOCTL(vp
, uap
->cmd
, param_ptr
, 0, &context
);
2957 (void)vnode_put(vp
);
2963 * Set the vnode pointed to by 'fd'
2964 * and tag it as the (potentially future) backing store
2965 * for another filesystem
2967 case F_SETBACKINGSTORE
: {
2968 if (fp
->f_type
!= DTYPE_VNODE
) {
2973 vp
= (struct vnode
*)fp
->f_data
;
2975 if (vp
->v_tag
!= VT_HFS
) {
2981 if (vnode_getwithref(vp
)) {
2986 /* only proceed if you have write access */
2987 vfs_context_t ctx
= vfs_context_current();
2988 if (vnode_authorize(vp
, NULLVP
, (KAUTH_VNODE_ACCESS
| KAUTH_VNODE_WRITE_DATA
), ctx
) != 0) {
2995 /* If arg != 0, set, otherwise unset */
2997 error
= VNOP_IOCTL(vp
, uap
->cmd
, (caddr_t
)1, 0, &context
);
2999 error
= VNOP_IOCTL(vp
, uap
->cmd
, (caddr_t
)NULL
, 0, &context
);
3007 * like F_GETPATH, but special semantics for
3008 * the mobile time machine handler.
3010 case F_GETPATH_MTMINFO
: {
3014 if (fp
->f_type
!= DTYPE_VNODE
) {
3018 vp
= (struct vnode
*)fp
->f_data
;
3021 pathlen
= MAXPATHLEN
;
3022 MALLOC(pathbufp
, char *, pathlen
, M_TEMP
, M_WAITOK
);
3023 if (pathbufp
== NULL
) {
3027 if ((error
= vnode_getwithref(vp
)) == 0) {
3028 int backingstore
= 0;
3030 /* Check for error from vn_getpath before moving on */
3031 if ((error
= vn_getpath(vp
, pathbufp
, &pathlen
)) == 0) {
3032 if (vp
->v_tag
== VT_HFS
) {
3033 error
= VNOP_IOCTL(vp
, uap
->cmd
, (caddr_t
) &backingstore
, 0, &context
);
3035 (void)vnode_put(vp
);
3038 error
= copyout((caddr_t
)pathbufp
, argp
, pathlen
);
3042 * If the copyout was successful, now check to ensure
3043 * that this vnode is not a BACKINGSTORE vnode. mtmd
3044 * wants the path regardless.
3051 (void)vnode_put(vp
);
3054 FREE(pathbufp
, M_TEMP
);
3058 #if DEBUG || DEVELOPMENT
3060 if (fp
->f_type
!= DTYPE_VNODE
) {
3064 vp
= (struct vnode
*)fp
->f_data
;
3073 * This is an fcntl() that we do not recognize at this level;
3074 * if this is a vnode, we send it down into the VNOP_IOCTL
3075 * for this vnode; this can include special devices, and will
3076 * effectively overload fcntl() to send ioctl()'s.
3078 if ((uap
->cmd
& IOC_VOID
) && (uap
->cmd
& IOC_INOUT
)) {
3083 /* Catch any now-invalid fcntl() selectors */
3085 case (int)APFSIOC_REVERT_TO_SNAPSHOT
:
3086 case (int)FSIOC_FIOSEEKHOLE
:
3087 case (int)FSIOC_FIOSEEKDATA
:
3088 case (int)FSIOC_CAS_BSDFLAGS
:
3089 case HFS_GET_BOOT_INFO
:
3090 case HFS_SET_BOOT_INFO
:
3092 case F_MARKDEPENDENCY
:
3094 case TIOCREVOKECLEAR
:
3101 if (fp
->f_type
!= DTYPE_VNODE
) {
3105 vp
= (struct vnode
*)fp
->f_data
;
3108 if ((error
= vnode_getwithref(vp
)) == 0) {
3109 #define STK_PARAMS 128
3110 char stkbuf
[STK_PARAMS
] = {0};
3114 * For this to work properly, we have to copy in the
3115 * ioctl() cmd argument if there is one; we must also
3116 * check that a command parameter, if present, does
3117 * not exceed the maximum command length dictated by
3118 * the number of bits we have available in the command
3119 * to represent a structure length. Finally, we have
3120 * to copy the results back out, if it is that type of
3123 size
= IOCPARM_LEN(uap
->cmd
);
3124 if (size
> IOCPARM_MAX
) {
3125 (void)vnode_put(vp
);
3131 if (size
> sizeof(stkbuf
)) {
3132 memp
= (caddr_t
)kheap_alloc(KHEAP_TEMP
, size
, Z_WAITOK
);
3134 (void)vnode_put(vp
);
3143 if (uap
->cmd
& IOC_IN
) {
3146 error
= copyin(argp
, data
, size
);
3148 (void)vnode_put(vp
);
3150 kheap_free(KHEAP_TEMP
, memp
, size
);
3155 /* Bzero the section beyond that which was needed */
3156 if (size
<= sizeof(stkbuf
)) {
3157 bzero((((uint8_t*)data
) + size
), (sizeof(stkbuf
) - size
));
3162 *(user_addr_t
*)data
= argp
;
3164 *(uint32_t *)data
= (uint32_t)argp
;
3167 } else if ((uap
->cmd
& IOC_OUT
) && size
) {
3169 * Zero the buffer so the user always
3170 * gets back something deterministic.
3173 } else if (uap
->cmd
& IOC_VOID
) {
3175 *(user_addr_t
*)data
= argp
;
3177 *(uint32_t *)data
= (uint32_t)argp
;
3181 error
= VNOP_IOCTL(vp
, uap
->cmd
, CAST_DOWN(caddr_t
, data
), 0, &context
);
3183 (void)vnode_put(vp
);
3185 /* Copy any output data to user */
3186 if (error
== 0 && (uap
->cmd
& IOC_OUT
) && size
) {
3187 error
= copyout(data
, argp
, size
);
3190 kheap_free(KHEAP_TEMP
, memp
, size
);
3197 AUDIT_ARG(vnpath_withref
, vp
, ARG_VNODE1
);
3198 fp_drop(p
, fd
, fp
, 0);
3201 fp_drop(p
, fd
, fp
, 1);
3210 * Description: Common code for dup, dup2, and fcntl(F_DUPFD).
3212 * Parameters: p Process performing the dup
3214 * new The fd to dup it to
3215 * fd_flags Flags to augment the new fd
3216 * retval Pointer to the call return area
3218 * Returns: 0 Success
3223 * *retval (modified) The new descriptor
3225 * Locks: Assumes proc_fdlock for process pointing to fdp is held by
3228 * Notes: This function may drop and reacquire this lock; it is unsafe
3229 * for a caller to assume that other state protected by the lock
3230 * has not been subsequently changed out from under it.
3234 struct filedesc
*fdp
, int old
, int new, int fd_flags
, int32_t *retval
)
3236 struct fileproc
*nfp
;
3237 struct fileproc
*ofp
;
3243 proc_fdlock_assert(p
, LCK_MTX_ASSERT_OWNED
);
3245 if ((ofp
= fdp
->fd_ofiles
[old
]) == NULL
||
3246 (fdp
->fd_ofileflags
[old
] & UF_RESERVED
)) {
3252 error
= mac_file_check_dup(proc_ucred(p
), ofp
->fp_glob
, new);
3261 nfp
= fileproc_alloc_init(NULL
);
3270 fg_ref(p
, ofp
->fp_glob
);
3271 nfp
->fp_glob
= ofp
->fp_glob
;
3274 if (fdp
->fd_ofiles
[new] != 0) {
3275 panic("finishdup: overwriting fd_ofiles with new %d", new);
3277 if ((fdp
->fd_ofileflags
[new] & UF_RESERVED
) == 0) {
3278 panic("finishdup: unreserved fileflags with new %d", new);
3282 if (new > fdp
->fd_lastfile
) {
3283 fdp
->fd_lastfile
= new;
3285 *fdflags(p
, new) |= fd_flags
;
3286 procfdtbl_releasefd(p
, new, nfp
);
3295 * Description: The implementation of the close(2) system call
3297 * Parameters: p Process in whose per process file table
3298 * the close is to occur
3299 * uap->fd fd to be closed
3302 * Returns: 0 Success
3303 * fp_lookup:EBADF Bad file descriptor
3304 * fp_guard_exception:??? Guarded file descriptor
3305 * close_internal:EBADF
3306 * close_internal:??? Anything returnable by a per-fileops
3310 sys_close(proc_t p
, struct close_args
*uap
, __unused
int32_t *retval
)
3312 __pthread_testcancel(1);
3313 return close_nocancel(p
, uap
->fd
);
3317 sys_close_nocancel(proc_t p
, struct close_nocancel_args
*uap
, __unused
int32_t *retval
)
3319 return close_nocancel(p
, uap
->fd
);
/*
 * close_nocancel
 *
 * Common close path shared by sys_close() and sys_close_nocancel().
 * Looks up the fileproc for "fd" without taking an I/O reference and,
 * unless the descriptor is close-guarded, hands it to
 * fp_close_and_unlock() for teardown.
 *
 * NOTE(review): this text is a lossy extraction — the original return
 * type line, the fd-table lock/unlock calls, the EBADF and guard-error
 * return statements, and closing braces appear to have been dropped.
 * Confirm the full control flow against the upstream file.
 */
3323 close_nocancel(proc_t p
, int fd
)
3325 struct fileproc
*fp
;
/* Emit an audit record for this close(2) (presumably; macro defined elsewhere). */
3327 AUDIT_SYSCLOSE(p
, fd
);
/*
 * Look up the fileproc without taking an iocount; NULL means the fd is
 * invalid (or reserved — TODO confirm against fp_get_noref_locked()).
 */
3330 if ((fp
= fp_get_noref_locked(p
, fd
)) == NULL
) {
/*
 * Close-guarded descriptors may not be closed through plain close(2);
 * raise the guard exception instead of closing.
 */
3335 if (fp_isguarded(fp
, GUARD_CLOSE
)) {
3336 int error
= fp_guard_exception(p
, fd
, fp
, kGUARD_EXC_CLOSE
);
/* Normal path: fp_close_and_unlock() finishes the close (and, per its
 * name, presumably releases the fd-table lock — confirm). */
3341 return fp_close_and_unlock(p
, fd
, fp
, 0);
/*
 * fp_close_and_unlock
 *
 * Tear down file descriptor "fd" / fileproc "fp" of process "p".
 * Called with the proc fd lock held (asserted below).  Marks the fd
 * table entry UF_RESERVED|UF_CLOSING so concurrent lookups fail,
 * performs close-time callouts (kauth fileop listeners, MAC close
 * notification, async-I/O cancellation, knote detach), drains the
 * fileproc, and finally drops the fileglob reference via fg_drop().
 *
 * Parameters:	p	Process owning the descriptor
 *		fd	Descriptor index being closed
 *		fp	The fileproc for fd
 *		flags	FD_DUP2RESV keeps the slot reserved for dup2()
 *
 * NOTE(review): lossy extraction — braces, panic() arguments, the
 * non-FD_DUP2RESV slot-release branch, and intermediate returns were
 * dropped.  Verify the full control flow against upstream.
 */
3346 fp_close_and_unlock(proc_t p
, int fd
, struct fileproc
*fp
, int flags
)
/* Cache the per-process fd table and this fileproc's fileglob. */
3348 struct filedesc
*fdp
= p
->p_fd
;
3349 struct fileglob
*fg
= fp
->fp_glob
;
/* Caller must already hold the proc fd lock. */
3352 proc_fdlock_assert(p
, LCK_MTX_ASSERT_OWNED
);
3356 * Keep most people from finding the filedesc while we are closing it.
3360 * - dup2() which always waits for UF_RESERVED to clear
3362 * - close/guarded_close/... who will fail the fileproc lookup if
3363 * UF_RESERVED is set,
3365 * - fdexec()/fdfree() who only run once all threads in the proc
3366 * are properly canceled, hence no fileproc in this proc should
3369 * Which means that neither UF_RESERVED nor UF_CLOSING should be set.
3371 * Callers of fp_get_noref_locked_with_iocount() can still find
3372 * this entry so that they can drop their I/O reference despite
3373 * not having remembered the fileproc pointer (namely select() and
/* Sanity check: the slot must not already be in the middle of a close. */
3376 if (p
->p_fd
->fd_ofileflags
[fd
] & (UF_RESERVED
| UF_CLOSING
)) {
3377 panic("%s: called with fileproc in flux (%d/:%p)",
/* Mark the slot so concurrent lookups fail while we close it. */
3380 p
->p_fd
->fd_ofileflags
[fd
] |= (UF_RESERVED
| UF_CLOSING
);
/* Close-time callouts are needed only if async I/O was issued on this
 * fp or someone registered kauth fileop listeners. */
3382 if ((fp
->fp_flags
& FP_AIOISSUED
) || kauth_authorize_fileop_has_listeners()) {
3385 if ((FILEGLOB_DTYPE(fg
) == DTYPE_VNODE
) && kauth_authorize_fileop_has_listeners()) {
3387 * call out to allow 3rd party notification of close.
3388 * Ignore result of kauth_authorize_fileop call.
3390 if (vnode_getwithref((vnode_t
)fg
->fg_data
) == 0) {
3391 u_int fileop_flags
= 0;
/* Tell listeners whether the file was written through this fileglob. */
3392 if (fg
->fg_flag
& FWASWRITTEN
) {
3393 fileop_flags
|= KAUTH_FILEOP_CLOSE_MODIFIED
;
3395 kauth_authorize_fileop(fg
->fg_cred
, KAUTH_FILEOP_CLOSE
,
3396 (uintptr_t)fg
->fg_data
, (uintptr_t)fileop_flags
);
/* Let MAC policies observe the close (NOTE(review): upstream likely
 * guards this with a CONFIG_MACF conditional among the dropped lines). */
3398 mac_file_notify_close(proc_ucred(p
), fp
->fp_glob
);
3400 vnode_put((vnode_t
)fg
->fg_data
);
/* Cancel any cancellable async I/O issued against this fd. */
3403 if (fp
->fp_flags
& FP_AIOISSUED
) {
3405 * cancel all async IO requests that can be cancelled.
3407 _aio_close( p
, fd
);
/* Detach any knotes referencing this fd. */
3413 if (fd
< fdp
->fd_knlistsize
) {
3414 knote_fdclose(p
, fd
);
/* Drain the fileproc (presumably waits out outstanding I/O references
 * — confirm fileproc_drain() semantics). */
3417 fileproc_drain(p
, fp
);
/* dup2() reservation: keep the slot reserved, just clear the fileproc
 * pointer and close-related flags.  NOTE(review): the else branch that
 * actually frees the fd slot appears to be among the dropped lines. */
3419 if (flags
& FD_DUP2RESV
) {
3420 fdp
->fd_ofiles
[fd
] = NULL
;
3421 fdp
->fd_ofileflags
[fd
] &= ~(UF_CLOSING
| UF_EXCLOSE
| UF_FORKCLOSE
);
/* Energy tracing for socket descriptors being closed. */
3428 if (ENTR_SHOULDTRACE
&& FILEGLOB_DTYPE(fg
) == DTYPE_SOCKET
) {
3429 KERNEL_ENERGYTRACE(kEnTrActKernSocket
, DBG_FUNC_END
,
3430 fd
, 0, (int64_t)VM_KERNEL_ADDRPERM(fg
->fg_data
));
/* Drop the fileglob reference; presumably the last reference performs
 * the underlying fo_close — confirm fg_drop() semantics. */
3435 return fg_drop(p
, fg
);
3442 * Description: Return status information about a file descriptor.
3444 * Parameters: p The process doing the fstat
3446 * ub The user stat buffer
3447 * xsecurity The user extended security
3448 * buffer, or 0 if none
3449 * xsecurity_size The size of xsecurity, or 0
3451 * isstat64 Flag to indicate 64 bit version
3452 * for inode size, etc.
3454 * Returns: 0 Success
3457 * fp_lookup:EBADF Bad file descriptor
3458 * vnode_getwithref:???
3460 * vnode_getwithref:???
3467 * Notes: Internal implementation for all other fstat() related
3470 * XXX switch on node type is bogus; need a stat in struct
3471 * XXX fileops instead.
3474 fstat(proc_t p
, int fd
, user_addr_t ub
, user_addr_t xsecurity
, user_addr_t xsecurity_size
, int isstat64
)
3476 struct fileproc
*fp
;
3482 struct user64_stat user64_sb
;
3483 struct user32_stat user32_sb
;
3484 struct user64_stat64 user64_sb64
;
3485 struct user32_stat64 user32_sb64
;
3490 kauth_filesec_t fsec
;
3491 user_size_t xsecurity_bufsize
;
3492 vfs_context_t ctx
= vfs_context_current();
3498 if ((error
= fp_lookup(p
, fd
, &fp
, 0)) != 0) {
3503 fsec
= KAUTH_FILESEC_NONE
;
3505 sbptr
= (void *)&source
;
3509 if ((error
= vnode_getwithref((vnode_t
)data
)) == 0) {
3511 * If the caller has the file open, and is not
3512 * requesting extended security information, we are
3513 * going to let them get the basic stat information.
3515 if (xsecurity
== USER_ADDR_NULL
) {
3516 error
= vn_stat_noauth((vnode_t
)data
, sbptr
, NULL
, isstat64
, 0, ctx
,
3517 fp
->fp_glob
->fg_cred
);
3519 error
= vn_stat((vnode_t
)data
, sbptr
, &fsec
, isstat64
, 0, ctx
);
3522 AUDIT_ARG(vnpath
, (struct vnode
*)data
, ARG_VNODE1
);
3523 (void)vnode_put((vnode_t
)data
);
3529 error
= soo_stat((struct socket
*)data
, sbptr
, isstat64
);
3531 #endif /* SOCKETS */
3534 error
= pipe_stat((void *)data
, sbptr
, isstat64
);
3538 error
= pshm_stat((void *)data
, sbptr
, isstat64
);
3542 error
= kqueue_stat((void *)data
, sbptr
, isstat64
, p
);
3552 if (isstat64
!= 0) {
3553 source
.sb64
.st_lspare
= 0;
3554 source
.sb64
.st_qspare
[0] = 0LL;
3555 source
.sb64
.st_qspare
[1] = 0LL;
3557 if (IS_64BIT_PROCESS(current_proc())) {
3558 munge_user64_stat64(&source
.sb64
, &dest
.user64_sb64
);
3559 my_size
= sizeof(dest
.user64_sb64
);
3560 sbp
= (caddr_t
)&dest
.user64_sb64
;
3562 munge_user32_stat64(&source
.sb64
, &dest
.user32_sb64
);
3563 my_size
= sizeof(dest
.user32_sb64
);
3564 sbp
= (caddr_t
)&dest
.user32_sb64
;
3567 source
.sb
.st_lspare
= 0;
3568 source
.sb
.st_qspare
[0] = 0LL;
3569 source
.sb
.st_qspare
[1] = 0LL;
3570 if (IS_64BIT_PROCESS(current_proc())) {
3571 munge_user64_stat(&source
.sb
, &dest
.user64_sb
);
3572 my_size
= sizeof(dest
.user64_sb
);
3573 sbp
= (caddr_t
)&dest
.user64_sb
;
3575 munge_user32_stat(&source
.sb
, &dest
.user32_sb
);
3576 my_size
= sizeof(dest
.user32_sb
);
3577 sbp
= (caddr_t
)&dest
.user32_sb
;
3581 error
= copyout(sbp
, ub
, my_size
);
3584 /* caller wants extended security information? */
3585 if (xsecurity
!= USER_ADDR_NULL
) {
3586 /* did we get any? */
3587 if (fsec
== KAUTH_FILESEC_NONE
) {
3588 if (susize(xsecurity_size
, 0) != 0) {
3593 /* find the user buffer size */
3594 xsecurity_bufsize
= fusize(xsecurity_size
);
3596 /* copy out the actual data size */
3597 if (susize(xsecurity_size
, KAUTH_FILESEC_COPYSIZE(fsec
)) != 0) {
3602 /* if the caller supplied enough room, copy out to it */
3603 if (xsecurity_bufsize
>= KAUTH_FILESEC_COPYSIZE(fsec
)) {
3604 error
= copyout(fsec
, xsecurity
, KAUTH_FILESEC_COPYSIZE(fsec
));
3609 fp_drop(p
, fd
, fp
, 0);
3611 kauth_filesec_free(fsec
);
3618 * sys_fstat_extended
3620 * Description: Extended version of fstat supporting returning extended
3621 * security information
3623 * Parameters: p The process doing the fstat
3624 * uap->fd The fd to stat
3625 * uap->ub The user stat buffer
3626 * uap->xsecurity The user extended security
3627 * buffer, or 0 if none
3628 * uap->xsecurity_size The size of xsecurity, or 0
3630 * Returns: 0 Success
3631 * !0 Errno (see fstat)
3634 sys_fstat_extended(proc_t p
, struct fstat_extended_args
*uap
, __unused
int32_t *retval
)
3636 return fstat(p
, uap
->fd
, uap
->ub
, uap
->xsecurity
, uap
->xsecurity_size
, 0);
3643 * Description: Get file status for the file associated with fd
3645 * Parameters: p The process doing the fstat
3646 * uap->fd The fd to stat
3647 * uap->ub The user stat buffer
3649 * Returns: 0 Success
3650 * !0 Errno (see fstat)
3653 sys_fstat(proc_t p
, struct fstat_args
*uap
, __unused
int32_t *retval
)
3655 return fstat(p
, uap
->fd
, uap
->ub
, 0, 0, 0);
3660 * sys_fstat64_extended
3662 * Description: Extended version of fstat64 supporting returning extended
3663 * security information
3665 * Parameters: p The process doing the fstat
3666 * uap->fd The fd to stat
3667 * uap->ub The user stat buffer
3668 * uap->xsecurity The user extended security
3669 * buffer, or 0 if none
3670 * uap->xsecurity_size The size of xsecurity, or 0
3672 * Returns: 0 Success
3673 * !0 Errno (see fstat)
3676 sys_fstat64_extended(proc_t p
, struct fstat64_extended_args
*uap
, __unused
int32_t *retval
)
3678 return fstat(p
, uap
->fd
, uap
->ub
, uap
->xsecurity
, uap
->xsecurity_size
, 1);
3685 * Description: Get 64 bit version of the file status for the file associated
3688 * Parameters: p The process doing the fstat
3689 * uap->fd The fd to stat
3690 * uap->ub The user stat buffer
3692 * Returns: 0 Success
3693 * !0 Errno (see fstat)
3696 sys_fstat64(proc_t p
, struct fstat64_args
*uap
, __unused
int32_t *retval
)
3698 return fstat(p
, uap
->fd
, uap
->ub
, 0, 0, 1);
3705 * Description: Return pathconf information about a file descriptor.
3707 * Parameters: p Process making the request
3708 * uap->fd fd to get information about
3709 * uap->name Name of information desired
3710 * retval Pointer to the call return area
3712 * Returns: 0 Success
3714 * fp_lookup:EBADF Bad file descriptor
3715 * vnode_getwithref:???
3719 * *retval (modified) Returned information (numeric)
3722 sys_fpathconf(proc_t p
, struct fpathconf_args
*uap
, int32_t *retval
)
3725 struct fileproc
*fp
;
3732 AUDIT_ARG(fd
, uap
->fd
);
3733 if ((error
= fp_lookup(p
, fd
, &fp
, 0))) {
3741 if (uap
->name
!= _PC_PIPE_BUF
) {
3750 if (uap
->name
!= _PC_PIPE_BUF
) {
3759 vp
= (struct vnode
*)data
;
3761 if ((error
= vnode_getwithref(vp
)) == 0) {
3762 AUDIT_ARG(vnpath
, vp
, ARG_VNODE1
);
3764 error
= vn_pathconf(vp
, uap
->name
, retval
, vfs_context_current());
3766 (void)vnode_put(vp
);
3776 fp_drop(p
, fd
, fp
, 0);
3781 * Statistics counter for the number of times a process calling fdalloc()
3782 * has resulted in an expansion of the per process open file table.
3784 * XXX This would likely be of more use if it were per process
3792 * Description: Allocate a file descriptor for the process.
3794 * Parameters: p Process to allocate the fd in
3795 * want The fd we would prefer to get
3796 * result Pointer to fd we got
3798 * Returns: 0 Success
3803 * *result (modified) The fd which was allocated
3806 fdalloc(proc_t p
, int want
, int *result
)
3808 struct filedesc
*fdp
= p
->p_fd
;
3810 int last
, numfiles
, oldnfiles
;
3811 struct fileproc
**newofiles
, **ofiles
;
3812 char *newofileflags
;
3814 rlim_t nofile
= proc_limitgetcur(p
, RLIMIT_NOFILE
, TRUE
);
3816 nofile
= MIN(nofile
, INT_MAX
);
3819 * Search for a free descriptor starting at the higher
3820 * of want or fd_freefile. If that fails, consider
3821 * expanding the ofile array.
3824 proc_fdlock_assert(p
, LCK_MTX_ASSERT_OWNED
);
3827 lim
= MIN(nofile
, maxfilesperproc
);
3829 last
= (int)MIN((unsigned int)fdp
->fd_nfiles
, (unsigned int)lim
);
3830 if ((i
= want
) < fdp
->fd_freefile
) {
3831 i
= fdp
->fd_freefile
;
3833 for (; i
< last
; i
++) {
3834 if (fdp
->fd_ofiles
[i
] == NULL
&& !(fdp
->fd_ofileflags
[i
] & UF_RESERVED
)) {
3835 procfdtbl_reservefd(p
, i
);
3836 if (i
> fdp
->fd_lastfile
) {
3837 fdp
->fd_lastfile
= i
;
3839 if (want
<= fdp
->fd_freefile
) {
3840 fdp
->fd_freefile
= i
;
3848 * No space in current array. Expand?
3850 if ((rlim_t
)fdp
->fd_nfiles
>= lim
) {
3853 if (fdp
->fd_nfiles
< NDEXTENT
) {
3854 numfiles
= NDEXTENT
;
3856 numfiles
= 2 * fdp
->fd_nfiles
;
3859 if ((rlim_t
)numfiles
> lim
) {
3860 numfiles
= (int)lim
;
3863 MALLOC(newofiles
, struct fileproc
**,
3864 numfiles
* OFILESIZE
, M_OFILETABL
, M_WAITOK
);
3866 if (newofiles
== NULL
) {
3869 if (fdp
->fd_nfiles
>= numfiles
) {
3870 FREE(newofiles
, M_OFILETABL
);
3873 newofileflags
= (char *) &newofiles
[numfiles
];
3875 * Copy the existing ofile and ofileflags arrays
3876 * and zero the new portion of each array.
3878 oldnfiles
= fdp
->fd_nfiles
;
3879 (void) memcpy(newofiles
, fdp
->fd_ofiles
,
3880 oldnfiles
* sizeof(*fdp
->fd_ofiles
));
3881 (void) memset(&newofiles
[oldnfiles
], 0,
3882 (numfiles
- oldnfiles
) * sizeof(*fdp
->fd_ofiles
));
3884 (void) memcpy(newofileflags
, fdp
->fd_ofileflags
,
3885 oldnfiles
* sizeof(*fdp
->fd_ofileflags
));
3886 (void) memset(&newofileflags
[oldnfiles
], 0,
3887 (numfiles
- oldnfiles
) *
3888 sizeof(*fdp
->fd_ofileflags
));
3889 ofiles
= fdp
->fd_ofiles
;
3890 fdp
->fd_ofiles
= newofiles
;
3891 fdp
->fd_ofileflags
= newofileflags
;
3892 fdp
->fd_nfiles
= numfiles
;
3893 FREE(ofiles
, M_OFILETABL
);
3902 * Description: Check to see whether n user file descriptors are available
3905 * Parameters: p Process to check in
3906 * n The number of fd's desired
3911 * Locks: Assumes proc_fdlock for process is held by the caller
3913 * Notes: The answer only remains valid so long as the proc_fdlock is
3914 * held by the caller.
3917 fdavail(proc_t p
, int n
)
3919 struct filedesc
*fdp
= p
->p_fd
;
3920 struct fileproc
**fpp
;
3924 rlim_t nofile
= proc_limitgetcur(p
, RLIMIT_NOFILE
, TRUE
);
3926 lim
= (int)MIN(nofile
, maxfilesperproc
);
3927 if ((i
= lim
- fdp
->fd_nfiles
) > 0 && (n
-= i
) <= 0) {
3930 fpp
= &fdp
->fd_ofiles
[fdp
->fd_freefile
];
3931 flags
= &fdp
->fd_ofileflags
[fdp
->fd_freefile
];
3932 for (i
= fdp
->fd_nfiles
- fdp
->fd_freefile
; --i
>= 0; fpp
++, flags
++) {
3933 if (*fpp
== NULL
&& !(*flags
& UF_RESERVED
) && --n
<= 0) {
3942 fp_get_noref_locked(proc_t p
, int fd
)
3944 struct filedesc
*fdp
= p
->p_fd
;
3945 struct fileproc
*fp
;
3947 if (fd
< 0 || fd
>= fdp
->fd_nfiles
||
3948 (fp
= fdp
->fd_ofiles
[fd
]) == NULL
||
3949 (fdp
->fd_ofileflags
[fd
] & UF_RESERVED
)) {
3956 fp_get_noref_locked_with_iocount(proc_t p
, int fd
)
3958 struct filedesc
*fdp
= p
->p_fd
;
3959 struct fileproc
*fp
= NULL
;
3961 if (fd
< 0 || fd
>= fdp
->fd_nfiles
||
3962 (fp
= fdp
->fd_ofiles
[fd
]) == NULL
||
3963 os_ref_get_count(&fp
->fp_iocount
) <= 1 ||
3964 ((fdp
->fd_ofileflags
[fd
] & UF_RESERVED
) &&
3965 !(fdp
->fd_ofileflags
[fd
] & UF_CLOSING
))) {
3966 panic("%s: caller without an ioccount on fileproc (%d/:%p)",
3974 fp_get_ftype(proc_t p
, int fd
, file_type_t ftype
, int err
, struct fileproc
**fpp
)
3976 struct filedesc
*fdp
= p
->p_fd
;
3977 struct fileproc
*fp
;
3979 proc_fdlock_spin(p
);
3980 if (fd
< 0 || fd
>= fdp
->fd_nfiles
||
3981 (fp
= fdp
->fd_ofiles
[fd
]) == NULL
||
3982 (fdp
->fd_ofileflags
[fd
] & UF_RESERVED
)) {
3987 if (fp
->f_type
!= ftype
) {
3992 os_ref_retain_locked(&fp
->fp_iocount
);
4003 * Description: Get fileproc and vnode pointer for a given fd from the per
4004 * process open file table of the specified process, and if
4005 * successful, increment the fp_iocount
4007 * Parameters: p Process in which fd lives
4008 * fd fd to get information for
4009 * resultfp Pointer to result fileproc
4010 * pointer area, or 0 if none
4011 * resultvp Pointer to result vnode pointer
4012 * area, or 0 if none
4014 * Returns: 0 Success
4015 * EBADF Bad file descriptor
4016 * ENOTSUP fd does not refer to a vnode
4019 * *resultfp (modified) Fileproc pointer
4020 * *resultvp (modified) vnode pointer
4022 * Notes: The resultfp and resultvp fields are optional, and may be
4023 * independently specified as NULL to skip returning information
4025 * Locks: Internally takes and releases proc_fdlock
4028 fp_getfvp(proc_t p
, int fd
, struct fileproc
**resultfp
, struct vnode
**resultvp
)
4030 struct fileproc
*fp
;
4033 error
= fp_get_ftype(p
, fd
, DTYPE_VNODE
, ENOTSUP
, &fp
);
4039 *resultvp
= (struct vnode
*)fp
->f_data
;
4050 * Description: Get pipe id for a given fd from the per process open file table
4051 * of the specified process.
4053 * Parameters: p Process in which fd lives
4054 * fd fd to get information for
4055 * result_pipe_id Pointer to result pipe id
4057 * Returns: 0 Success
4058 * EIVAL NULL pointer arguments passed
4059 * fp_lookup:EBADF Bad file descriptor
4060 * ENOTSUP fd does not refer to a pipe
4063 * *result_pipe_id (modified) pipe id
4065 * Locks: Internally takes and releases proc_fdlock
4068 fp_get_pipe_id(proc_t p
, int fd
, uint64_t *result_pipe_id
)
4070 struct fileproc
*fp
= FILEPROC_NULL
;
4071 struct fileglob
*fg
= NULL
;
4074 if (p
== NULL
|| result_pipe_id
== NULL
) {
4079 if ((error
= fp_lookup(p
, fd
, &fp
, 1))) {
4085 if (FILEGLOB_DTYPE(fg
) == DTYPE_PIPE
) {
4086 *result_pipe_id
= pipe_id((struct pipe
*)fg
->fg_data
);
4091 fp_drop(p
, fd
, fp
, 1);
4100 * Description: Get fileproc pointer for a given fd from the per process
4101 * open file table of the specified process and if successful,
4102 * increment the fp_iocount
4104 * Parameters: p Process in which fd lives
4105 * fd fd to get information for
4106 * resultfp Pointer to result fileproc
4107 * pointer area, or 0 if none
4108 * locked !0 if the caller holds the
4109 * proc_fdlock, 0 otherwise
4111 * Returns: 0 Success
4112 * EBADF Bad file descriptor
4115 * *resultfp (modified) Fileproc pointer
4117 * Locks: If the argument 'locked' is non-zero, then the caller is
4118 * expected to have taken and held the proc_fdlock; if it is
4119 * zero, than this routine internally takes and drops this lock.
4122 fp_lookup(proc_t p
, int fd
, struct fileproc
**resultfp
, int locked
)
4124 struct filedesc
*fdp
= p
->p_fd
;
4125 struct fileproc
*fp
;
4128 proc_fdlock_spin(p
);
4130 if (fd
< 0 || fdp
== NULL
|| fd
>= fdp
->fd_nfiles
||
4131 (fp
= fdp
->fd_ofiles
[fd
]) == NULL
||
4132 (fdp
->fd_ofileflags
[fd
] & UF_RESERVED
)) {
4138 os_ref_retain_locked(&fp
->fp_iocount
);
4154 * Description: Swap the fileproc pointer for a given fd with a new
4155 * fileproc pointer in the per-process open file table of
4156 * the specified process. The fdlock must be held at entry.
4157 * Iff the swap is successful, the old fileproc pointer is freed.
4159 * Parameters: p Process containing the fd
4160 * fd The fd of interest
4161 * nfp Pointer to the newfp
4163 * Returns: 0 Success
4164 * EBADF Bad file descriptor
4166 * EKEEPLOOKING Other references were active, try again.
4169 fp_tryswap(proc_t p
, int fd
, struct fileproc
*nfp
)
4171 struct fileproc
*fp
;
4174 proc_fdlock_assert(p
, LCK_MTX_ASSERT_OWNED
);
4176 if (0 != (error
= fp_lookup(p
, fd
, &fp
, 1))) {
4180 * At this point, our caller (change_guardedfd_np) has
4181 * one fp_iocount reference, and we just took another
4182 * one to begin the replacement.
4183 * fp and nfp have a +1 reference from allocation.
4184 * Thus if no-one else is looking, fp_iocount should be 3.
4186 if (os_ref_get_count(&fp
->fp_iocount
) < 3 ||
4187 1 != os_ref_get_count(&nfp
->fp_iocount
)) {
4188 panic("%s: fp_iocount", __func__
);
4189 } else if (3 == os_ref_get_count(&fp
->fp_iocount
)) {
4190 /* Copy the contents of *fp, preserving the "type" of *nfp */
4192 nfp
->fp_flags
= (nfp
->fp_flags
& FP_TYPEMASK
) |
4193 (fp
->fp_flags
& ~FP_TYPEMASK
);
4194 os_ref_retain_locked(&nfp
->fp_iocount
);
4195 os_ref_retain_locked(&nfp
->fp_iocount
);
4196 nfp
->fp_glob
= fp
->fp_glob
;
4197 nfp
->fp_wset
= fp
->fp_wset
;
4199 p
->p_fd
->fd_ofiles
[fd
] = nfp
;
4200 fp_drop(p
, fd
, nfp
, 1);
4202 os_ref_release_live(&fp
->fp_iocount
);
4203 os_ref_release_live(&fp
->fp_iocount
);
4207 * Wait for all other active references to evaporate.
4209 p
->p_fpdrainwait
= 1;
4210 error
= msleep(&p
->p_fpdrainwait
, &p
->p_fdmlock
,
4211 PRIBIO
| PCATCH
, "tryswap fpdrain", NULL
);
4214 * Return an "internal" errno to trigger a full
4215 * reevaluation of the change-guard attempt.
4217 error
= EKEEPLOOKING
;
4219 (void) fp_drop(p
, fd
, fp
, 1);
4228 * Description: Drop the I/O reference previously taken by calling fp_lookup
4231 * Parameters: p Process in which the fd lives
4232 * fd fd associated with the fileproc
4233 * fp fileproc on which to set the
4234 * flag and drop the reference
4235 * locked flag to internally take and
4236 * drop proc_fdlock if it is not
4237 * already held by the caller
4239 * Returns: 0 Success
4240 * EBADF Bad file descriptor
4242 * Locks: This function internally takes and drops the proc_fdlock for
4243 * the supplied process if 'locked' is non-zero, and assumes that
4244 * the caller already holds this lock if 'locked' is non-zero.
4246 * Notes: The fileproc must correspond to the fd in the supplied proc
4249 fp_drop(proc_t p
, int fd
, struct fileproc
*fp
, int locked
)
4251 struct filedesc
*fdp
= p
->p_fd
;
4255 proc_fdlock_spin(p
);
4257 if ((fp
== FILEPROC_NULL
) && (fd
< 0 || fd
>= fdp
->fd_nfiles
||
4258 (fp
= fdp
->fd_ofiles
[fd
]) == NULL
||
4259 ((fdp
->fd_ofileflags
[fd
] & UF_RESERVED
) &&
4260 !(fdp
->fd_ofileflags
[fd
] & UF_CLOSING
)))) {
4267 if (1 == os_ref_release_locked(&fp
->fp_iocount
)) {
4268 if (fp
->fp_flags
& FP_SELCONFLICT
) {
4269 fp
->fp_flags
&= ~FP_SELCONFLICT
;
4272 if (p
->p_fpdrainwait
) {
4273 p
->p_fpdrainwait
= 0;
4281 wakeup(&p
->p_fpdrainwait
);
4291 * Description: Given an fd, look it up in the current process's per process
4292 * open file table, and return its internal vnode pointer.
4294 * Parameters: fd fd to obtain vnode from
4295 * vpp pointer to vnode return area
4297 * Returns: 0 Success
4298 * EINVAL The fd does not refer to a
4299 * vnode fileproc entry
4300 * fp_lookup:EBADF Bad file descriptor
4303 * *vpp (modified) Returned vnode pointer
4305 * Locks: This function internally takes and drops the proc_fdlock for
4306 * the current process
4308 * Notes: If successful, this function increments the fp_iocount on the
4309 * fd's corresponding fileproc.
4311 * The fileproc referenced is not returned; because of this, care
4312 * must be taken to not drop the last reference (e.g. by closing
4313 * the file). This is inherently unsafe, since the reference may
4314 * not be recoverable from the vnode, if there is a subsequent
4315 * close that destroys the associate fileproc. The caller should
4316 * therefore retain their own reference on the fileproc so that
4317 * the fp_iocount can be dropped subsequently. Failure to do this
4318 * can result in the returned pointer immediately becoming invalid
4319 * following the call.
4321 * Use of this function is discouraged.
4324 file_vnode(int fd
, struct vnode
**vpp
)
4326 return file_vnode_withvid(fd
, vpp
, NULL
);
4330 * file_vnode_withvid
4332 * Description: Given an fd, look it up in the current process's per process
4333 * open file table, and return its internal vnode pointer.
4335 * Parameters: fd fd to obtain vnode from
4336 * vpp pointer to vnode return area
4337 * vidp pointer to vid of the returned vnode
4339 * Returns: 0 Success
4340 * EINVAL The fd does not refer to a
4341 * vnode fileproc entry
4342 * fp_lookup:EBADF Bad file descriptor
4345 * *vpp (modified) Returned vnode pointer
4347 * Locks: This function internally takes and drops the proc_fdlock for
4348 * the current process
4350 * Notes: If successful, this function increments the fp_iocount on the
4351 * fd's corresponding fileproc.
4353 * The fileproc referenced is not returned; because of this, care
4354 * must be taken to not drop the last reference (e.g. by closing
4355 * the file). This is inherently unsafe, since the reference may
4356 * not be recoverable from the vnode, if there is a subsequent
4357 * close that destroys the associate fileproc. The caller should
4358 * therefore retain their own reference on the fileproc so that
4359 * the fp_iocount can be dropped subsequently. Failure to do this
4360 * can result in the returned pointer immediately becoming invalid
4361 * following the call.
4363 * Use of this function is discouraged.
4366 file_vnode_withvid(int fd
, struct vnode
**vpp
, uint32_t *vidp
)
4368 struct fileproc
*fp
;
4371 error
= fp_get_ftype(current_proc(), fd
, DTYPE_VNODE
, EINVAL
, &fp
);
4377 *vidp
= vnode_vid(fp
->f_data
);
4386 * Description: Given an fd, look it up in the current process's per process
4387 * open file table, and return its internal socket pointer.
4389 * Parameters: fd fd to obtain vnode from
4390 * sp pointer to socket return area
4392 * Returns: 0 Success
4393 * ENOTSOCK Not a socket
4394 * fp_lookup:EBADF Bad file descriptor
4397 * *sp (modified) Returned socket pointer
4399 * Locks: This function internally takes and drops the proc_fdlock for
4400 * the current process
4402 * Notes: If successful, this function increments the fp_iocount on the
4403 * fd's corresponding fileproc.
4405 * The fileproc referenced is not returned; because of this, care
4406 * must be taken to not drop the last reference (e.g. by closing
4407 * the file). This is inherently unsafe, since the reference may
4408 * not be recoverable from the socket, if there is a subsequent
4409 * close that destroys the associate fileproc. The caller should
4410 * therefore retain their own reference on the fileproc so that
4411 * the fp_iocount can be dropped subsequently. Failure to do this
4412 * can result in the returned pointer immediately becoming invalid
4413 * following the call.
4415 * Use of this function is discouraged.
4418 file_socket(int fd
, struct socket
**sp
)
4420 struct fileproc
*fp
;
4423 error
= fp_get_ftype(current_proc(), fd
, DTYPE_SOCKET
, ENOTSOCK
, &fp
);
4426 *sp
= (struct socket
*)fp
->f_data
;
4436 * Description: Given an fd, look it up in the current process's per process
4437 * open file table, and return its fileproc's flags field.
4439 * Parameters: fd fd whose flags are to be
4441 * flags pointer to flags data area
4443 * Returns: 0 Success
4444 * ENOTSOCK Not a socket
4445 * fp_lookup:EBADF Bad file descriptor
4448 * *flags (modified) Returned flags field
4450 * Locks: This function internally takes and drops the proc_fdlock for
4451 * the current process
4454 file_flags(int fd
, int *flags
)
4456 proc_t p
= current_proc();
4457 struct fileproc
*fp
;
4460 proc_fdlock_spin(p
);
4461 fp
= fp_get_noref_locked(p
, fd
);
4463 *flags
= (int)fp
->f_flag
;
4475 * Description: Drop an iocount reference on an fd, and wake up any waiters
4476 * for draining (i.e. blocked in fileproc_drain() called during
4477 * the last attempt to close a file).
4479 * Parameters: fd fd on which an ioreference is
4482 * Returns: 0 Success
4484 * Description: Given an fd, look it up in the current process's per process
4485 * open file table, and drop it's fileproc's fp_iocount by one
4487 * Notes: This is intended as a corresponding operation to the functions
4488 * file_vnode() and file_socket() operations.
4490 * If the caller can't possibly hold an I/O reference,
4491 * this function will panic the kernel rather than allowing
4492 * for memory corruption. Callers should always call this
4493 * because they acquired an I/O reference on this file before.
4495 * Use of this function is discouraged.
4500 struct fileproc
*fp
;
4501 proc_t p
= current_proc();
4504 proc_fdlock_spin(p
);
4505 fp
= fp_get_noref_locked_with_iocount(p
, fd
);
4507 if (1 == os_ref_release_locked(&fp
->fp_iocount
)) {
4508 if (fp
->fp_flags
& FP_SELCONFLICT
) {
4509 fp
->fp_flags
&= ~FP_SELCONFLICT
;
4512 if (p
->p_fpdrainwait
) {
4513 p
->p_fpdrainwait
= 0;
4520 wakeup(&p
->p_fpdrainwait
);
4530 * Create a new open file structure and allocate
4531 * a file descriptor for the process that refers to it.
4533 * Returns: 0 Success
4535 * Description: Allocate an entry in the per process open file table and
4536 * return the corresponding fileproc and fd.
4538 * Parameters: p The process in whose open file
4539 * table the fd is to be allocated
4540 * resultfp Pointer to fileproc pointer
4542 * resultfd Pointer to fd return area
4544 * fp_zalloc fileproc allocator to use
4545 * crarg allocator args
4547 * Returns: 0 Success
4548 * ENFILE Too many open files in system
4549 * fdalloc:EMFILE Too many open files in process
4550 * fdalloc:ENOMEM M_OFILETABL zone exhausted
4551 * ENOMEM fp_zone or fg_zone zone
4555 * *resultfd (modified) Returned fileproc pointer
4556 * *resultfd (modified) Returned fd
4558 * Notes: This function takes separate process and context arguments
4559 * solely to support kern_exec.c; otherwise, it would take
4560 * neither, and use the vfs_context_current() routine internally.
4563 falloc_withalloc(proc_t p
, struct fileproc
**resultfp
, int *resultfd
,
4564 vfs_context_t ctx
, fp_allocfn_t fp_zalloc
, void *crarg
)
4566 struct fileproc
*fp
;
4567 struct fileglob
*fg
;
4570 /* Make sure we don't go beyond the system-wide limit */
4571 if (nfiles
>= maxfiles
) {
4578 /* fdalloc will make sure the process stays below per-process limit */
4579 if ((error
= fdalloc(p
, 0, &nfd
))) {
4585 error
= mac_file_check_create(proc_ucred(p
));
4593 * Allocate a new file descriptor.
4594 * If the process has file descriptor zero open, add to the list
4595 * of open files at that point, otherwise put it at the front of
4596 * the list of open files.
4600 fp
= (*fp_zalloc
)(crarg
);
4604 fg
= zalloc_flags(fg_zone
, Z_WAITOK
| Z_ZERO
);
4605 lck_mtx_init(&fg
->fg_lock
, file_lck_grp
, file_lck_attr
);
4607 os_ref_retain_locked(&fp
->fp_iocount
);
4608 os_ref_init_raw(&fg
->fg_count
, &f_refgrp
);
4609 fg
->fg_ops
= &uninitops
;
4612 mac_file_label_init(fg
);
4615 kauth_cred_ref(ctx
->vc_ucred
);
4617 fp
->f_cred
= ctx
->vc_ucred
;
4620 mac_file_label_associate(fp
->f_cred
, fg
);
4623 os_atomic_inc(&nfiles
, relaxed
);
4627 p
->p_fd
->fd_ofiles
[nfd
] = fp
;
4642 falloc(proc_t p
, struct fileproc
**resultfp
, int *resultfd
, vfs_context_t ctx
)
4644 return falloc_withalloc(p
, resultfp
, resultfd
, ctx
,
4645 fileproc_alloc_init
, NULL
);
4651 * Description: Perform close-on-exec processing for all files in a process
4652 * that are either marked as close-on-exec, or which were in the
4653 * process of being opened at the time of the execve
4655 * Also handles the case (via posix_spawn()) where -all-
4656 * files except those marked with "inherit" as treated as
4659 * Parameters: p Pointer to process calling
4664 * Locks: This function internally takes and drops proc_fdlock()
4665 * But assumes tables don't grow/change while unlocked.
4669 fdexec(proc_t p
, short flags
, int self_exec
)
4671 struct filedesc
*fdp
= p
->p_fd
;
4673 boolean_t cloexec_default
= (flags
& POSIX_SPAWN_CLOEXEC_DEFAULT
) != 0;
4674 thread_t self
= current_thread();
4675 struct uthread
*ut
= get_bsdthread_info(self
);
4676 struct kqworkq
*dealloc_kqwq
= NULL
;
4679 * If the current thread is bound as a workq/workloop
4680 * servicing thread, we need to unbind it first.
4682 if (ut
->uu_kqr_bound
&& self_exec
) {
4683 kqueue_threadreq_unbind(p
, ut
->uu_kqr_bound
);
4689 * Deallocate the knotes for this process
4690 * and mark the tables non-existent so
4691 * subsequent kqueue closes go faster.
4694 assert(fdp
->fd_knlistsize
== 0);
4695 assert(fdp
->fd_knhashmask
== 0);
4697 for (i
= fdp
->fd_lastfile
; i
>= 0; i
--) {
4698 struct fileproc
*fp
= fdp
->fd_ofiles
[i
];
4699 char *flagp
= &fdp
->fd_ofileflags
[i
];
4701 if (fp
&& cloexec_default
) {
4703 * Reverse the usual semantics of file descriptor
4704 * inheritance - all of them should be closed
4705 * except files marked explicitly as "inherit" and
4706 * not marked close-on-exec.
4708 if ((*flagp
& (UF_EXCLOSE
| UF_INHERIT
)) != UF_INHERIT
) {
4709 *flagp
|= UF_EXCLOSE
;
4711 *flagp
&= ~UF_INHERIT
;
4715 ((*flagp
& (UF_RESERVED
| UF_EXCLOSE
)) == UF_EXCLOSE
)
4717 || (fp
&& mac_file_check_inherit(proc_ucred(p
), fp
->fp_glob
))
4720 fp_close_and_unlock(p
, i
, fp
, 0);
4725 /* release the per-process workq kq */
4726 if (fdp
->fd_wqkqueue
) {
4727 dealloc_kqwq
= fdp
->fd_wqkqueue
;
4728 fdp
->fd_wqkqueue
= NULL
;
4733 /* Anything to free? */
4735 kqworkq_dealloc(dealloc_kqwq
);
4743 * Description: Copy a filedesc structure. This is normally used as part of
4744 * forkproc() when forking a new process, to copy the per process
4745 * open file table over to the new process.
4747 * Parameters: p Process whose open file table
4748 * is to be copied (parent)
4749 * uth_cdir Per thread current working
4750 * cirectory, or NULL
4752 * Returns: NULL Copy failed
4753 * !NULL Pointer to new struct filedesc
4755 * Locks: This function internally takes and drops proc_fdlock()
4757 * Notes: Files are copied directly, ignoring the new resource limits
4758 * for the process that's being copied into. Since the descriptor
4759 * references are just additional references, this does not count
4760 * against the number of open files on the system.
4762 * The struct filedesc includes the current working directory,
4763 * and the current root directory, if the process is chroot'ed.
4765 * If the exec was called by a thread using a per thread current
4766 * working directory, we inherit the working directory from the
4767 * thread making the call, rather than from the process.
4769 * In the case of a failure to obtain a reference, for most cases,
4770 * the file entry will be silently dropped. There's an exception
4771 * for the case of a chroot dir, since a failure to to obtain a
4772 * reference there would constitute an "escape" from the chroot
4773 * environment, which must not be allowed. In that case, we will
4774 * deny the execve() operation, rather than allowing the escape.
4777 fdcopy(proc_t p
, vnode_t uth_cdir
)
4779 struct filedesc
*newfdp
, *fdp
= p
->p_fd
;
4781 struct fileproc
*ofp
, *fp
;
4784 newfdp
= zalloc(fdp_zone
);
4789 * the FD_CHROOT flag will be inherited via this copy
4791 (void) memcpy(newfdp
, fdp
, sizeof(*newfdp
));
4794 * If we are running with per-thread current working directories,
4795 * inherit the new current working directory from the current thread
4796 * instead, before we take our references.
4798 if (uth_cdir
!= NULLVP
) {
4799 newfdp
->fd_cdir
= uth_cdir
;
4803 * For both fd_cdir and fd_rdir make sure we get
4804 * a valid reference... if we can't, than set
4805 * set the pointer(s) to NULL in the child... this
4806 * will keep us from using a non-referenced vp
4807 * and allows us to do the vnode_rele only on
4808 * a properly referenced vp
4810 if ((v_dir
= newfdp
->fd_cdir
)) {
4811 if (vnode_getwithref(v_dir
) == 0) {
4812 if ((vnode_ref(v_dir
))) {
4813 newfdp
->fd_cdir
= NULL
;
4817 newfdp
->fd_cdir
= NULL
;
4820 if (newfdp
->fd_cdir
== NULL
&& fdp
->fd_cdir
) {
4822 * we couldn't get a new reference on
4823 * the current working directory being
4824 * inherited... we might as well drop
4825 * our reference from the parent also
4826 * since the vnode has gone DEAD making
4827 * it useless... by dropping it we'll
4828 * be that much closer to recycling it
4830 vnode_rele(fdp
->fd_cdir
);
4831 fdp
->fd_cdir
= NULL
;
4834 if ((v_dir
= newfdp
->fd_rdir
)) {
4835 if (vnode_getwithref(v_dir
) == 0) {
4836 if ((vnode_ref(v_dir
))) {
4837 newfdp
->fd_rdir
= NULL
;
4841 newfdp
->fd_rdir
= NULL
;
4844 /* Coming from a chroot environment and unable to get a reference... */
4845 if (newfdp
->fd_rdir
== NULL
&& fdp
->fd_rdir
) {
4848 * We couldn't get a new reference on
4849 * the chroot directory being
4850 * inherited... this is fatal, since
4851 * otherwise it would constitute an
4852 * escape from a chroot environment by
4855 if (newfdp
->fd_cdir
) {
4856 vnode_rele(newfdp
->fd_cdir
);
4858 zfree(fdp_zone
, newfdp
);
4863 * If the number of open files fits in the internal arrays
4864 * of the open file structure, use them, otherwise allocate
4865 * additional memory for the number of descriptors currently
4868 if (newfdp
->fd_lastfile
< NDFILE
) {
4872 * Compute the smallest multiple of NDEXTENT needed
4873 * for the file descriptors currently in use,
4874 * allowing the table to shrink.
4876 i
= newfdp
->fd_nfiles
;
4877 while (i
> 1 + 2 * NDEXTENT
&& i
> 1 + newfdp
->fd_lastfile
* 2) {
4883 MALLOC(newfdp
->fd_ofiles
, struct fileproc
**,
4884 i
* OFILESIZE
, M_OFILETABL
, M_WAITOK
);
4885 if (newfdp
->fd_ofiles
== NULL
) {
4886 if (newfdp
->fd_cdir
) {
4887 vnode_rele(newfdp
->fd_cdir
);
4889 if (newfdp
->fd_rdir
) {
4890 vnode_rele(newfdp
->fd_rdir
);
4893 zfree(fdp_zone
, newfdp
);
4896 (void) memset(newfdp
->fd_ofiles
, 0, i
* OFILESIZE
);
4899 newfdp
->fd_ofileflags
= (char *) &newfdp
->fd_ofiles
[i
];
4900 newfdp
->fd_nfiles
= i
;
4902 if (fdp
->fd_nfiles
> 0) {
4903 struct fileproc
**fpp
;
4906 (void) memcpy(newfdp
->fd_ofiles
, fdp
->fd_ofiles
,
4907 (newfdp
->fd_lastfile
+ 1) * sizeof(*fdp
->fd_ofiles
));
4908 (void) memcpy(newfdp
->fd_ofileflags
, fdp
->fd_ofileflags
,
4909 (newfdp
->fd_lastfile
+ 1) * sizeof(*fdp
->fd_ofileflags
));
4911 fpp
= &newfdp
->fd_ofiles
[newfdp
->fd_lastfile
];
4912 flags
= &newfdp
->fd_ofileflags
[newfdp
->fd_lastfile
];
4913 for (i
= newfdp
->fd_lastfile
; i
>= 0; i
--, fpp
--, flags
--) {
4914 if ((ofp
= *fpp
) != NULL
&&
4915 0 == (ofp
->fp_glob
->fg_lflags
& FG_CONFINED
) &&
4916 0 == (*flags
& (UF_FORKCLOSE
| UF_RESERVED
))) {
4918 if (FILEPROC_TYPE(ofp
) != FTYPE_SIMPLE
) {
4919 panic("complex fileproc");
4922 fp
= fileproc_alloc_init(NULL
);
4925 * XXX no room to copy, unable to
4926 * XXX safely unwind state at present
4931 (ofp
->fp_flags
& ~FP_TYPEMASK
);
4932 fp
->fp_glob
= ofp
->fp_glob
;
4933 fg_ref(p
, fp
->fp_glob
);
4941 if (i
== newfdp
->fd_lastfile
&& i
> 0) {
4942 newfdp
->fd_lastfile
--;
4944 if (i
< newfdp
->fd_freefile
) {
4945 newfdp
->fd_freefile
= i
;
4954 * Initialize knote and kqueue tracking structs
4956 newfdp
->fd_knlist
= NULL
;
4957 newfdp
->fd_knlistsize
= 0;
4958 newfdp
->fd_knhash
= NULL
;
4959 newfdp
->fd_knhashmask
= 0;
4960 newfdp
->fd_kqhash
= NULL
;
4961 newfdp
->fd_kqhashmask
= 0;
4962 newfdp
->fd_wqkqueue
= NULL
;
4963 lck_mtx_init(&newfdp
->fd_kqhashlock
, proc_kqhashlock_grp
, proc_lck_attr
);
4964 lck_mtx_init(&newfdp
->fd_knhashlock
, proc_knhashlock_grp
, proc_lck_attr
);
4973 * Description: Release a filedesc (per process open file table) structure;
4974 * this is done on process exit(), or from forkproc_free() if
4975 * the fork fails for some reason subsequent to a successful
4978 * Parameters: p Pointer to process going away
4982 * Locks: This function internally takes and drops proc_fdlock()
4987 struct filedesc
*fdp
;
4988 struct fileproc
*fp
;
4989 struct kqworkq
*dealloc_kqwq
= NULL
;
4994 if (p
== kernproc
|| NULL
== (fdp
= p
->p_fd
)) {
4999 extern struct filedesc filedesc0
;
5001 if (&filedesc0
== fdp
) {
5006 * deallocate all the knotes up front and claim empty
5007 * tables to make any subsequent kqueue closes faster.
5010 assert(fdp
->fd_knlistsize
== 0);
5011 assert(fdp
->fd_knhashmask
== 0);
5014 * dealloc all workloops that have outstanding retains
5015 * when created with scheduling parameters.
5017 kqworkloops_dealloc(p
);
5019 /* close file descriptors */
5020 if (fdp
->fd_nfiles
> 0 && fdp
->fd_ofiles
) {
5021 for (i
= fdp
->fd_lastfile
; i
>= 0; i
--) {
5022 if ((fp
= fdp
->fd_ofiles
[i
]) != NULL
) {
5023 if (fdp
->fd_ofileflags
[i
] & UF_RESERVED
) {
5024 panic("fdfree: found fp with UF_RESERVED");
5026 fp_close_and_unlock(p
, i
, fp
, 0);
5030 FREE(fdp
->fd_ofiles
, M_OFILETABL
);
5031 fdp
->fd_ofiles
= NULL
;
5035 if (fdp
->fd_wqkqueue
) {
5036 dealloc_kqwq
= fdp
->fd_wqkqueue
;
5037 fdp
->fd_wqkqueue
= NULL
;
5043 kqworkq_dealloc(dealloc_kqwq
);
5046 vnode_rele(fdp
->fd_cdir
);
5049 vnode_rele(fdp
->fd_rdir
);
5052 proc_fdlock_spin(p
);
5056 if (fdp
->fd_kqhash
) {
5057 for (uint32_t j
= 0; j
<= fdp
->fd_kqhashmask
; j
++) {
5058 assert(LIST_EMPTY(&fdp
->fd_kqhash
[j
]));
5060 hashdestroy(fdp
->fd_kqhash
, M_KQUEUE
, fdp
->fd_kqhashmask
);
5063 lck_mtx_destroy(&fdp
->fd_kqhashlock
, proc_kqhashlock_grp
);
5064 lck_mtx_destroy(&fdp
->fd_knhashlock
, proc_knhashlock_grp
);
5066 zfree(fdp_zone
, fdp
);
5072 * Description: Drain out pending I/O operations
5074 * Parameters: p Process closing this file
5075 * fp fileproc struct for the open
5076 * instance on the file
5080 * Locks: Assumes the caller holds the proc_fdlock
5082 * Notes: For character devices, this occurs on the last close of the
5083 * device; for all other file descriptors, this occurs on each
5084 * close to prevent fd's from being closed out from under
5085 * operations currently in progress and blocked
5087 * See Also: file_vnode(), file_socket(), file_drop(), and the cautions
5088 * regarding their use and interaction with this function.
5091 fileproc_drain(proc_t p
, struct fileproc
* fp
)
5093 struct vfs_context context
;
5095 bool is_current_proc
;
5097 is_current_proc
= (p
== current_proc());
5099 if (!is_current_proc
) {
5101 thread
= proc_thread(p
); /* XXX */
5102 thread_reference(thread
);
5105 thread
= current_thread();
5108 context
.vc_thread
= thread
;
5109 context
.vc_ucred
= fp
->fp_glob
->fg_cred
;
5111 /* Set the vflag for drain */
5112 fileproc_modify_vflags(fp
, FPV_DRAIN
, FALSE
);
5114 while (os_ref_get_count(&fp
->fp_iocount
) > 1) {
5115 lck_mtx_convert_spin(&p
->p_fdmlock
);
5117 fo_drain(fp
, &context
);
5118 if ((fp
->fp_flags
& FP_INSELECT
) == FP_INSELECT
) {
5119 if (waitq_wakeup64_all((struct waitq
*)fp
->fp_wset
, NO_EVENT64
,
5120 THREAD_INTERRUPTED
, WAITQ_ALL_PRIORITIES
) == KERN_INVALID_ARGUMENT
) {
5121 panic("bad wait queue for waitq_wakeup64_all %p (fp:%p)", fp
->fp_wset
, fp
);
5124 if ((fp
->fp_flags
& FP_SELCONFLICT
) == FP_SELCONFLICT
) {
5125 if (waitq_wakeup64_all(&select_conflict_queue
, NO_EVENT64
,
5126 THREAD_INTERRUPTED
, WAITQ_ALL_PRIORITIES
) == KERN_INVALID_ARGUMENT
) {
5127 panic("bad select_conflict_queue");
5130 p
->p_fpdrainwait
= 1;
5132 msleep(&p
->p_fpdrainwait
, &p
->p_fdmlock
, PRIBIO
, "fpdrain", NULL
);
5135 if ((fp
->fp_flags
& FP_INSELECT
) != 0) {
5136 panic("FP_INSELECT set on drained fp");
5139 if ((fp
->fp_flags
& FP_SELCONFLICT
) == FP_SELCONFLICT
) {
5140 fp
->fp_flags
&= ~FP_SELCONFLICT
;
5143 if (!is_current_proc
) {
5144 thread_deallocate(thread
);
5152 * Description: Release the fd and free the fileproc associated with the fd
5153 * in the per process open file table of the specified process;
5154 * these values must correspond.
5156 * Parameters: p Process containing fd
5157 * fd fd to be released
5158 * fp fileproc to be freed
5161 fp_free(proc_t p
, int fd
, struct fileproc
* fp
)
5163 proc_fdlock_spin(p
);
5167 fg_free(fp
->fp_glob
);
5168 os_ref_release_live(&fp
->fp_iocount
);
5176 * Description: Apply an advisory lock on a file descriptor.
5178 * Parameters: p Process making request
5179 * uap->fd fd on which the lock is to be
5181 * uap->how (Un)Lock bits, including type
5182 * retval Pointer to the call return area
5184 * Returns: 0 Success
5185 * fp_getfvp:EBADF Bad file descriptor
5186 * fp_getfvp:ENOTSUP fd does not refer to a vnode
5187 * vnode_getwithref:???
5191 * *retval (modified) Size of dtable
5193 * Notes: Just attempt to get a record lock of the requested type on
5194 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
5197 sys_flock(proc_t p
, struct flock_args
*uap
, __unused
int32_t *retval
)
5201 struct fileproc
*fp
;
5204 vfs_context_t ctx
= vfs_context_current();
5207 AUDIT_ARG(fd
, uap
->fd
);
5208 if ((error
= fp_getfvp(p
, fd
, &fp
, &vp
))) {
5211 if ((error
= vnode_getwithref(vp
))) {
5214 AUDIT_ARG(vnpath
, vp
, ARG_VNODE1
);
5216 lf
.l_whence
= SEEK_SET
;
5219 if (how
& LOCK_UN
) {
5220 lf
.l_type
= F_UNLCK
;
5221 error
= VNOP_ADVLOCK(vp
, (caddr_t
)fp
->fp_glob
, F_UNLCK
, &lf
, F_FLOCK
, ctx
, NULL
);
5224 if (how
& LOCK_EX
) {
5225 lf
.l_type
= F_WRLCK
;
5226 } else if (how
& LOCK_SH
) {
5227 lf
.l_type
= F_RDLCK
;
5233 error
= mac_file_check_lock(proc_ucred(p
), fp
->fp_glob
, F_SETLK
, &lf
);
5238 error
= VNOP_ADVLOCK(vp
, (caddr_t
)fp
->fp_glob
, F_SETLK
, &lf
,
5239 (how
& LOCK_NB
? F_FLOCK
: F_FLOCK
| F_WAIT
),
5242 os_atomic_or(&fp
->fp_glob
->fg_flag
, FWASLOCKED
, relaxed
);
5245 (void)vnode_put(vp
);
5247 fp_drop(p
, fd
, fp
, 0);
5252 * sys_fileport_makeport
5254 * Description: Obtain a Mach send right for a given file descriptor.
5256 * Parameters: p Process calling fileport
5257 * uap->fd The fd to reference
5258 * uap->portnamep User address at which to place port name.
5260 * Returns: 0 Success.
5261 * EBADF Bad file descriptor.
5262 * EINVAL File descriptor had type that cannot be sent, misc. other errors.
5263 * EFAULT Address at which to store port name is not valid.
5264 * EAGAIN Resource shortage.
5267 * On success, name of send right is stored at user-specified address.
5270 sys_fileport_makeport(proc_t p
, struct fileport_makeport_args
*uap
,
5271 __unused
int *retval
)
5275 user_addr_t user_portaddr
= uap
->portnamep
;
5276 struct fileproc
*fp
= FILEPROC_NULL
;
5277 struct fileglob
*fg
= NULL
;
5278 ipc_port_t fileport
;
5279 mach_port_name_t name
= MACH_PORT_NULL
;
5282 err
= fp_lookup(p
, fd
, &fp
, 1);
5288 if (!fg_sendable(fg
)) {
5293 if (fp_isguarded(fp
, GUARD_FILEPORT
)) {
5294 err
= fp_guard_exception(p
, fd
, fp
, kGUARD_EXC_FILEPORT
);
5298 /* Dropped when port is deallocated */
5303 /* Allocate and initialize a port */
5304 fileport
= fileport_alloc(fg
);
5305 if (fileport
== IPC_PORT_NULL
) {
5311 /* Add an entry. Deallocates port on failure. */
5312 name
= ipc_port_copyout_send(fileport
, get_task_ipcspace(p
->task
));
5313 if (!MACH_PORT_VALID(name
)) {
5318 err
= copyout(&name
, user_portaddr
, sizeof(mach_port_name_t
));
5323 /* Tag the fileglob for debugging purposes */
5324 lck_mtx_lock_spin(&fg
->fg_lock
);
5325 fg
->fg_lflags
|= FG_PORTMADE
;
5326 lck_mtx_unlock(&fg
->fg_lock
);
5328 fp_drop(p
, fd
, fp
, 0);
5335 if (MACH_PORT_VALID(name
)) {
5336 /* Don't care if another thread races us to deallocate the entry */
5337 (void) mach_port_deallocate(get_task_ipcspace(p
->task
), name
);
5340 if (fp
!= FILEPROC_NULL
) {
5341 fp_drop(p
, fd
, fp
, 0);
5348 fileport_releasefg(struct fileglob
*fg
)
5350 (void)fg_drop(PROC_NULL
, fg
);
5356 * Description: Obtain the file descriptor for a given Mach send right.
5358 * Returns: 0 Success
5359 * EINVAL Invalid Mach port name, or port is not for a file.
5361 * fdalloc:ENOMEM Unable to allocate fileproc or extend file table.
5364 * *retval (modified) The new descriptor
5367 fileport_makefd(proc_t p
, ipc_port_t port
, int uf_flags
, int *retval
)
5369 struct fileglob
*fg
;
5370 struct fileproc
*fp
= FILEPROC_NULL
;
5374 fg
= fileport_port_to_fileglob(port
);
5380 fp
= fileproc_alloc_init(NULL
);
5381 if (fp
== FILEPROC_NULL
) {
5387 err
= fdalloc(p
, 0, &fd
);
5393 *fdflags(p
, fd
) |= uf_flags
;
5399 procfdtbl_releasefd(p
, fd
, fp
);
5405 if ((fp
!= NULL
) && (0 != err
)) {
5413 * sys_fileport_makefd
5415 * Description: Obtain the file descriptor for a given Mach send right.
5417 * Parameters: p Process calling fileport
5418 * uap->port Name of send right to file port.
5420 * Returns: 0 Success
5421 * EINVAL Invalid Mach port name, or port is not for a file.
5423 * fdalloc:ENOMEM Unable to allocate fileproc or extend file table.
5426 * *retval (modified) The new descriptor
5429 sys_fileport_makefd(proc_t p
, struct fileport_makefd_args
*uap
, int32_t *retval
)
5431 ipc_port_t port
= IPC_PORT_NULL
;
5432 mach_port_name_t send
= uap
->port
;
5436 res
= ipc_object_copyin(get_task_ipcspace(p
->task
),
5437 send
, MACH_MSG_TYPE_COPY_SEND
, &port
, 0, NULL
, IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND
);
5439 if (res
== KERN_SUCCESS
) {
5440 err
= fileport_makefd(p
, port
, UF_EXCLOSE
, retval
);
5445 if (IPC_PORT_NULL
!= port
) {
5446 ipc_port_release_send(port
);
5456 * Description: Duplicate the specified descriptor to a free descriptor;
5457 * this is the second half of fdopen(), above.
5459 * Parameters: fdp filedesc pointer to fill in
5461 * dfd fd to dup from
5462 * mode mode to set on new fd
5463 * error command code
5465 * Returns: 0 Success
5466 * EBADF Source fd is bad
5467 * EACCES Requested mode not allowed
5468 * !0 'error', if not ENODEV or
5471 * Notes: XXX This is not thread safe; see fdopen() above
5474 dupfdopen(struct filedesc
*fdp
, int indx
, int dfd
, int flags
, int error
)
5476 struct fileproc
*wfp
;
5477 struct fileproc
*fp
;
5481 proc_t p
= current_proc();
5484 * If the to-be-dup'd fd number is greater than the allowed number
5485 * of file descriptors, or the fd to be dup'd has already been
5486 * closed, reject. Note, check for new == old is necessary as
5487 * falloc could allocate an already closed to-be-dup'd descriptor
5488 * as the new descriptor.
5492 fp
= fdp
->fd_ofiles
[indx
];
5493 if (dfd
< 0 || dfd
>= fdp
->fd_nfiles
||
5494 (wfp
= fdp
->fd_ofiles
[dfd
]) == NULL
|| wfp
== fp
||
5495 (fdp
->fd_ofileflags
[dfd
] & UF_RESERVED
)) {
5500 myerror
= mac_file_check_dup(proc_ucred(p
), wfp
->fp_glob
, dfd
);
5507 * There are two cases of interest here.
5509 * For ENODEV simply dup (dfd) to file descriptor
5510 * (indx) and return.
5512 * For ENXIO steal away the file structure from (dfd) and
5513 * store it in (indx). (dfd) is effectively closed by
5516 * Any other error code is just returned.
5520 if (fp_isguarded(wfp
, GUARD_DUP
)) {
5526 * Check that the mode the file is being opened for is a
5527 * subset of the mode of the existing descriptor.
5529 if (((flags
& (FREAD
| FWRITE
)) | wfp
->f_flag
) != wfp
->f_flag
) {
5533 if (indx
> fdp
->fd_lastfile
) {
5534 fdp
->fd_lastfile
= indx
;
5538 fg_free(fp
->fp_glob
);
5540 fg_ref(p
, wfp
->fp_glob
);
5541 fp
->fp_glob
= wfp
->fp_glob
;
5543 fdp
->fd_ofileflags
[indx
] = fdp
->fd_ofileflags
[dfd
] |
5544 (flags
& O_CLOEXEC
) ? UF_EXCLOSE
: 0;
5560 * Description: Generic fileops read indirected through the fileops pointer
5561 * in the fileproc structure
5563 * Parameters: fp fileproc structure pointer
5564 * uio user I/O structure pointer
5566 * ctx VFS context for operation
5568 * Returns: 0 Success
5569 * !0 Errno from read
5572 fo_read(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
5574 return (*fp
->f_ops
->fo_read
)(fp
, uio
, flags
, ctx
);
5578 fo_no_read(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
5580 #pragma unused(fp, uio, flags, ctx)
5588 * Description: Generic fileops write indirected through the fileops pointer
5589 * in the fileproc structure
5591 * Parameters: fp fileproc structure pointer
5592 * uio user I/O structure pointer
5594 * ctx VFS context for operation
5596 * Returns: 0 Success
5597 * !0 Errno from write
5600 fo_write(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
5602 return (*fp
->f_ops
->fo_write
)(fp
, uio
, flags
, ctx
);
5606 fo_no_write(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
5608 #pragma unused(fp, uio, flags, ctx)
5616 * Description: Generic fileops ioctl indirected through the fileops pointer
5617 * in the fileproc structure
5619 * Parameters: fp fileproc structure pointer
5621 * data pointer to internalized copy
5622 * of user space ioctl command
5623 * parameter data in kernel space
5624 * ctx VFS context for operation
5626 * Returns: 0 Success
5627 * !0 Errno from ioctl
5629 * Locks: The caller is assumed to have held the proc_fdlock; this
5630 * function releases and reacquires this lock. If the caller
5631 * accesses data protected by this lock prior to calling this
5632 * function, it will need to revalidate/reacquire any cached
5633 * protected data obtained prior to the call.
5636 fo_ioctl(struct fileproc
*fp
, u_long com
, caddr_t data
, vfs_context_t ctx
)
5640 proc_fdunlock(vfs_context_proc(ctx
));
5641 error
= (*fp
->f_ops
->fo_ioctl
)(fp
, com
, data
, ctx
);
5642 proc_fdlock(vfs_context_proc(ctx
));
5647 fo_no_ioctl(struct fileproc
*fp
, u_long com
, caddr_t data
, vfs_context_t ctx
)
5649 #pragma unused(fp, com, data, ctx)
5657 * Description: Generic fileops select indirected through the fileops pointer
5658 * in the fileproc structure
5660 * Parameters: fp fileproc structure pointer
5661 * which select which
5662 * wql pointer to wait queue list
5663 * ctx VFS context for operation
5665 * Returns: 0 Success
5666 * !0 Errno from select
5669 fo_select(struct fileproc
*fp
, int which
, void *wql
, vfs_context_t ctx
)
5671 return (*fp
->f_ops
->fo_select
)(fp
, which
, wql
, ctx
);
5675 fo_no_select(struct fileproc
*fp
, int which
, void *wql
, vfs_context_t ctx
)
5677 #pragma unused(fp, which, wql, ctx)
5685 * Description: Generic fileops close indirected through the fileops pointer
5686 * in the fileproc structure
5688 * Parameters: fp fileproc structure pointer for
5690 * ctx VFS context for operation
5692 * Returns: 0 Success
5693 * !0 Errno from close
5696 fo_close(struct fileglob
*fg
, vfs_context_t ctx
)
5698 return (*fg
->fg_ops
->fo_close
)(fg
, ctx
);
5705 * Description: Generic fileops kqueue filter indirected through the fileops
5706 * pointer in the fileproc structure
5708 * Parameters: fp fileproc structure pointer
5709 * ctx VFS context for operation
5711 * Returns: 0 Success
5712 * !0 errno from drain
5715 fo_drain(struct fileproc
*fp
, vfs_context_t ctx
)
5717 return (*fp
->f_ops
->fo_drain
)(fp
, ctx
);
5721 fo_no_drain(struct fileproc
*fp
, vfs_context_t ctx
)
5723 #pragma unused(fp, ctx)
5731 * Description: Generic fileops kqueue filter indirected through the fileops
5732 * pointer in the fileproc structure
5734 * Parameters: fp fileproc structure pointer
5735 * kn pointer to knote to filter on
5737 * Returns: (kn->kn_flags & EV_ERROR) error in kn->kn_data
5738 * 0 Filter is not active
5739 * !0 Filter is active
5742 fo_kqfilter(struct fileproc
*fp
, struct knote
*kn
, struct kevent_qos_s
*kev
)
5744 return (*fp
->f_ops
->fo_kqfilter
)(fp
, kn
, kev
);
5748 fo_no_kqfilter(struct fileproc
*fp
, struct knote
*kn
, struct kevent_qos_s
*kev
)
5750 #pragma unused(fp, kev)
5751 knote_set_error(kn
, ENOTSUP
);
5757 fileproc_alloc_init(__unused
void *arg
)
5759 struct fileproc
*fp
= zalloc_flags(fp_zone
, Z_WAITOK
| Z_ZERO
);
5761 os_ref_init(&fp
->fp_iocount
, &f_refgrp
);
5767 fileproc_free(struct fileproc
*fp
)
5769 os_ref_count_t __unused refc
= os_ref_release(&fp
->fp_iocount
);
5770 #if DEVELOPMENT || DEBUG
5772 panic("%s: pid %d refc: %u != 0",
5773 __func__
, proc_pid(current_proc()), refc
);
5776 switch (FILEPROC_TYPE(fp
)) {
5781 guarded_fileproc_free(fp
);
5784 panic("%s: corrupt fp %p flags %x", __func__
, fp
, fp
->fp_flags
);
5789 fileproc_modify_vflags(struct fileproc
*fp
, fileproc_vflags_t vflags
, boolean_t clearflags
)
5792 os_atomic_andnot(&fp
->fp_vflags
, vflags
, relaxed
);
5794 os_atomic_or(&fp
->fp_vflags
, vflags
, relaxed
);
5799 fileproc_get_vflags(struct fileproc
*fp
)
5801 return os_atomic_load(&fp
->fp_vflags
, relaxed
);