/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.8 (Berkeley) 2/14/95
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/vnode_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/file_internal.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/aio_kern.h>

#include <kern/lock.h>

#include <bsm/audit_kernel.h>

#include <sys/mount_internal.h>
#include <sys/kdebug.h>
#include <sys/sysproto.h>

#include <kern/kern_types.h>
#include <kern/kalloc.h>
#include <libkern/OSAtomic.h>
int fdopen(dev_t dev, int mode, int type, struct proc *p);
int ogetdtablesize(struct proc *p, void *uap, register_t *retval);
int finishdup(struct proc * p, struct filedesc *fdp, int old, int new, register_t *retval);

int closef(struct fileglob *fg, struct proc *p);
int falloc_locked(struct proc *p, struct fileproc **resultfp, int *resultfd, int locked);
void fddrop(struct proc *p, int fd);
int fdgetf_noref(struct proc *p, int fd, struct fileproc **resultfp);
void fg_drop(struct fileproc * fp);
void fg_free(struct fileglob *fg);
void fg_ref(struct fileproc * fp);
int fp_getfpshm(struct proc *p, int fd, struct fileproc **resultfp, struct pshmnode **resultpshm);

static int closef_finish(struct fileproc *fp, struct fileglob *fg, struct proc *p);

extern void file_lock_init(void);
extern int is_suser(void);
extern int kqueue_stat(struct fileproc *fp, struct stat *st, struct proc *p);
extern int soo_stat(struct socket *so, struct stat *ub);
extern int vn_path_package_check(vnode_t vp, char *path, int pathlen, int *component);

extern kauth_scope_t kauth_scope_fileop;
#define f_flag f_fglob->fg_flag
#define f_type f_fglob->fg_type
#define f_msgcount f_fglob->fg_msgcount
#define f_cred f_fglob->fg_cred
#define f_ops f_fglob->fg_ops
#define f_offset f_fglob->fg_offset
#define f_data f_fglob->fg_data
/*
 * Descriptor management.
 */
struct filelist filehead;	/* head of list of open files */
struct fmsglist fmsghead;	/* head of list of open files */
struct fmsglist fmsg_ithead;	/* head of list of open files */
int nfiles;			/* actual number of open files */

lck_grp_attr_t * file_lck_grp_attr;
lck_grp_t * file_lck_grp;
lck_attr_t * file_lck_attr;

lck_mtx_t * uipc_lock;
lck_mtx_t * file_iterate_lcok;
lck_mtx_t * file_flist_lock;
void
file_lock_init(void)
{
	/* allocate file lock group attribute and group */
	file_lck_grp_attr = lck_grp_attr_alloc_init();
	lck_grp_attr_setstat(file_lck_grp_attr);

	file_lck_grp = lck_grp_alloc_init("file", file_lck_grp_attr);

	/* Allocate file lock attribute */
	file_lck_attr = lck_attr_alloc_init();
	//lck_attr_setdebug(file_lck_attr);

	uipc_lock = lck_mtx_alloc_init(file_lck_grp, file_lck_attr);
	file_iterate_lcok = lck_mtx_alloc_init(file_lck_grp, file_lck_attr);
	file_flist_lock = lck_mtx_alloc_init(file_lck_grp, file_lck_attr);
}
void
proc_fdlock(struct proc *p)
{
	lck_mtx_lock(&p->p_fdmlock);
}

void
proc_fdunlock(struct proc *p)
{
	lck_mtx_unlock(&p->p_fdmlock);
}
/*
 * System calls on descriptors.
 */

int
getdtablesize(struct proc *p, __unused struct getdtablesize_args *uap, register_t *retval)
{
	*retval = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);

	return (0);
}

int
ogetdtablesize(struct proc *p, __unused void *uap, register_t *retval)
{
	*retval = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, NOFILE);

	return (0);
}
static __inline__ void
_fdrelse(struct filedesc *fdp, int fd)
{
	if (fd < fdp->fd_freefile)
		fdp->fd_freefile = fd;
	if (fd > fdp->fd_lastfile)
		panic("fdrelse: fd_lastfile inconsistent");

	fdp->fd_ofiles[fd] = NULL;
	fdp->fd_ofileflags[fd] = 0;

	while ((fd = fdp->fd_lastfile) > 0 &&
			fdp->fd_ofiles[fd] == NULL &&
			!(fdp->fd_ofileflags[fd] & UF_RESERVED))
		fdp->fd_lastfile--;
}
/*
 * Duplicate a file descriptor.
 */
	struct dup_args *uap;

	register struct filedesc *fdp = p->p_fd;
	register int old = uap->fd;

	if ( (error = fp_lookup(p, old, &fp, 1)) ) {

	if ( (error = fdalloc(p, 0, &new)) ) {
		fp_drop(p, old, fp, 1);

	error = finishdup(p, fdp, old, new, retval);
	fp_drop(p, old, fp, 1);
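/*
 * Illustrative userland sketch (not part of this kernel file): both dup()
 * and fcntl(F_DUPFD) funnel into the fdalloc()/finishdup() path above, the
 * only difference being the minimum descriptor number searched.  The file
 * path and the value 10 are arbitrary choices made for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int old = open("/etc/hosts", O_RDONLY);
	int a = dup(old);		/* lowest free descriptor */
	int b = fcntl(old, F_DUPFD, 10);	/* lowest free descriptor >= 10 */

	if (old < 0 || a < 0 || b < 0) {
		perror("dup");
		return 1;
	}
	/* all three descriptors share one open file (one fileglob/offset) */
	printf("old=%d dup=%d F_DUPFD=%d\n", old, a, b);
	return 0;
}
#endif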
/*
 * Duplicate a file descriptor to a particular value.
 */
	struct dup2_args *uap;

	register struct filedesc *fdp = p->p_fd;
	register int old = uap->from, new = uap->to;

	if ( (error = fp_lookup(p, old, &fp, 1)) ) {
		new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
		fp_drop(p, old, fp, 1);
		fp_drop(p, old, fp, 1);

	if (new < 0 || new >= fdp->fd_nfiles) {
		if ( (error = fdalloc(p, new, &i)) ) {
			fp_drop(p, old, fp, 1);

		struct fileproc **fpp;

		flags = fdp->fd_ofileflags[new];
		if ((flags & (UF_RESERVED | UF_CLOSING)) == UF_RESERVED) {
			fp_drop(p, old, fp, 1);

		/*
		 * dup2() must succeed even if the close has an error.
		 */
		if (*(fpp = &fdp->fd_ofiles[new])) {
			struct fileproc *nfp = *fpp;

			close_internal(p, new, nfp, (CLOSEINT_LOCKED | CLOSEINT_WAITONCLOSE | CLOSEINT_NOFDRELSE | CLOSEINT_NOFDNOREF));

	error = finishdup(p, fdp, old, new, retval);
	fp_drop(p, old, fp, 1);
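/*
 * Illustrative userland sketch (not part of this kernel file): typical use
 * of the dup2() semantics implemented above -- the target descriptor is
 * implicitly closed via close_internal(), and any error from that implicit
 * close is discarded.  The helper name and file mode are assumptions made
 * for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>
#include <unistd.h>

static int
redirect_stdout(const char *path)
{
	int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return -1;
	/* closes the old descriptor 1 (if open), then makes 1 refer to fd's file */
	if (dup2(fd, STDOUT_FILENO) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif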
/*
 * The file control system call.
 */
fcntl(p, uap, retval)
	struct fcntl_args *uap;

	struct filedesc *fdp = p->p_fd;
	int i, tmp, error, error2, flg = F_POSIX;
	struct vfs_context context;
	int devBlockSize = 0;

	AUDIT_ARG(fd, uap->fd);
	AUDIT_ARG(cmd, uap->cmd);

	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {

	context.vc_ucred = fp->f_cred;
	if (proc_is64bit(p)) {

	/* since the arg parameter is defined as a long but may be either
	 * a long or a pointer we must take care to handle sign extension
	 * issues.  Our sys call munger will sign extend a long when we are
	 * called from a 32-bit process.  Since we can never have an address
	 * greater than 32-bits from a 32-bit process we lop off the top
	 * 32-bits to avoid getting the wrong address
	 */
	argp = CAST_USER_ADDR_T(uap->arg);

	pop = &fdp->fd_ofileflags[fd];

	newmin = CAST_DOWN(int, uap->arg);
	if ((u_int)newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
			newmin >= maxfiles) {
	if ( (error = fdalloc(p, newmin, &i)) )
	error = finishdup(p, fdp, fd, i, retval);

	*retval = (*pop & UF_EXCLOSE)? 1 : 0;

	*pop = (*pop &~ UF_EXCLOSE) |
		(uap->arg & 1)? UF_EXCLOSE : 0;
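/*
 * Illustrative userland sketch (not part of this kernel file): the
 * close-on-exec flag manipulated above as UF_EXCLOSE is what userland sees
 * as FD_CLOEXEC through F_GETFD/F_SETFD.  The helper name is invented for
 * the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>

static int
set_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD, 0);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
#endif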
	*retval = OFLAGS(fp->f_flag);

	fp->f_flag &= ~FCNTLFLAGS;
	tmp = CAST_DOWN(int, uap->arg);
	fp->f_flag |= FFLAGS(tmp) & FCNTLFLAGS;
	tmp = fp->f_flag & FNONBLOCK;
	error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, p);
	tmp = fp->f_flag & FASYNC;
	error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, p);
	fp->f_flag &= ~FNONBLOCK;
	(void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, p);

	if (fp->f_type == DTYPE_SOCKET) {
		*retval = ((struct socket *)fp->f_data)->so_pgid;
	error = fo_ioctl(fp, (int)TIOCGPGRP, (caddr_t)retval, p);

	tmp = CAST_DOWN(pid_t, uap->arg);
	if (fp->f_type == DTYPE_SOCKET) {
		((struct socket *)fp->f_data)->so_pgid = tmp;
	if (fp->f_type == DTYPE_PIPE) {
		error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, p);

	struct proc *p1 = pfind(tmp);
	tmp = (int)p1->p_pgrp->pg_id;
	error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, p);

	/* Fall into F_SETLK */

	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;

	offset = fp->f_offset;

	/* Copy in the lock structure */
	error = copyin(argp, (caddr_t)&fl, sizeof (fl));

	if ( (error = vnode_getwithref(vp)) ) {
	if (fl.l_whence == SEEK_CUR)
		fl.l_start += offset;

	if ((fflag & FREAD) == 0) {
	OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag);
	error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context);

	if ((fflag & FWRITE) == 0) {
	OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag);
	error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context);

	error = VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &fl,

	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;

	offset = fp->f_offset;

	/* Copy in the lock structure */
	error = copyin(argp, (caddr_t)&fl, sizeof (fl));

	if ( (error = vnode_getwithref(vp)) == 0 ) {
		if (fl.l_whence == SEEK_CUR)
			fl.l_start += offset;

		error = VNOP_ADVLOCK(vp, (caddr_t)p, F_GETLK, &fl, F_POSIX, &context);

	error = copyout((caddr_t)&fl, argp, sizeof (fl));
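/*
 * Illustrative userland sketch (not part of this kernel file): a POSIX
 * record lock request that lands in the F_SETLK/F_GETLK handling above
 * (F_SETLKW falls into F_SETLK in the kernel; SEEK_CUR start offsets are
 * rebased against f_offset as shown).  The helper name and the 100-byte
 * range are assumptions made for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>
#include <unistd.h>

static int
lock_header(int fd)
{
	struct flock fl;

	fl.l_type = F_WRLCK;		/* exclusive (write) lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 100;			/* first 100 bytes of the file */
	fl.l_pid = 0;
	return fcntl(fd, F_SETLKW, &fl);	/* wait until the lock is granted */
}
#endif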
	case F_PREALLOCATE: {
		fstore_t alloc_struct;    /* structure for allocate command */
		u_int32_t alloc_flags = 0;

		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		/* make sure that we have write permission */
		if ((fp->f_flag & FWRITE) == 0) {

		error = copyin(argp, (caddr_t)&alloc_struct, sizeof (alloc_struct));

		/* now set the space allocated to 0 */
		alloc_struct.fst_bytesalloc = 0;

		/*
		 * Do some simple parameter checking
		 */

		/* set up the flags */
		alloc_flags |= PREALLOCATE;

		if (alloc_struct.fst_flags & F_ALLOCATECONTIG)
			alloc_flags |= ALLOCATECONTIG;

		if (alloc_struct.fst_flags & F_ALLOCATEALL)
			alloc_flags |= ALLOCATEALL;

		/*
		 * Do any position mode specific stuff.  The only
		 * position mode supported now is PEOFPOSMODE
		 */
		switch (alloc_struct.fst_posmode) {
			if (alloc_struct.fst_offset != 0) {
			alloc_flags |= ALLOCATEFROMPEOF;

			if (alloc_struct.fst_offset <= 0) {
			alloc_flags |= ALLOCATEFROMVOL;

		if ( (error = vnode_getwithref(vp)) == 0 ) {
			/*
			 * call allocate to get the space
			 */
			error = VNOP_ALLOCATE(vp, alloc_struct.fst_length, alloc_flags,
					&alloc_struct.fst_bytesalloc, alloc_struct.fst_offset,

		error2 = copyout((caddr_t)&alloc_struct, argp, sizeof (alloc_struct));
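/*
 * Illustrative userland sketch (not part of this kernel file): how the
 * F_PREALLOCATE case above is typically driven, using the fstore_t layout
 * it copies in and out.  The helper name and the fall-back-to-non-contiguous
 * policy are assumptions made for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>

static int
preallocate(int fd, off_t len)
{
	fstore_t fst;

	fst.fst_flags = F_ALLOCATECONTIG | F_ALLOCATEALL;
	fst.fst_posmode = F_PEOFPOSMODE;	/* offset must be 0 in this mode */
	fst.fst_offset = 0;
	fst.fst_length = len;
	fst.fst_bytesalloc = 0;			/* filled in on return */

	if (fcntl(fd, F_PREALLOCATE, &fst) == -1) {
		/* retry without requiring contiguous space */
		fst.fst_flags = F_ALLOCATEALL;
		if (fcntl(fd, F_PREALLOCATE, &fst) == -1)
			return -1;
	}
	return 0;
}
#endif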
	if (fp->f_type != DTYPE_VNODE) {
	error = copyin(argp, (caddr_t)&offset, sizeof (off_t));

	/*
	 * Make sure that we are root.  Growing a file
	 * without zero filling the data is a security hole
	 * root would have access anyway so we'll allow it
	 */
	vp = (struct vnode *)fp->f_data;

	if ( (error = vnode_getwithref(vp)) == 0 ) {
		error = vnode_setsize(vp, offset, IO_NOZEROFILL, &context);

	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;

	if ( (error = vnode_getwithref(vp)) == 0) {
		vnode_clearnoreadahead(vp);
		vnode_setnoreadahead(vp);

	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;

	if ( (error = vnode_getwithref(vp)) == 0 ) {
		vnode_setnocache(vp);
		vnode_clearnocache(vp);

	case F_GLOBAL_NOCACHE:
		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if ( (error = vnode_getwithref(vp)) == 0 ) {
			*retval = vnode_isnocache(vp);
			vnode_setnocache(vp);
			vnode_clearnocache(vp);

		struct radvisory ra_struct;

		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if ( (error = copyin(argp, (caddr_t)&ra_struct, sizeof (ra_struct))) )
		if ( (error = vnode_getwithref(vp)) == 0 ) {
			error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context);
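/*
 * Illustrative userland sketch (not part of this kernel file): issuing the
 * read-ahead hint that the F_RDADVISE handling above forwards to the
 * filesystem via VNOP_IOCTL().  The helper name is invented for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>

static int
read_ahead_hint(int fd, off_t off, int len)
{
	struct radvisory ra;

	ra.ra_offset = off;	/* start of the range we expect to read soon */
	ra.ra_count = len;	/* length of that range in bytes */
	return fcntl(fd, F_RDADVISE, &ra);
}
#endif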
	case F_READBOOTSTRAP:
	case F_WRITEBOOTSTRAP: {
		fbootstraptransfer_t fbt_struct;
		user_fbootstraptransfer_t user_fbt_struct;
		caddr_t boot_structp;

		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if (IS_64BIT_PROCESS(p)) {
			sizeof_struct = sizeof(user_fbt_struct);
			boot_structp = (caddr_t) &user_fbt_struct;
		} else {
			sizeof_struct = sizeof(fbt_struct);
			boot_structp = (caddr_t) &fbt_struct;
		}
		error = copyin(argp, boot_structp, sizeof_struct);
		if ( (error = vnode_getwithref(vp)) ) {
		if (uap->cmd == F_WRITEBOOTSTRAP) {
			/*
			 * Make sure that we are root.  Updating the
			 * bootstrap on a disk could be a security hole
			 */
		if (strcmp(vnode_mount(vp)->mnt_vfsstat.f_fstypename, "hfs") != 0) {

		/*
		 * call vnop_ioctl to handle the I/O
		 */
		error = VNOP_IOCTL(vp, uap->cmd, boot_structp, 0, &context);

		struct log2phys l2p_struct;    /* structure for allocate command */

		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if ( (error = vnode_getwithref(vp)) ) {
		error = VNOP_OFFTOBLK(vp, fp->f_offset, &lbn);
		error = VNOP_BLKTOOFF(vp, lbn, &offset);
		devBlockSize = vfs_devblocksize(vnode_mount(vp));

		error = VNOP_BLOCKMAP(vp, offset, devBlockSize, &bn, NULL, NULL, 0, &context);

		l2p_struct.l2p_flags = 0;		/* for now */
		l2p_struct.l2p_contigbytes = 0;		/* for now */
		l2p_struct.l2p_devoffset = bn * devBlockSize;
		l2p_struct.l2p_devoffset += fp->f_offset - offset;
		error = copyout((caddr_t)&l2p_struct, argp, sizeof (l2p_struct));
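/*
 * Illustrative userland sketch (not part of this kernel file): querying the
 * logical-to-physical mapping (fcntl F_LOG2PHYS) that the l2p_struct code
 * above computes via VNOP_BLOCKMAP().  The helper name and the lseek()-based
 * positioning are assumptions made for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
show_phys_offset(int fd, off_t file_off)
{
	struct log2phys l2p;

	/* the mapping is taken at the file's current offset */
	if (lseek(fd, file_off, SEEK_SET) == -1 ||
	    fcntl(fd, F_LOG2PHYS, &l2p) == -1)
		return -1;
	printf("device offset: %lld\n", (long long)l2p.l2p_devoffset);
	return 0;
}
#endif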
	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;

	pathlen = MAXPATHLEN;
	MALLOC(pathbufp, char *, pathlen, M_TEMP, M_WAITOK);
	if (pathbufp == NULL) {

	if ( (error = vnode_getwithref(vp)) == 0 ) {
		error = vn_getpath(vp, pathbufp, &pathlen);
		error = copyout((caddr_t)pathbufp, argp, pathlen);

	FREE(pathbufp, M_TEMP);
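/*
 * Illustrative userland sketch (not part of this kernel file): the
 * vn_getpath()-backed F_GETPATH command above recovers a path for an open
 * descriptor; the caller must supply a buffer of at least MAXPATHLEN bytes.
 * The helper name is invented for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>
#include <sys/param.h>	/* MAXPATHLEN */

static int
fd_to_path(int fd, char path[MAXPATHLEN])
{
	return fcntl(fd, F_GETPATH, path);
}
#endif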
	case F_PATHPKG_CHECK: {
		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		pathlen = MAXPATHLEN;
		pathbufp = kalloc(MAXPATHLEN);

		if ( (error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0 ) {
			if ( (error = vnode_getwithref(vp)) == 0 ) {
				error = vn_path_package_check(vp, pathbufp, pathlen, retval);

		kfree(pathbufp, MAXPATHLEN);
	case F_CHKCLEAN:   // used by regression tests to see if all dirty pages got cleaned by fsync()
	case F_FULLFSYNC:  // fsync + flush the journal + DKIOCSYNCHRONIZECACHE
	case F_FREEZE_FS:  // freeze all other fs operations for the fs of this fd
	case F_THAW_FS: {  // thaw all frozen fs operations for the fs of this fd
		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		if ( (error = vnode_getwithref(vp)) == 0 ) {
			error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)NULL, 0, &context);
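/*
 * Illustrative userland sketch (not part of this kernel file): requesting
 * the F_FULLFSYNC behaviour described in the comment above.  Falling back
 * to plain fsync() when the filesystem does not support the command is an
 * assumption made for the example, as is the helper name.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>
#include <unistd.h>

static int
durable_sync(int fd)
{
	/* fsync() alone may leave data in the drive's write cache;
	 * F_FULLFSYNC also asks the driver to flush that cache. */
	if (fcntl(fd, F_FULLFSYNC) == -1)
		return fsync(fd);
	return 0;
}
#endif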
	if (uap->cmd < FCNTL_FS_SPECIFIC_BASE) {

	// if it's a fs-specific fcntl() then just pass it through

	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;

	if ( (error = vnode_getwithref(vp)) == 0 ) {
		error = VNOP_IOCTL(vp, uap->cmd, CAST_DOWN(caddr_t, argp), 0, &context);

	AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
	fp_drop(p, fd, fp, 0);

	fp_drop(p, fd, fp, 1);
/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 */
finishdup(struct proc * p, struct filedesc *fdp, int old, int new, register_t *retval)
	struct fileproc *nfp;
	struct fileproc *ofp;

	if ((ofp = fdp->fd_ofiles[old]) == NULL ||
			(fdp->fd_ofileflags[old] & UF_RESERVED)) {

	MALLOC_ZONE(nfp, struct fileproc *, sizeof(struct fileproc), M_FILEPROC, M_WAITOK);
	bzero(nfp, sizeof(struct fileproc));

	nfp->f_flags = ofp->f_flags;
	nfp->f_fglob = ofp->f_fglob;

	fdp->fd_ofiles[new] = nfp;
	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
	if (new > fdp->fd_lastfile)
		fdp->fd_lastfile = new;
close(struct proc *p, struct close_args *uap, __unused register_t *retval)
	AUDIT_SYSCLOSE(p, fd);

	if ( (error = fp_lookup(p,fd,&fp, 1)) ) {

	error = close_internal(p, fd, fp, CLOSEINT_LOCKED | CLOSEINT_WAITONCLOSE);
/*
 * Close a file descriptor.
 */
close_internal(struct proc *p, int fd, struct fileproc *fp, int flags)
	struct filedesc *fdp = p->p_fd;
	int locked = flags & CLOSEINT_LOCKED;
	int waitonclose = flags & CLOSEINT_WAITONCLOSE;
	int norelse = flags & CLOSEINT_NOFDRELSE;
	int nofdref = flags & CLOSEINT_NOFDNOREF;
	int slpstate = PRIBIO;

	/* Keep people from using the filedesc while we are closing it */
	fdp->fd_ofileflags[fd] |= UF_RESERVED;

	fdp->fd_ofileflags[fd] |= UF_CLOSING;

	if ((waitonclose && ((fp->f_flags & FP_CLOSING) == FP_CLOSING))) {
		fp_drop(p, fd, fp, 1);
		fp->f_flags |= FP_WAITCLOSE;

		msleep(&fp->f_flags, &p->p_fdmlock, slpstate, "close wait", 0);

	fp->f_flags |= FP_CLOSING;

	if ( (fp->f_flags & FP_AIOISSUED) || kauth_authorize_fileop_has_listeners() ) {

		if ( (fp->f_type == DTYPE_VNODE) && kauth_authorize_fileop_has_listeners() ) {
			/*
			 * call out to allow 3rd party notification of close.
			 * Ignore result of kauth_authorize_fileop call.
			 */
			if (vnode_getwithref((vnode_t)fp->f_data) == 0) {
				u_int	fileop_flags = 0;

				if ((fp->f_flags & FP_WRITTEN) != 0)
					fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED;
				kauth_authorize_fileop(fp->f_fglob->fg_cred, KAUTH_FILEOP_CLOSE,
						(uintptr_t)fp->f_data, (uintptr_t)fileop_flags);
				vnode_put((vnode_t)fp->f_data);
			}
		}
		if (fp->f_flags & FP_AIOISSUED)
			/*
			 * cancel all async IO requests that can be cancelled.
			 */
			_aio_close( p, fd );
	}

	if (fd < fdp->fd_knlistsize)
		knote_fdclose(p, fd);

	if (fp->f_flags & FP_WAITEVENT)
		(void)waitevent_close(p, fp);

	if ((fp->f_flags & FP_INCHRREAD) == 0)
		fileproc_drain(p, fp);

	error = closef_locked(fp, fp->f_fglob, p);
	if ((fp->f_flags & FP_WAITCLOSE) == FP_WAITCLOSE)
		wakeup(&fp->f_flags);
	fp->f_flags &= ~(FP_WAITCLOSE | FP_CLOSING);

	FREE_ZONE(fp, sizeof *fp, M_FILEPROC);
/*
 * Return status information about a file descriptor.
 *
 * XXX switch on node type is bogus; need a stat in struct fileops instead.
 */
fstat1(struct proc *p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsecurity_size)
	struct fileproc *fp;
	struct user_stat user_sb;
	kauth_filesec_t fsec;
	ssize_t xsecurity_bufsize;
	struct vfs_context context;

	if ((error = fp_lookup(p, fd, &fp, 0)) != 0)

	fsec = KAUTH_FILESEC_NONE;

	context.vc_proc = current_proc();
	context.vc_ucred = kauth_cred_get();
	if ((error = vnode_getwithref((vnode_t)data)) == 0) {
		/*
		 * If the caller has the file open, and is not requesting extended security,
		 * we are going to let them get the basic stat information.
		 */
		if (xsecurity == USER_ADDR_NULL) {
			error = vn_stat_noauth((vnode_t)data, &sb, NULL, &context);
		} else {
			error = vn_stat((vnode_t)data, &sb, &fsec, &context);
		}
		AUDIT_ARG(vnpath, (struct vnode *)data, ARG_VNODE1);
		(void)vnode_put((vnode_t)data);

	error = soo_stat((struct socket *)data, &sb);

	error = pipe_stat((void *)data, &sb);

	error = pshm_stat((void *)data, &sb);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	error = kqueue_stat(fp, &sb, p);
	thread_funnel_set(kernel_flock, funnel_state);

	/* Zap spare fields */
	sb.st_qspare[0] = 0LL;
	sb.st_qspare[1] = 0LL;

	if (IS_64BIT_PROCESS(current_proc())) {
		munge_stat(&sb, &user_sb);
		my_size = sizeof(user_sb);
		sbp = (caddr_t)&user_sb;
	} else {
		my_size = sizeof(sb);
	}
	error = copyout(sbp, ub, my_size);

	/* caller wants extended security information? */
	if (xsecurity != USER_ADDR_NULL) {

		/* did we get any? */
		if (fsec == KAUTH_FILESEC_NONE) {
			if (susize(xsecurity_size, 0) != 0) {

		/* find the user buffer size */
		xsecurity_bufsize = fusize(xsecurity_size);

		/* copy out the actual data size */
		if (susize(xsecurity_size, KAUTH_FILESEC_COPYSIZE(fsec)) != 0) {

		/* if the caller supplied enough room, copy out to it */
		if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec))
			error = copyout(fsec, xsecurity, KAUTH_FILESEC_COPYSIZE(fsec));

	fp_drop(p, fd, fp, 0);

	kauth_filesec_free(fsec);
int
fstat_extended(struct proc *p, struct fstat_extended_args *uap, __unused register_t *retval)
{
	return(fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size));
}

int
fstat(struct proc *p, register struct fstat_args *uap, __unused register_t *retval)
{
	return(fstat1(p, uap->fd, uap->ub, 0, 0));
}
/*
 * Return pathconf information about a file descriptor.
 */
fpathconf(p, uap, retval)
	register struct fpathconf_args *uap;

	struct fileproc *fp;
	struct vfs_context context;

	AUDIT_ARG(fd, uap->fd);
	if ( (error = fp_lookup(p, fd, &fp, 0)) )

	if (uap->name != _PC_PIPE_BUF) {

	vp = (struct vnode *)data;

	if ( (error = vnode_getwithref(vp)) == 0) {
		AUDIT_ARG(vnpath, vp, ARG_VNODE1);

		context.vc_proc = p;
		context.vc_ucred = kauth_cred_get();

		error = vn_pathconf(vp, uap->name, retval, &context);

		(void)vnode_put(vp);

	panic("fpathconf (unrecognized - %d)", type);

	fp_drop(p, fd, fp, 0);
/*
 * Allocate a file descriptor for the process.
 */
fdalloc(p, want, result)
	register struct filedesc *fdp = p->p_fd;
	int lim, last, numfiles, oldnfiles;
	struct fileproc **newofiles, **ofiles;
	char *newofileflags, *ofileflags;

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 */
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);

	last = min(fdp->fd_nfiles, lim);
	if ((i = want) < fdp->fd_freefile)
		i = fdp->fd_freefile;
	ofiles = &fdp->fd_ofiles[i];
	ofileflags = &fdp->fd_ofileflags[i];
	for (; i < last; i++) {
		if (*ofiles == NULL && !(*ofileflags & UF_RESERVED)) {
			*ofileflags = UF_RESERVED;
			if (i > fdp->fd_lastfile)
				fdp->fd_lastfile = i;
			if (want <= fdp->fd_freefile)
				fdp->fd_freefile = i;
		}
		ofiles++; ofileflags++;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim)
	if (fdp->fd_nfiles < NDEXTENT)
		numfiles = NDEXTENT;
	numfiles = 2 * fdp->fd_nfiles;

	MALLOC_ZONE(newofiles, struct fileproc **,
			numfiles * OFILESIZE, M_OFILETABL, M_WAITOK);

	if (newofiles == NULL) {
	if (fdp->fd_nfiles >= numfiles) {
		FREE_ZONE(newofiles, numfiles * OFILESIZE, M_OFILETABL);

	newofileflags = (char *) &newofiles[numfiles];
	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	oldnfiles = fdp->fd_nfiles;
	(void) memcpy(newofiles, fdp->fd_ofiles,
			oldnfiles * sizeof *fdp->fd_ofiles);
	(void) memset(&newofiles[oldnfiles], 0,
			(numfiles - oldnfiles) * sizeof *fdp->fd_ofiles);

	(void) memcpy(newofileflags, fdp->fd_ofileflags,
			oldnfiles * sizeof *fdp->fd_ofileflags);
	(void) memset(&newofileflags[oldnfiles], 0,
			(numfiles - oldnfiles) *
			sizeof *fdp->fd_ofileflags);

	ofiles = fdp->fd_ofiles;
	fdp->fd_ofiles = newofiles;
	fdp->fd_ofileflags = newofileflags;
	fdp->fd_nfiles = numfiles;
	FREE_ZONE(ofiles, oldnfiles * OFILESIZE, M_OFILETABL);
/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
	struct filedesc *fdp = p->p_fd;
	struct fileproc **fpp;

	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0)

	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
	flags = &fdp->fd_ofileflags[fdp->fd_freefile];
	for (i = fdp->fd_nfiles - fdp->fd_freefile; --i >= 0; fpp++, flags++)
		if (*fpp == NULL && !(*flags & UF_RESERVED) && --n <= 0)
	_fdrelse(p->p_fd, fd);

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if (fd < fdp->fd_freefile)
		fdp->fd_freefile = fd;
	if (fd > fdp->fd_lastfile)
		panic("fdrelse: fd_lastfile inconsistent");

	fp = fdp->fd_ofiles[fd];
	fdp->fd_ofiles[fd] = NULL;
	fdp->fd_ofileflags[fd] = 0;

	while ((fd = fdp->fd_lastfile) > 0 &&
			fdp->fd_ofiles[fd] == NULL &&
			!(fdp->fd_ofileflags[fd] & UF_RESERVED))
		fdp->fd_lastfile--;

	FREE_ZONE(fp, sizeof *fp, M_FILEPROC);
fdgetf_noref(p, fd, resultfp)
	struct fileproc **resultfp;

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if (fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
/* should be called only when proc_fdlock is held */
fp_setflags(proc_t p, struct fileproc * fp, int flags)
	fp->f_flags |= flags;

fp_clearflags(proc_t p, struct fileproc * fp, int flags)
	fp->f_flags &= ~flags;
fp_getfvp(p, fd, resultfp, resultvp)
	struct fileproc **resultfp;
	struct vnode **resultvp;

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if (fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
	if (fp->f_type != DTYPE_VNODE) {

	*resultvp = (struct vnode *)fp->f_data;
/*
 * Returns:	EBADF		The file descriptor is invalid
 *		EOPNOTSUPP	The file descriptor is not a socket
 *
 * Notes:	EOPNOTSUPP should probably be ENOTSOCK; this function is only
 *		ever called from accept1().
 */
fp_getfsock(p, fd, resultfp, results)
	struct fileproc **resultfp;
	struct socket **results;

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if (fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
	if (fp->f_type != DTYPE_SOCKET) {

	*results = (struct socket *)fp->f_data;
fp_getfkq(p, fd, resultfp, resultkq)
	struct fileproc **resultfp;
	struct kqueue **resultkq;

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if ( fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
	if (fp->f_type != DTYPE_KQUEUE) {

	*resultkq = (struct kqueue *)fp->f_data;
fp_getfpshm(p, fd, resultfp, resultpshm)
	struct fileproc **resultfp;
	struct pshmnode **resultpshm;

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if (fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
	if (fp->f_type != DTYPE_PSXSHM) {

	*resultpshm = (struct pshmnode *)fp->f_data;
fp_getfpsem(p, fd, resultfp, resultpsem)
	struct fileproc **resultfp;
	struct psemnode **resultpsem;

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if (fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
	if (fp->f_type != DTYPE_PSXSEM) {

	*resultpsem = (struct psemnode *)fp->f_data;
fp_lookup(p, fd, resultfp, locked)
	struct fileproc **resultfp;

	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	if (fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED)) {
fp_drop_written(proc_t p, int fd, struct fileproc *fp)
	fp->f_flags |= FP_WRITTEN;

	error = fp_drop(p, fd, fp, 1);

fp_drop_event(proc_t p, int fd, struct fileproc *fp)
	fp->f_flags |= FP_WAITEVENT;

	error = fp_drop(p, fd, fp, 1);
fp_drop(p, fd, fp, locked)
	struct fileproc *fp;

	struct filedesc *fdp = p->p_fd;

	if ((fp == FILEPROC_NULL) && (fd < 0 || fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
			 !(fdp->fd_ofileflags[fd] & UF_CLOSING)))) {

	if (p->p_fpdrainwait && fp->f_iocount == 0) {
		p->p_fpdrainwait = 0;
		wakeup(&p->p_fpdrainwait);
file_vnode(int fd, struct vnode **vpp)
	struct proc * p = current_proc();
	struct fileproc *fp;

	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {
	if (fp->f_type != DTYPE_VNODE) {
		fp_drop(p, fd, fp,1);

	*vpp = (struct vnode *)fp->f_data;

file_socket(int fd, struct socket **sp)
	struct proc * p = current_proc();
	struct fileproc *fp;

	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {
	if (fp->f_type != DTYPE_SOCKET) {
		fp_drop(p, fd, fp,1);

	*sp = (struct socket *)fp->f_data;

file_flags(int fd, int * flags)
	struct proc * p = current_proc();
	struct fileproc *fp;

	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {
	*flags = (int)fp->f_flag;
	fp_drop(p, fd, fp,1);
	struct fileproc *fp;
	struct proc *p = current_proc();

	if (fd < 0 || fd >= p->p_fd->fd_nfiles ||
			(fp = p->p_fd->fd_ofiles[fd]) == NULL ||
			((p->p_fd->fd_ofileflags[fd] & UF_RESERVED) &&
			 !(p->p_fd->fd_ofileflags[fd] & UF_CLOSING))) {

	if (p->p_fpdrainwait && fp->f_iocount == 0) {
		p->p_fpdrainwait = 0;
		wakeup(&p->p_fpdrainwait);
falloc(p, resultfp, resultfd)
	struct fileproc **resultfp;

	error = falloc_locked(p, resultfp, resultfd, 1);
/*
 * Create a new open file structure and allocate
 * a file descriptor for the process that refers to it.
 */
falloc_locked(p, resultfp, resultfd, locked)
	struct fileproc **resultfp;

	struct fileproc *fp, *fq;
	struct fileglob *fg;

	if ( (error = fdalloc(p, 0, &nfd)) ) {
	if (nfiles >= maxfiles) {

	/*
	 * Allocate a new file descriptor.
	 * If the process has file descriptor zero open, add to the list
	 * of open files at that point, otherwise put it at the front of
	 * the list of open files.
	 */
	MALLOC_ZONE(fp, struct fileproc *, sizeof(struct fileproc), M_FILEPROC, M_WAITOK);
	MALLOC_ZONE(fg, struct fileglob *, sizeof(struct fileglob), M_FILEGLOB, M_WAITOK);
	bzero(fp, sizeof(struct fileproc));
	bzero(fg, sizeof(struct fileglob));
	lck_mtx_init(&fg->fg_lock, file_lck_grp, file_lck_attr);

	fp->f_cred = kauth_cred_proc_ref(p);

	lck_mtx_lock(file_flist_lock);

	if ( (fq = p->p_fd->fd_ofiles[0]) ) {
		LIST_INSERT_AFTER(fq->f_fglob, fg, f_list);
	} else {
		LIST_INSERT_HEAD(&filehead, fg, f_list);
	}
	lck_mtx_unlock(file_flist_lock);

	p->p_fd->fd_ofiles[nfd] = fp;
/*
 * Free a file structure.
 */
	struct fileglob *fg;

	lck_mtx_lock(file_flist_lock);
	LIST_REMOVE(fg, f_list);
	lck_mtx_unlock(file_flist_lock);

	if (cred != NOCRED) {
		fg->fg_cred = NOCRED;
		kauth_cred_rele(cred);
	}
	lck_mtx_destroy(&fg->fg_lock, file_lck_grp);

	FREE_ZONE(fg, sizeof *fg, M_FILEGLOB);
	struct filedesc *fdp = p->p_fd;
	int i = fdp->fd_lastfile;
	struct fileproc **fpp = &fdp->fd_ofiles[i];
	char *flags = &fdp->fd_ofileflags[i];

	funnel_state = thread_funnel_set(kernel_flock, FALSE);

	if ((*flags & (UF_RESERVED|UF_EXCLOSE)) == UF_EXCLOSE) {
		struct fileproc *fp = *fpp;

		if (i < fdp->fd_knlistsize)
			knote_fdclose(p, i);

		*fpp = NULL; *flags = 0;
		if (i == fdp->fd_lastfile && i > 0)
			fdp->fd_lastfile--;
		closef_locked(fp, fp->f_fglob, p);
		FREE_ZONE(fp, sizeof *fp, M_FILEPROC);
	}

	i--; fpp--; flags--;

	thread_funnel_set(kernel_flock, funnel_state);
/*
 * Copy a filedesc structure.
 */
	struct filedesc *newfdp, *fdp = p->p_fd;
	struct fileproc *ofp, *fp;

	MALLOC_ZONE(newfdp, struct filedesc *,
			sizeof *newfdp, M_FILEDESC, M_WAITOK);

	/*
	 * the FD_CHROOT flag will be inherited via this copy
	 */
	(void) memcpy(newfdp, fdp, sizeof *newfdp);

	/*
	 * for both fd_cdir and fd_rdir make sure we get
	 * a valid reference... if we can't, then set
	 * the pointer(s) to NULL in the child... this
	 * will keep us from using a non-referenced vp
	 * and allows us to do the vnode_rele only on
	 * a properly referenced vp
	 */
	if ( (v_dir = newfdp->fd_cdir) ) {
		if (vnode_getwithref(v_dir) == 0) {
			if ( (vnode_ref(v_dir)) )
				newfdp->fd_cdir = NULL;
		} else {
			newfdp->fd_cdir = NULL;
		}
	}
	if (newfdp->fd_cdir == NULL && fdp->fd_cdir) {
		/*
		 * we couldn't get a new reference on
		 * the current working directory being
		 * inherited... we might as well drop
		 * our reference from the parent also
		 * since the vnode has gone DEAD making
		 * it useless... by dropping it we'll
		 * be that much closer to recycling it
		 */
		vnode_rele(fdp->fd_cdir);
		fdp->fd_cdir = NULL;
	}

	if ( (v_dir = newfdp->fd_rdir) ) {
		if (vnode_getwithref(v_dir) == 0) {
			if ( (vnode_ref(v_dir)) )
				newfdp->fd_rdir = NULL;
		} else {
			newfdp->fd_rdir = NULL;
		}
	}
	if (newfdp->fd_rdir == NULL && fdp->fd_rdir) {
		/*
		 * we couldn't get a new reference on
		 * the root directory being
		 * inherited... we might as well drop
		 * our reference from the parent also
		 * since the vnode has gone DEAD making
		 * it useless... by dropping it we'll
		 * be that much closer to recycling it
		 */
		vnode_rele(fdp->fd_rdir);
		fdp->fd_rdir = NULL;
	}
	newfdp->fd_refcnt = 1;

	/*
	 * If the number of open files fits in the internal arrays
	 * of the open file structure, use them, otherwise allocate
	 * additional memory for the number of descriptors currently
	 * in use.
	 */
	if (newfdp->fd_lastfile < NDFILE)

	/*
	 * Compute the smallest multiple of NDEXTENT needed
	 * for the file descriptors currently in use,
	 * allowing the table to shrink.
	 */
	i = newfdp->fd_nfiles;
	while (i > 2 * NDEXTENT && i > newfdp->fd_lastfile * 2)

	MALLOC_ZONE(newfdp->fd_ofiles, struct fileproc **,
			i * OFILESIZE, M_OFILETABL, M_WAITOK);
	if (newfdp->fd_ofiles == NULL) {
		if (newfdp->fd_cdir)
			vnode_rele(newfdp->fd_cdir);
		if (newfdp->fd_rdir)
			vnode_rele(newfdp->fd_rdir);

		FREE_ZONE(newfdp, sizeof *newfdp, M_FILEDESC);
	}
	newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[i];
	newfdp->fd_nfiles = i;

	if (fdp->fd_nfiles > 0) {
		struct fileproc **fpp;

		(void) memcpy(newfdp->fd_ofiles, fdp->fd_ofiles,
				i * sizeof *fdp->fd_ofiles);
		(void) memcpy(newfdp->fd_ofileflags, fdp->fd_ofileflags,
				i * sizeof *fdp->fd_ofileflags);

		/*
		 * kq descriptors cannot be copied.
		 */
		if (newfdp->fd_knlistsize != -1) {
			fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile];
			for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--) {
				if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) {
					if (i < newfdp->fd_freefile)
						newfdp->fd_freefile = i;
				if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0)
					newfdp->fd_lastfile--;
			}
			newfdp->fd_knlist = NULL;
			newfdp->fd_knlistsize = -1;
			newfdp->fd_knhash = NULL;
			newfdp->fd_knhashmask = 0;
		}
		fpp = newfdp->fd_ofiles;
		flags = newfdp->fd_ofileflags;

		for (i = newfdp->fd_lastfile; i-- >= 0; fpp++, flags++)
			if ((ofp = *fpp) != NULL && !(*flags & UF_RESERVED)) {
				MALLOC_ZONE(fp, struct fileproc *, sizeof(struct fileproc), M_FILEPROC, M_WAITOK);
				bzero(fp, sizeof(struct fileproc));
				fp->f_flags = ofp->f_flags;
				//fp->f_iocount = ofp->f_iocount;
				fp->f_fglob = ofp->f_fglob;
			}
	} else {
		(void) memset(newfdp->fd_ofiles, 0, i * OFILESIZE);
	}
/*
 * Release a filedesc structure.
 */
	struct filedesc *fdp;
	struct fileproc *fp;

	/* Certain daemons might not have file descriptors */
	if ((fdp == NULL) || (--fdp->fd_refcnt > 0)) {

	if (fdp->fd_refcnt == 0xffff)
		panic("fdfree: bad fd_refcnt");

	/* Last reference: the structure can't change out from under us */

	if (fdp->fd_nfiles > 0 && fdp->fd_ofiles) {
		for (i = fdp->fd_lastfile; i >= 0; i--) {
			if ((fp = fdp->fd_ofiles[i]) != NULL) {

				if (fdp->fd_ofileflags[i] & UF_RESERVED)
					panic("fdfree: found fp with UF_RESERVED\n");

				/* closef drops the iocount ... */
				if ((fp->f_flags & FP_INCHRREAD) != 0)
				fdp->fd_ofiles[i] = NULL;
				fdp->fd_ofileflags[i] |= UF_RESERVED;

				if (i < fdp->fd_knlistsize)
					knote_fdclose(p, i);
				if (fp->f_flags & FP_WAITEVENT)
					(void)waitevent_close(p, fp);
				(void) closef_locked(fp, fp->f_fglob, p);
				FREE_ZONE(fp, sizeof *fp, M_FILEPROC);
			}
		}
		FREE_ZONE(fdp->fd_ofiles, fdp->fd_nfiles * OFILESIZE, M_OFILETABL);
		fdp->fd_ofiles = NULL;
	}

	vnode_rele(fdp->fd_cdir);
	vnode_rele(fdp->fd_rdir);

	FREE(fdp->fd_knlist, M_KQUEUE);
	FREE(fdp->fd_knhash, M_KQUEUE);

	FREE_ZONE(fdp, sizeof *fdp, M_FILEDESC);
closef_finish(fp, fg, p)
	struct fileproc *fp;
	struct fileglob *fg;

	struct vfs_context context;

	if ((fg->fg_flag & FHASLOCK) && fg->fg_type == DTYPE_VNODE) {
		lf.l_whence = SEEK_SET;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fg->fg_data;
		context.vc_proc = p;
		context.vc_ucred = fg->fg_cred;

		(void) VNOP_ADVLOCK(vp, (caddr_t)fg, F_UNLCK, &lf, F_FLOCK, &context);
	}
	error = fo_close(fg, p);

	if (((fp != (struct fileproc *)0) && ((fp->f_flags & FP_INCHRREAD) != 0))) {
		if ( ((fp->f_flags & FP_INCHRREAD) != 0) ) {
			fileproc_drain(p, fp);

	struct fileglob *fg;

	error = closef_locked((struct fileproc *)0, fg, p);
/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: p may be NULL when closing a file
 * that was being passed in a message.
 */
closef_locked(fp, fg, p)
	struct fileproc *fp;
	struct fileglob *fg;

	struct vfs_context context;

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p && (p->p_ladvflag & P_LADVLOCK) && fg->fg_type == DTYPE_VNODE) {
		lf.l_whence = SEEK_SET;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fg->fg_data;

		if ( (error = vnode_getwithref(vp)) == 0 ) {
			context.vc_proc = p;
			context.vc_ucred = fg->fg_cred;
			(void) VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context);

			(void)vnode_put(vp);
		}
	}
	lck_mtx_lock(&fg->fg_lock);

	if (fg->fg_count > 0) {
		lck_mtx_unlock(&fg->fg_lock);

	if (fg->fg_count != 0)
		panic("fg: being freed with bad fg_count (%d)", fg->fg_count);

	if (fp && (fp->f_flags & FP_WRITTEN))
		fg->fg_flag |= FWASWRITTEN;

	fg->fg_lflags |= FG_TERM;
	lck_mtx_unlock(&fg->fg_lock);

	error = closef_finish(fp, fg, p);
void
fileproc_drain(struct proc *p, struct fileproc * fp)
{
	fp->f_iocount-- ; /* (the one the close holds) */

	while (fp->f_iocount) {
		if (((fp->f_flags & FP_INSELECT) == FP_INSELECT)) {
			wait_queue_wakeup_all((wait_queue_t)fp->f_waddr, &selwait, THREAD_INTERRUPTED);
		}
		if (fp->f_fglob->fg_ops->fo_drain) {
			(*fp->f_fglob->fg_ops->fo_drain)(fp, p);
		}
		p->p_fpdrainwait = 1;

		msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO, "fpdrain", 0);

		//panic("successful wait after drain\n");
	}
}
fp_free(struct proc * p, int fd, struct fileproc * fp)
	fg_free(fp->f_fglob);
	FREE_ZONE(fp, sizeof *fp, M_FILEPROC);
/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
int
flock(struct proc *p, register struct flock_args *uap, __unused register_t *retval)
{
	struct fileproc *fp;
	struct vfs_context context;

	AUDIT_ARG(fd, uap->fd);
	if ( (error = fp_getfvp(p, fd, &fp, &vp)) ) {
	if ( (error = vnode_getwithref(vp)) ) {
	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	context.vc_proc = p;
	context.vc_ucred = fp->f_cred;

	lf.l_whence = SEEK_SET;

	if (how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		fp->f_flag &= ~FHASLOCK;
		error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, &context);
	}
	if (how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (how & LOCK_SH)
		lf.l_type = F_RDLCK;

	fp->f_flag |= FHASLOCK;
	if (how & LOCK_NB) {
		error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, F_FLOCK, &context);
	}
	error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, F_FLOCK|F_WAIT, &context);

	(void)vnode_put(vp);

	fp_drop(p, fd, fp, 0);
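/*
 * Illustrative userland sketch (not part of this kernel file): a whole-file
 * advisory lock taken through the flock() path above.  Unlike the POSIX
 * fcntl() locks, this lock is attached to the open file (the fileglob), so
 * it follows dup()/fork() rather than the process.  The helper name, path
 * handling, and non-blocking policy are assumptions made for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

static int
acquire_lockfile(const char *path)
{
	int fd = open(path, O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return -1;
	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {	/* fail instead of sleeping */
		close(fd);
		return -1;
	}
	return fd;	/* caller keeps fd open while holding the lock */
}
#endif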
/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
int
fdopen(dev_t dev, __unused int mode, __unused int type, struct proc *p)
{
	/*
	 * XXX Kludge: set curproc->p_dupfd to contain the value of
	 * the file descriptor being sought for duplication. The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open. Open will detect this special error and take the
	 * actions in dupfdopen below. Other callers of vn_open or vnop_open
	 * will simply report the error.
	 */
	p->p_dupfd = minor(dev);
	return (ENODEV);
}
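/*
 * Illustrative userland sketch (not part of this kernel file): opening
 * /dev/fd/N, which resolves through the fdopen()/dupfdopen() kludge above
 * to a duplicate of the calling process's descriptor N.  The file path and
 * buffer size are arbitrary choices made for the example.
 */
#if 0	/* example only -- never compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char path[32];
	int fd = open("/etc/hosts", O_RDONLY);
	int fd2;

	snprintf(path, sizeof(path), "/dev/fd/%d", fd);
	fd2 = open(path, O_RDONLY);	/* effectively dup(fd) */
	printf("fd=%d fd2=%d\n", fd, fd2);
	return 0;
}
#endif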
/*
 * Duplicate the specified descriptor to a free descriptor.
 */
dupfdopen(fdp, indx, dfd, mode, error)
	register struct filedesc *fdp;
	register int indx, dfd;

	struct fileproc *wfp;
	struct fileproc *fp;
	struct proc * p = current_proc();

	/*
	 * If the to-be-dup'd fd number is greater than the allowed number
	 * of file descriptors, or the fd to be dup'd has already been
	 * closed, reject.  Note, check for new == old is necessary as
	 * falloc could allocate an already closed to-be-dup'd descriptor
	 * as the new descriptor.
	 */
	fp = fdp->fd_ofiles[indx];
	if (dfd < 0 || dfd >= fdp->fd_nfiles ||
			(wfp = fdp->fd_ofiles[dfd]) == NULL || wfp == fp ||
			(fdp->fd_ofileflags[dfd] & UF_RESERVED)) {

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup (dfd) to file descriptor
	 * (indx) and return.
	 *
	 * For ENXIO steal away the file structure from (dfd) and
	 * store it in (indx).  (dfd) is effectively closed by
	 * this operation.
	 *
	 * Any other error code is just returned.
	 */

	/*
	 * Check that the mode the file is being opened for is a
	 * subset of the mode of the existing descriptor.
	 */
	if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {

	if (indx > fdp->fd_lastfile)
		fdp->fd_lastfile = indx;

	fg_free(fp->f_fglob);
	fp->f_fglob = wfp->f_fglob;

	fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];

	/*
	 * Steal away the file pointer from dfd, and stuff it into indx.
	 */
	if (indx > fdp->fd_lastfile)
		fdp->fd_lastfile = indx;

	fg_free(fp->f_fglob);
	fp->f_fglob = wfp->f_fglob;

	fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd];

	FREE_ZONE(wfp, sizeof *fp, M_FILEPROC);
fg_ref(struct fileproc * fp)
	struct fileglob *fg;

	lck_mtx_lock(&fg->fg_lock);
	lck_mtx_unlock(&fg->fg_lock);

fg_drop(struct fileproc * fp)
	struct fileglob *fg;

	lck_mtx_lock(&fg->fg_lock);
	lck_mtx_unlock(&fg->fg_lock);
fg_insertuipc(struct fileglob * fg)
	lck_mtx_lock(&fg->fg_lock);

	while (fg->fg_lflags & FG_RMMSGQ) {
		fg->fg_lflags |= FG_WRMMSGQ;
		msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_insertuipc", 0);
	}

	if (fg->fg_msgcount == 1) {
		fg->fg_lflags |= FG_INSMSGQ;

	lck_mtx_unlock(&fg->fg_lock);

	lck_mtx_lock(uipc_lock);
	LIST_INSERT_HEAD(&fmsghead, fg, f_msglist);
	lck_mtx_unlock(uipc_lock);
	lck_mtx_lock(&fg->fg_lock);
	fg->fg_lflags &= ~FG_INSMSGQ;
	if (fg->fg_lflags & FG_WINSMSGQ) {
		fg->fg_lflags &= ~FG_WINSMSGQ;
		wakeup(&fg->fg_lflags);
	}
	lck_mtx_unlock(&fg->fg_lock);
fg_removeuipc(struct fileglob * fg)
	lck_mtx_lock(&fg->fg_lock);
	while (fg->fg_lflags & FG_INSMSGQ) {
		fg->fg_lflags |= FG_WINSMSGQ;
		msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_removeuipc", 0);
	}

	if (fg->fg_msgcount == 0) {
		fg->fg_lflags |= FG_RMMSGQ;

	lck_mtx_unlock(&fg->fg_lock);

	lck_mtx_lock(uipc_lock);
	LIST_REMOVE(fg, f_msglist);
	lck_mtx_unlock(uipc_lock);
	lck_mtx_lock(&fg->fg_lock);
	fg->fg_lflags &= ~FG_RMMSGQ;
	if (fg->fg_lflags & FG_WRMMSGQ) {
		fg->fg_lflags &= ~FG_WRMMSGQ;
		wakeup(&fg->fg_lflags);
	}
	lck_mtx_unlock(&fg->fg_lock);
int
fo_read(struct fileproc *fp, struct uio *uio, kauth_cred_t cred, int flags, struct proc *p)
{
	return ((*fp->f_ops->fo_read)(fp, uio, cred, flags, p));
}

int
fo_write(struct fileproc *fp, struct uio *uio, kauth_cred_t cred, int flags, struct proc *p)
{
	return((*fp->f_ops->fo_write)(fp, uio, cred, flags, p));
}

int
fo_ioctl(struct fileproc *fp, u_long com, caddr_t data, struct proc *p)
{
	int error;

	error = (*fp->f_ops->fo_ioctl)(fp, com, data, p);

	return(error);
}

int
fo_select(struct fileproc *fp, int which, void *wql, struct proc *p)
{
	return((*fp->f_ops->fo_select)(fp, which, wql, p));
}

int
fo_close(struct fileglob *fg, struct proc *p)
{
	return((*fg->fg_ops->fo_close)(fg, p));
}

int
fo_kqfilter(struct fileproc *fp, struct knote *kn, struct proc *p)
{
	return ((*fp->f_ops->fo_kqfilter)(fp, kn, p));
}