2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95
70 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
71 * support for mandatory and extensible security protections. This notice
72 * is included in support of clause 2.2 (b) of the Apple Public License,
76 #include <sys/param.h>
77 #include <sys/types.h>
78 #include <sys/systm.h>
79 #include <sys/kernel.h>
80 #include <sys/file_internal.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/namei.h>
86 #include <sys/vnode_internal.h>
87 #include <sys/ioctl.h>
89 /* Temporary workaround for ubc.h until <rdar://4714366 is resolved */
90 #define ubc_setcred ubc_setcred_deprecated
93 int ubc_setcred(struct vnode
*, struct proc
*);
96 #include <sys/fsevents.h>
97 #include <sys/kdebug.h>
98 #include <sys/xattr.h>
99 #include <sys/ubc_internal.h>
100 #include <sys/uio_internal.h>
101 #include <sys/resourcevar.h>
102 #include <sys/signalvar.h>
104 #include <vm/vm_kern.h>
105 #include <vm/vm_map.h>
107 #include <miscfs/specfs/specdev.h>
108 #include <miscfs/fifofs/fifo.h>
111 #include <security/mac_framework.h>
114 #include <IOKit/IOBSD.h>
115 #include <libkern/section_keywords.h>
117 static int vn_closefile(struct fileglob
*fp
, vfs_context_t ctx
);
118 static int vn_ioctl(struct fileproc
*fp
, u_long com
, caddr_t data
,
120 static int vn_read(struct fileproc
*fp
, struct uio
*uio
, int flags
,
122 static int vn_write(struct fileproc
*fp
, struct uio
*uio
, int flags
,
124 static int vn_select( struct fileproc
*fp
, int which
, void * wql
,
126 static int vn_kqfilter(struct fileproc
*fp
, struct knote
*kn
,
127 struct kevent_qos_s
*kev
);
128 static void filt_vndetach(struct knote
*kn
);
129 static int filt_vnode(struct knote
*kn
, long hint
);
130 static int filt_vnode_common(struct knote
*kn
, struct kevent_qos_s
*kev
,
131 vnode_t vp
, long hint
);
132 static int vn_open_auth_finish(vnode_t vp
, int fmode
, vfs_context_t ctx
);
134 const struct fileops vnops
= {
135 .fo_type
= DTYPE_VNODE
,
137 .fo_write
= vn_write
,
138 .fo_ioctl
= vn_ioctl
,
139 .fo_select
= vn_select
,
140 .fo_close
= vn_closefile
,
141 .fo_drain
= fo_no_drain
,
142 .fo_kqfilter
= vn_kqfilter
,
145 static int filt_vntouch(struct knote
*kn
, struct kevent_qos_s
*kev
);
146 static int filt_vnprocess(struct knote
*kn
, struct kevent_qos_s
*kev
);
148 SECURITY_READ_ONLY_EARLY(struct filterops
) vnode_filtops
= {
151 .f_detach
= filt_vndetach
,
152 .f_event
= filt_vnode
,
153 .f_touch
= filt_vntouch
,
154 .f_process
= filt_vnprocess
,
158 * Common code for vnode open operations.
159 * Check permissions, and call the VNOP_OPEN or VNOP_CREATE routine.
161 * XXX the profusion of interfaces here is probably a bad thing.
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VNOP_OPEN or VNOP_CREATE routine.
 *
 * Thin wrapper around vn_open_modflags() for callers that do not need
 * the (possibly modified) open mode reflected back to them: fmode is
 * passed by value here, so modifications made by the open path are
 * discarded.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	return vn_open_modflags(ndp, &fmode, cmode);
}
170 vn_open_modflags(struct nameidata
*ndp
, int *fmodep
, int cmode
)
173 struct vnode_attr
*vap
;
175 vap
= kheap_alloc(KHEAP_TEMP
, sizeof(struct vnode_attr
), M_WAITOK
);
178 VATTR_SET(vap
, va_mode
, (mode_t
)cmode
);
180 error
= vn_open_auth(ndp
, fmodep
, vap
);
182 kheap_free(KHEAP_TEMP
, vap
, sizeof(struct vnode_attr
));
188 vn_open_auth_finish(vnode_t vp
, int fmode
, vfs_context_t ctx
)
192 if ((error
= vnode_ref_ext(vp
, fmode
, 0)) != 0) {
196 /* Call out to allow 3rd party notification of open.
197 * Ignore result of kauth_authorize_fileop call.
200 mac_vnode_notify_open(ctx
, vp
, fmode
);
202 kauth_authorize_fileop(vfs_context_ucred(ctx
), KAUTH_FILEOP_OPEN
,
212 * May do nameidone() to allow safely adding an FSEvent. Cue off of ni_dvp to
213 * determine whether that has happened.
216 vn_open_auth_do_create(struct nameidata
*ndp
, struct vnode_attr
*vap
, int fmode
, boolean_t
*did_create
, boolean_t
*did_open
, vfs_context_t ctx
)
219 vnode_t dvp
= ndp
->ni_dvp
;
224 batched
= vnode_compound_open_available(ndp
->ni_dvp
);
227 VATTR_SET(vap
, va_type
, VREG
);
228 if (fmode
& O_EXCL
) {
229 vap
->va_vaflags
|= VA_EXCLUSIVE
;
233 if (ndp
->ni_cnd
.cn_flags
& CN_WANTSRSRCFORK
) {
234 if ((error
= vn_authorize_create(dvp
, &ndp
->ni_cnd
, vap
, ctx
, NULL
)) != 0) {
237 if ((error
= vnode_makenamedstream(dvp
, &ndp
->ni_vp
, XATTR_RESOURCEFORK_NAME
, 0, ctx
)) != 0) {
244 if ((error
= vn_authorize_create(dvp
, &ndp
->ni_cnd
, vap
, ctx
, NULL
)) != 0) {
249 error
= vn_create(dvp
, &ndp
->ni_vp
, ndp
, vap
, VN_CREATE_DOOPEN
, fmode
, &status
, ctx
);
252 *did_create
= (status
& COMPOUND_OPEN_STATUS_DID_CREATE
) ? TRUE
: FALSE
;
257 if (error
== EKEEPLOOKING
) {
259 panic("EKEEPLOOKING, but we did a create?");
262 panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?");
264 if ((ndp
->ni_flag
& NAMEI_CONTLOOKUP
) == 0) {
265 panic("EKEEPLOOKING, but continue flag not set?");
269 * Do NOT drop the dvp: we need everything to continue the lookup.
275 *did_create
= (status
& COMPOUND_OPEN_STATUS_DID_CREATE
) ? 1 : 0;
288 int update_flags
= 0;
290 // Make sure the name & parent pointers are hooked up
291 if (vp
->v_name
== NULL
) {
292 update_flags
|= VNODE_UPDATE_NAME
;
294 if (vp
->v_parent
== NULLVP
) {
295 update_flags
|= VNODE_UPDATE_PARENT
;
299 vnode_update_identity(vp
, dvp
, ndp
->ni_cnd
.cn_nameptr
, ndp
->ni_cnd
.cn_namelen
, ndp
->ni_cnd
.cn_hash
, update_flags
);
303 ndp
->ni_dvp
= NULLVP
;
306 if (need_fsevent(FSE_CREATE_FILE
, vp
)) {
307 add_fsevent(FSE_CREATE_FILE
, ctx
,
314 if (ndp
->ni_dvp
!= NULLVP
) {
316 ndp
->ni_dvp
= NULLVP
;
323 * This is the number of times we'll loop in vn_open_auth without explicitly
324 * yielding the CPU when we determine we have to retry.
326 #define RETRY_NO_YIELD_COUNT 5
329 * Open a file with authorization, updating the contents of the structures
330 * pointed to by ndp, fmodep, and vap as necessary to perform the requested
331 * operation. This function is used for both opens of existing files, and
332 * creation of new files.
334 * Parameters: ndp The nami data pointer describing the
336 * fmodep A pointer to an int containg the mode
337 * information to be used for the open
338 * vap A pointer to the vnode attribute
339 * descriptor to be used for the open
341 * Indirect: * Contents of the data structures pointed
342 * to by the parameters are modified as
343 * necessary to the requested operation.
348 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
350 * The contents of '*ndp' will be modified, based on the other
351 * arguments to this function, and to return file and directory
352 * data necessary to satisfy the requested operation.
354 * If the file does not exist and we are creating it, then the
355 * O_TRUNC flag will be cleared in '*fmodep' to indicate to the
356 * caller that the file was not truncated.
358 * If the file exists and the O_EXCL flag was not specified, then
359 * the O_CREAT flag will be cleared in '*fmodep' to indicate to
360 * the caller that the existing file was merely opened rather
363 * The contents of '*vap' will be modified as necessary to
364 * complete the operation, including setting of supported
365 * attribute, clearing of fields containing unsupported attributes
366 * in the request, if the request proceeds without them, etc..
368 * XXX: This function is too complicated in actings on its arguments
370 * XXX: We should enummerate the possible errno values here, and where
371 * in the code they originated.
374 vn_open_auth(struct nameidata
*ndp
, int *fmodep
, struct vnode_attr
*vap
)
378 vfs_context_t ctx
= ndp
->ni_cnd
.cn_context
;
381 uint32_t origcnflags
;
382 boolean_t did_create
;
384 boolean_t need_vnop_open
;
386 boolean_t ref_failed
;
394 need_vnop_open
= TRUE
;
397 origcnflags
= ndp
->ni_cnd
.cn_flags
;
399 // If raw encrypted mode is requested, handle that here
400 if (VATTR_IS_ACTIVE(vap
, va_dataprotect_flags
)
401 && ISSET(vap
->va_dataprotect_flags
, VA_DP_RAWENCRYPTED
)) {
405 if ((fmode
& O_NOFOLLOW_ANY
) && (fmode
& (O_SYMLINK
| O_NOFOLLOW
))) {
413 if (fmode
& O_CREAT
) {
414 if ((fmode
& O_DIRECTORY
)) {
418 ndp
->ni_cnd
.cn_nameiop
= CREATE
;
420 ndp
->ni_op
= OP_LINK
;
422 /* Inherit USEDVP, vnode_open() supported flags only */
423 ndp
->ni_cnd
.cn_flags
&= (USEDVP
| NOCROSSMOUNT
);
424 ndp
->ni_cnd
.cn_flags
|= LOCKPARENT
| LOCKLEAF
| AUDITVNPATH1
;
425 ndp
->ni_flag
= NAMEI_COMPOUNDOPEN
;
427 /* open calls are allowed for resource forks. */
428 ndp
->ni_cnd
.cn_flags
|= CN_ALLOWRSRCFORK
;
430 if ((fmode
& O_EXCL
) == 0 && (fmode
& O_NOFOLLOW
) == 0 && (origcnflags
& FOLLOW
) != 0) {
431 ndp
->ni_cnd
.cn_flags
|= FOLLOW
;
433 if (fmode
& O_NOFOLLOW_ANY
) {
434 /* will return ELOOP on the first symlink to be hit */
435 ndp
->ni_flag
|= NAMEI_NOFOLLOW_ANY
;
438 continue_create_lookup
:
439 if ((error
= namei(ndp
))) {
446 batched
= vnode_compound_open_available(dvp
);
448 /* not found, create */
450 /* must have attributes for a new file */
457 * Attempt a create. For a system supporting compound VNOPs, we may
458 * find an existing file or create one; in either case, we will already
459 * have the file open and no VNOP_OPEN() will be needed.
461 error
= vn_open_auth_do_create(ndp
, vap
, fmode
, &did_create
, &did_open
, ctx
);
467 * Detected a node that the filesystem couldn't handle. Don't call
468 * nameidone() yet, because we need that path buffer.
470 if (error
== EKEEPLOOKING
) {
472 panic("EKEEPLOOKING from a filesystem that doesn't support compound VNOPs?");
474 goto continue_create_lookup
;
479 panic("Shouldn't have a dvp here.");
484 * Check for a create race.
486 if ((error
== EEXIST
) && !(fmode
& O_EXCL
)) {
495 need_vnop_open
= !did_open
;
497 if (fmode
& O_EXCL
) {
502 * We have a vnode. Use compound open if available
503 * or else fall through to "traditional" path. Note: can't
504 * do a compound open for root, because the parent belongs
507 if (error
== 0 && batched
&& (vnode_mount(dvp
) == vnode_mount(vp
))) {
508 error
= VNOP_COMPOUND_OPEN(dvp
, &ndp
->ni_vp
, ndp
, 0, fmode
, NULL
, NULL
, ctx
);
512 need_vnop_open
= FALSE
;
513 } else if (error
== EKEEPLOOKING
) {
514 if ((ndp
->ni_flag
& NAMEI_CONTLOOKUP
) == 0) {
515 panic("EKEEPLOOKING, but continue flag not set?");
517 goto continue_create_lookup
;
522 ndp
->ni_dvp
= NULLVP
;
536 ndp
->ni_cnd
.cn_nameiop
= LOOKUP
;
537 /* Inherit USEDVP, vnode_open() supported flags only */
538 ndp
->ni_cnd
.cn_flags
&= (USEDVP
| NOCROSSMOUNT
);
539 ndp
->ni_cnd
.cn_flags
|= FOLLOW
| LOCKLEAF
| AUDITVNPATH1
| WANTPARENT
;
541 /* open calls are allowed for resource forks. */
542 ndp
->ni_cnd
.cn_flags
|= CN_ALLOWRSRCFORK
;
544 if (fmode
& FENCRYPTED
) {
545 ndp
->ni_cnd
.cn_flags
|= CN_RAW_ENCRYPTED
| CN_SKIPNAMECACHE
;
547 ndp
->ni_flag
= NAMEI_COMPOUNDOPEN
;
549 /* preserve NOFOLLOW from vnode_open() */
550 if (fmode
& O_NOFOLLOW
|| fmode
& O_SYMLINK
|| (origcnflags
& FOLLOW
) == 0) {
551 ndp
->ni_cnd
.cn_flags
&= ~FOLLOW
;
553 if (fmode
& O_NOFOLLOW_ANY
) {
554 /* will return ELOOP on the first symlink to be hit */
555 ndp
->ni_flag
|= NAMEI_NOFOLLOW_ANY
;
558 /* Do a lookup, possibly going directly to filesystem for compound operation */
560 if ((error
= namei(ndp
))) {
566 /* Check for batched lookup-open */
567 batched
= vnode_compound_open_available(dvp
);
568 if (batched
&& ((vp
== NULLVP
) || (vnode_mount(dvp
) == vnode_mount(vp
)))) {
569 error
= VNOP_COMPOUND_OPEN(dvp
, &ndp
->ni_vp
, ndp
, 0, fmode
, NULL
, NULL
, ctx
);
572 need_vnop_open
= FALSE
;
573 } else if (error
== EKEEPLOOKING
) {
574 if ((ndp
->ni_flag
& NAMEI_CONTLOOKUP
) == 0) {
575 panic("EKEEPLOOKING, but continue flag not set?");
579 } while (error
== EKEEPLOOKING
);
583 ndp
->ni_dvp
= NULLVP
;
591 * By this point, nameidone() is called, dvp iocount is dropped,
592 * and dvp pointer is cleared.
594 if (ndp
->ni_dvp
!= NULLVP
) {
595 panic("Haven't cleaned up adequately in vn_open_auth()");
599 * Expect to use this code for filesystems without compound VNOPs, for the root
600 * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
601 * and for shadow files, which do not live on the same filesystems as their "parents."
603 if (need_vnop_open
) {
604 if (batched
&& !vnode_isvroot(vp
) && !vnode_isnamedstream(vp
)) {
605 panic("Why am I trying to use VNOP_OPEN() on anything other than the root or a named stream?");
609 error
= vn_authorize_open_existing(vp
, &ndp
->ni_cnd
, fmode
, ctx
, NULL
);
615 if (VATTR_IS_ACTIVE(vap
, va_dataprotect_flags
)
616 && ISSET(vap
->va_dataprotect_flags
, VA_DP_RAWUNENCRYPTED
)) {
617 /* Don't allow unencrypted io request from user space unless entitled */
618 boolean_t entitled
= FALSE
;
620 entitled
= IOTaskHasEntitlement(current_task(), "com.apple.private.security.file-unencrypt-access");
626 fmode
|= FUNENCRYPTED
;
629 error
= VNOP_OPEN(vp
, fmode
, ctx
);
633 need_vnop_open
= FALSE
;
636 // if the vnode is tagged VOPENEVT and the current process
637 // has the P_CHECKOPENEVT flag set, then we or in the O_EVTONLY
638 // flag to the open mode so that this open won't count against
639 // the vnode when carbon delete() does a vnode_isinuse() to see
640 // if a file is currently in use. this allows spotlight
641 // importers to not interfere with carbon apps that depend on
642 // the no-delete-if-busy semantics of carbon delete().
644 if (!did_create
&& (vp
->v_flag
& VOPENEVT
) && (current_proc()->p_flag
& P_CHECKOPENEVT
)) {
649 * Grab reference, etc.
651 error
= vn_open_auth_finish(vp
, fmode
, ctx
);
657 /* Compound VNOP open is responsible for doing the truncate */
658 if (batched
|| did_create
) {
666 /* Opened either explicitly or by a batched create */
667 if (!need_vnop_open
) {
668 VNOP_CLOSE(vp
, fmode
, ctx
);
674 /* Aggressively recycle shadow files if we error'd out during open() */
675 if ((vnode_isnamedstream(vp
)) &&
676 (vp
->v_parent
!= NULLVP
) &&
677 (vnode_isshadow(vp
))) {
683 * Check for a race against unlink. We had a vnode
684 * but according to vnode_authorize or VNOP_OPEN it
687 * EREDRIVEOPEN: means that we were hit by the tty allocation race.
689 if (((error
== ENOENT
) && (*fmodep
& O_CREAT
)) || (error
== EREDRIVEOPEN
) || ref_failed
) {
691 * We'll retry here but it may be possible that we get
692 * into a retry "spin" inside the kernel and not allow
693 * threads, which need to run in order for the retry
694 * loop to end, to run. An example is an open of a
695 * terminal which is getting revoked and we spin here
696 * without yielding becasue namei and VNOP_OPEN are
697 * successful but vnode_ref fails. The revoke needs
698 * threads with an iocount to run but if spin here we
699 * may possibly be blcoking other threads from running.
701 * We start yielding the CPU after some number of
702 * retries for increasing durations. Note that this is
703 * still a loop without an exit condition.
706 if (nretries
> RETRY_NO_YIELD_COUNT
) {
707 /* Every hz/100 secs is 10 msecs ... */
708 tsleep(&nretries
, PVFS
, "vn_open_auth_retry",
709 MIN((nretries
* (hz
/ 100)), hz
));
#if vn_access_DEPRECATED
/*
 * Authorize an action against a vnode. This has been the canonical way to
 * ensure that the credential/process/etc. referenced by a vfs_context
 * is granted the rights called out in 'mode' against the vnode 'vp'.
 *
 * Unfortunately, the use of VREAD/VWRITE/VEXEC makes it very difficult
 * to add support for more rights. As such, this interface will be deprecated
 * and callers will use vnode_authorize instead.
 *
 * Translates the legacy VREAD/VWRITE/VEXEC bits in @mode into the
 * corresponding kauth actions and defers to vnode_authorize().
 */
int
vn_access(vnode_t vp, int mode, vfs_context_t context)
{
	kauth_action_t action;

	action = 0;
	if (mode & VREAD) {
		action |= KAUTH_VNODE_READ_DATA;
	}
	if (mode & VWRITE) {
		action |= KAUTH_VNODE_WRITE_DATA;
	}
	if (mode & VEXEC) {
		action |= KAUTH_VNODE_EXECUTE;
	}

	return vnode_authorize(vp, NULL, action, context);
}
#endif  /* vn_access_DEPRECATED */
753 vn_close(struct vnode
*vp
, int flags
, vfs_context_t ctx
)
759 /* Sync data from resource fork shadow file if needed. */
760 if ((vp
->v_flag
& VISNAMEDSTREAM
) &&
761 (vp
->v_parent
!= NULLVP
) &&
762 vnode_isshadow(vp
)) {
763 if (flags
& FWASWRITTEN
) {
764 flusherror
= vnode_flushnamedstream(vp
->v_parent
, vp
, ctx
);
769 * If vnode @vp belongs to a chardev or a blkdev then it is handled
770 * specially. We first drop its user reference count @vp->v_usecount
771 * before calling VNOP_CLOSE(). This was done historically to ensure
772 * that the last close of a special device vnode performed some
773 * conditional cleanups. Now we still need to drop this reference here
774 * to ensure that devfsspec_close() can check if the vnode is still in
777 if (vnode_isspec(vp
)) {
778 (void)vnode_rele_ext(vp
, flags
, 0);
782 * On HFS, we flush when the last writer closes. We do this
783 * because resource fork vnodes hold a reference on data fork
784 * vnodes and that will prevent them from getting VNOP_INACTIVE
785 * which will delay when we flush cached data. In future, we
786 * might find it beneficial to do this for all file systems.
787 * Note that it's OK to access v_writecount without the lock
790 if (vp
->v_tag
== VT_HFS
&& (flags
& FWRITE
) && vp
->v_writecount
== 1) {
791 VNOP_FSYNC(vp
, MNT_NOWAIT
, ctx
);
794 error
= VNOP_CLOSE(vp
, flags
, ctx
);
797 if (flags
& FWASWRITTEN
) {
798 if (need_fsevent(FSE_CONTENT_MODIFIED
, vp
)) {
799 add_fsevent(FSE_CONTENT_MODIFIED
, ctx
,
806 if (!vnode_isspec(vp
)) {
807 (void)vnode_rele_ext(vp
, flags
, 0);
822 off_t swap_count
, this_count
;
823 off_t file_end
, read_end
;
828 * Reading from a swap file will get you zeroes.
833 swap_count
= uio_resid(uio
);
835 file_end
= ubc_getsize(vp
);
836 read_end
= uio
->uio_offset
+ uio_resid(uio
);
837 if (uio
->uio_offset
>= file_end
) {
838 /* uio starts after end of file: nothing to read */
840 } else if (read_end
> file_end
) {
841 /* uio extends beyond end of file: stop before that */
842 swap_count
-= (read_end
- file_end
);
845 while (swap_count
> 0) {
846 if (my_swap_page
== NULL
) {
847 my_swap_page
= kheap_alloc(KHEAP_TEMP
, PAGE_SIZE
, Z_WAITOK
| Z_ZERO
);
848 /* add an end-of-line to keep line counters happy */
849 my_swap_page
[PAGE_SIZE
- 1] = '\n';
851 this_count
= swap_count
;
852 if (this_count
> PAGE_SIZE
) {
853 this_count
= PAGE_SIZE
;
856 prev_resid
= uio_resid(uio
);
857 error
= uiomove((caddr_t
) my_swap_page
,
863 swap_count
-= (prev_resid
- uio_resid(uio
));
865 kheap_free(KHEAP_TEMP
, my_swap_page
, PAGE_SIZE
);
870 * Package up an I/O request on a vnode into a uio and do it.
892 result
= vn_rdwr_64(rw
,
894 (uint64_t)(uintptr_t)base
,
903 /* "resid" should be bounded above by "len," which is an int */
904 if (aresid
!= NULL
) {
905 *aresid
= (int)resid
;
927 struct vfs_context context
;
929 char uio_buf
[UIO_SIZEOF(1)];
931 context
.vc_thread
= current_thread();
932 context
.vc_ucred
= cred
;
934 if (UIO_SEG_IS_USER_SPACE(segflg
)) {
935 spacetype
= proc_is64bit(p
) ? UIO_USERSPACE64
: UIO_USERSPACE32
;
937 spacetype
= UIO_SYSSPACE
;
944 auio
= uio_createwithbuffer(1, offset
, spacetype
, rw
,
945 &uio_buf
[0], sizeof(uio_buf
));
946 uio_addiov(auio
, CAST_USER_ADDR_T(base
), (user_size_t
)len
);
950 * IO_NOAUTH should be re-examined.
951 * Likely that mediation should be performed in caller.
953 if ((ioflg
& IO_NOAUTH
) == 0) {
954 /* passed cred is fp->f_cred */
955 if (rw
== UIO_READ
) {
956 error
= mac_vnode_check_read(&context
, cred
, vp
);
958 error
= mac_vnode_check_write(&context
, cred
, vp
);
964 if (rw
== UIO_READ
) {
965 if (vnode_isswap(vp
) && ((ioflg
& IO_SWAP_DISPATCH
) == 0)) {
966 error
= vn_read_swapfile(vp
, auio
);
968 error
= VNOP_READ(vp
, auio
, ioflg
, &context
);
971 error
= VNOP_WRITE(vp
, auio
, ioflg
, &context
);
976 *aresid
= uio_resid(auio
);
977 assert(*aresid
<= len
);
978 } else if (uio_resid(auio
) && error
== 0) {
985 vn_offset_lock(struct fileglob
*fg
)
987 lck_mtx_lock_spin(&fg
->fg_lock
);
988 while (fg
->fg_lflags
& FG_OFF_LOCKED
) {
989 fg
->fg_lflags
|= FG_OFF_LOCKWANT
;
990 msleep(&fg
->fg_lflags
, &fg
->fg_lock
, PVFS
| PSPIN
,
991 "fg_offset_lock_wait", 0);
993 fg
->fg_lflags
|= FG_OFF_LOCKED
;
994 lck_mtx_unlock(&fg
->fg_lock
);
998 vn_offset_unlock(struct fileglob
*fg
)
1000 int lock_wanted
= 0;
1002 lck_mtx_lock_spin(&fg
->fg_lock
);
1003 if (fg
->fg_lflags
& FG_OFF_LOCKWANT
) {
1006 fg
->fg_lflags
&= ~(FG_OFF_LOCKED
| FG_OFF_LOCKWANT
);
1007 lck_mtx_unlock(&fg
->fg_lock
);
1009 wakeup(&fg
->fg_lflags
);
1014 * File table vnode read routine.
1017 vn_read(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
1023 user_ssize_t read_len
;
1024 user_ssize_t adjusted_read_len
;
1025 user_ssize_t clippedsize
;
1028 read_len
= uio_resid(uio
);
1029 if (read_len
< 0 || read_len
> INT_MAX
) {
1032 adjusted_read_len
= read_len
;
1034 offset_locked
= false;
1036 vp
= (struct vnode
*)fp
->fp_glob
->fg_data
;
1037 if ((error
= vnode_getwithref(vp
))) {
1042 error
= mac_vnode_check_read(ctx
, vfs_context_ucred(ctx
), vp
);
1044 (void)vnode_put(vp
);
1049 /* This signals to VNOP handlers that this read came from a file table read */
1050 ioflag
= IO_SYSCALL_DISPATCH
;
1052 if (fp
->fp_glob
->fg_flag
& FNONBLOCK
) {
1053 ioflag
|= IO_NDELAY
;
1055 if ((fp
->fp_glob
->fg_flag
& FNOCACHE
) || vnode_isnocache(vp
)) {
1056 ioflag
|= IO_NOCACHE
;
1058 if (fp
->fp_glob
->fg_flag
& FENCRYPTED
) {
1059 ioflag
|= IO_ENCRYPTED
;
1061 if (fp
->fp_glob
->fg_flag
& FUNENCRYPTED
) {
1062 ioflag
|= IO_SKIP_ENCRYPTION
;
1064 if (fp
->fp_glob
->fg_flag
& O_EVTONLY
) {
1065 ioflag
|= IO_EVTONLY
;
1067 if (fp
->fp_glob
->fg_flag
& FNORDAHEAD
) {
1071 if ((flags
& FOF_OFFSET
) == 0) {
1072 if ((vnode_vtype(vp
) == VREG
) && !vnode_isswap(vp
)) {
1073 vn_offset_lock(fp
->fp_glob
);
1074 offset_locked
= true;
1076 read_offset
= fp
->fp_glob
->fg_offset
;
1077 uio_setoffset(uio
, read_offset
);
1079 read_offset
= uio_offset(uio
);
1080 /* POSIX allows negative offsets for character devices. */
1081 if ((read_offset
< 0) && (vnode_vtype(vp
) != VCHR
)) {
1087 if (read_offset
== INT64_MAX
) {
1088 /* can't read any more */
1094 * If offset + len will cause overflow, reduce the len to a value
1095 * (adjusted_read_len) where it won't
1097 if ((read_offset
>= 0) && (INT64_MAX
- read_offset
) < read_len
) {
1099 * 0 read_offset INT64_MAX
1100 * |-----------------------------------------------|----------|~~~
1104 adjusted_read_len
= (user_ssize_t
)(INT64_MAX
- read_offset
);
1107 if (adjusted_read_len
< read_len
) {
1108 uio_setresid(uio
, adjusted_read_len
);
1109 clippedsize
= read_len
- adjusted_read_len
;
1112 if (vnode_isswap(vp
) && !(IO_SKIP_ENCRYPTION
& ioflag
)) {
1113 /* special case for swap files */
1114 error
= vn_read_swapfile(vp
, uio
);
1116 error
= VNOP_READ(vp
, uio
, ioflag
, ctx
);
1120 uio_setresid(uio
, (uio_resid(uio
) + clippedsize
));
1123 if ((flags
& FOF_OFFSET
) == 0) {
1124 fp
->fp_glob
->fg_offset
+= read_len
- uio_resid(uio
);
1128 if (offset_locked
) {
1129 vn_offset_unlock(fp
->fp_glob
);
1130 offset_locked
= false;
1133 (void)vnode_put(vp
);
1139 * File table vnode write routine.
1142 vn_write(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
1147 off_t write_end_offset
;
1148 user_ssize_t write_len
;
1149 user_ssize_t adjusted_write_len
;
1150 user_ssize_t clippedsize
;
1152 proc_t p
= vfs_context_proc(ctx
);
1153 rlim_t rlim_cur_fsize
= p
? proc_limitgetcur(p
, RLIMIT_FSIZE
, TRUE
) : 0;
1155 write_len
= uio_resid(uio
);
1156 if (write_len
< 0 || write_len
> INT_MAX
) {
1159 adjusted_write_len
= write_len
;
1161 offset_locked
= false;
1163 vp
= (struct vnode
*)fp
->fp_glob
->fg_data
;
1164 if ((error
= vnode_getwithref(vp
))) {
1169 error
= mac_vnode_check_write(ctx
, vfs_context_ucred(ctx
), vp
);
1171 (void)vnode_put(vp
);
1177 * IO_SYSCALL_DISPATCH signals to VNOP handlers that this write came from
1178 * a file table write
1180 ioflag
= (IO_UNIT
| IO_SYSCALL_DISPATCH
);
1182 if (vp
->v_type
== VREG
&& (fp
->fp_glob
->fg_flag
& O_APPEND
)) {
1183 ioflag
|= IO_APPEND
;
1185 if (fp
->fp_glob
->fg_flag
& FNONBLOCK
) {
1186 ioflag
|= IO_NDELAY
;
1188 if ((fp
->fp_glob
->fg_flag
& FNOCACHE
) || vnode_isnocache(vp
)) {
1189 ioflag
|= IO_NOCACHE
;
1191 if (fp
->fp_glob
->fg_flag
& FNODIRECT
) {
1192 ioflag
|= IO_NODIRECT
;
1194 if (fp
->fp_glob
->fg_flag
& FSINGLE_WRITER
) {
1195 ioflag
|= IO_SINGLE_WRITER
;
1197 if (fp
->fp_glob
->fg_flag
& O_EVTONLY
) {
1198 ioflag
|= IO_EVTONLY
;
1202 * Treat synchronous mounts and O_FSYNC on the fd as equivalent.
1204 * XXX We treat O_DSYNC as O_FSYNC for now, since we can not delay
1205 * XXX the non-essential metadata without some additional VFS work;
1206 * XXX the intent at this point is to plumb the interface for it.
1208 if ((fp
->fp_glob
->fg_flag
& (O_FSYNC
| O_DSYNC
)) ||
1209 (vp
->v_mount
&& (vp
->v_mount
->mnt_flag
& MNT_SYNCHRONOUS
))) {
1213 if ((flags
& FOF_OFFSET
) == 0) {
1214 if ((vnode_vtype(vp
) == VREG
) && !vnode_isswap(vp
)) {
1215 vn_offset_lock(fp
->fp_glob
);
1216 offset_locked
= true;
1218 write_offset
= fp
->fp_glob
->fg_offset
;
1219 uio_setoffset(uio
, write_offset
);
1221 /* for pwrite, append should be ignored */
1222 ioflag
&= ~IO_APPEND
;
1223 write_offset
= uio_offset(uio
);
1224 /* POSIX allows negative offsets for character devices. */
1225 if ((write_offset
< 0) && (vnode_vtype(vp
) != VCHR
)) {
1231 if (write_offset
== INT64_MAX
) {
1232 /* writes are not possible */
1238 * write_len is the original write length that was requested.
1239 * We may however need to reduce that becasue of two reasons
1241 * 1) If write_offset + write_len will exceed OFF_T_MAX (i.e. INT64_MAX)
1243 * 2) If write_offset + write_len will exceed the administrative
1244 * limit for the maximum file size.
1246 * In both cases the write will be denied if we can't write even a single
1247 * byte otherwise it will be "clipped" (i.e. a short write).
1251 * If offset + len will cause overflow, reduce the len
1252 * to a value (adjusted_write_len) where it won't
1254 if ((write_offset
>= 0) && (INT64_MAX
- write_offset
) < write_len
) {
1256 * 0 write_offset INT64_MAX
1257 * |-----------------------------------------------|----------|~~~
1261 adjusted_write_len
= (user_ssize_t
)(INT64_MAX
- write_offset
);
1264 /* write_end_offset will always be [0, INT64_MAX] */
1265 write_end_offset
= write_offset
+ adjusted_write_len
;
1267 if (p
&& (vp
->v_type
== VREG
) &&
1268 (rlim_cur_fsize
!= RLIM_INFINITY
) &&
1269 (rlim_cur_fsize
<= INT64_MAX
) &&
1270 (write_end_offset
> (off_t
)rlim_cur_fsize
)) {
1272 * If the requested residual would cause us to go past the
1273 * administrative limit, then we need to adjust the residual
1274 * down to cause fewer bytes than requested to be written. If
1275 * we can't do that (e.g. the residual is already 1 byte),
1276 * then we fail the write with EFBIG.
1278 if (write_offset
>= (off_t
)rlim_cur_fsize
) {
1280 * 0 rlim_fsize write_offset write_end INT64_MAX
1281 * |------------------------|----------|-------------|--------|
1284 * write not permitted
1286 psignal(p
, SIGXFSZ
);
1292 * 0 write_offset rlim_fsize write_end INT64_MAX
1293 * |------------------------|-----------|---------|------------|
1294 * <------write_len------>
1297 adjusted_write_len
= (user_ssize_t
)((off_t
)rlim_cur_fsize
- write_offset
);
1298 assert((adjusted_write_len
> 0) && (adjusted_write_len
< write_len
));
1301 if (adjusted_write_len
< write_len
) {
1302 uio_setresid(uio
, adjusted_write_len
);
1303 clippedsize
= write_len
- adjusted_write_len
;
1306 error
= VNOP_WRITE(vp
, uio
, ioflag
, ctx
);
1309 * If we had to reduce the size of write requested either because
1310 * of rlimit or because it would have exceeded
1311 * maximum file size, we have to add that back to the residual so
1312 * it correctly reflects what we did in this function.
1315 uio_setresid(uio
, (uio_resid(uio
) + clippedsize
));
1318 if ((flags
& FOF_OFFSET
) == 0) {
1319 if (ioflag
& IO_APPEND
) {
1320 fp
->fp_glob
->fg_offset
= uio_offset(uio
);
1322 fp
->fp_glob
->fg_offset
+= (write_len
- uio_resid(uio
));
1324 if (offset_locked
) {
1325 vn_offset_unlock(fp
->fp_glob
);
1326 offset_locked
= false;
1331 * Set the credentials on successful writes
1333 if ((error
== 0) && (vp
->v_tag
== VT_NFS
) && (UBCINFOEXISTS(vp
))) {
1335 * When called from aio subsystem, we only have the proc from
1336 * which to get the credential, at this point, so use that
1337 * instead. This means aio functions are incompatible with
1338 * per-thread credentials (aio operations are proxied). We
1339 * can't easily correct the aio vs. settid race in this case
1340 * anyway, so we disallow it.
1342 if ((flags
& FOF_PCRED
) == 0) {
1343 ubc_setthreadcred(vp
, p
, current_thread());
1348 (void)vnode_put(vp
);
1352 if (offset_locked
) {
1353 vn_offset_unlock(fp
->fp_glob
);
1355 (void)vnode_put(vp
);
1360 * File table vnode stat routine.
1362 * Returns: 0 Success
/*
 * vn_stat_noauth(): fill in a stat/stat64 structure (and optionally an
 * extended-security blob via *xsec) from a vnode's attributes WITHOUT
 * performing any authorization check; callers (vn_stat) do that first.
 *
 * sbptr points at either a struct stat or a struct stat64; isstat64
 * selects which, and only the matching pointer (sb/sb64) is ever used.
 *
 * NOTE(review): extraction has split statements across physical lines and
 * dropped some lines entirely (braces, `int error;`/`u_short mode;`
 * declarations, error-check returns, and the v_type -> S_IF* switch body
 * after the `switch (vp->v_type)` header). Treat this text as a reference
 * fragment, not compilable code.
 */
1368 vn_stat_noauth(struct vnode
*vp
, void *sbptr
, kauth_filesec_t
*xsec
, int isstat64
,
1369 int needsrealdev
, vfs_context_t ctx
, struct ucred
*file_cred
)
1371 struct vnode_attr va
;
1374 kauth_filesec_t fsec
;
1375 struct stat
*sb
= (struct stat
*)0; /* warning avoidance ; protected by isstat64 */
1376 struct stat64
* sb64
= (struct stat64
*)0; /* warning avoidance ; protected by isstat64 */
/* Select the 32- or 64-bit stat view of the caller's buffer. */
1378 if (isstat64
!= 0) {
1379 sb64
= (struct stat64
*)sbptr
;
1381 sb
= (struct stat
*)sbptr
;
/*
 * Build the attribute request: zero the vnode_attr, then mark every
 * attribute a stat() result reports as wanted.
 */
1383 memset(&va
, 0, sizeof(va
));
1385 VATTR_WANTED(&va
, va_fsid
);
1386 VATTR_WANTED(&va
, va_fileid
);
1387 VATTR_WANTED(&va
, va_mode
);
1388 VATTR_WANTED(&va
, va_type
);
1389 VATTR_WANTED(&va
, va_nlink
);
1390 VATTR_WANTED(&va
, va_uid
);
1391 VATTR_WANTED(&va
, va_gid
);
1392 VATTR_WANTED(&va
, va_rdev
);
1393 VATTR_WANTED(&va
, va_data_size
);
1394 VATTR_WANTED(&va
, va_access_time
);
1395 VATTR_WANTED(&va
, va_modify_time
);
1396 VATTR_WANTED(&va
, va_change_time
);
1397 VATTR_WANTED(&va
, va_create_time
);
1398 VATTR_WANTED(&va
, va_flags
);
1399 VATTR_WANTED(&va
, va_gen
);
1400 VATTR_WANTED(&va
, va_iosize
);
1401 /* lower layers will synthesise va_total_alloc from va_data_size if required */
1402 VATTR_WANTED(&va
, va_total_alloc
);
/* Extended-security attributes, only meaningful when xsec is requested. */
1404 VATTR_WANTED(&va
, va_uuuid
);
1405 VATTR_WANTED(&va
, va_guuid
);
1406 VATTR_WANTED(&va
, va_acl
);
/* presumably requests the real (device) fsid rather than a synthetic one — confirm VA_REALFSID in sys/vnode.h */
1409 va
.va_vaflags
= VA_REALFSID
;
/* Fetch the attributes from the filesystem. NOTE(review): the error check after this call was elided by extraction. */
1411 error
= vnode_getattr(vp
, &va
, ctx
);
1417 * Give MAC policies a chance to reject or filter the attributes
1418 * returned by the filesystem. Note that MAC policies are consulted
1419 * *after* calling the filesystem because filesystems can return more
1420 * attributes than were requested so policies wouldn't be authoritative
1421 * if consulted beforehand. This also gives policies an opportunity
1422 * to change the values of attributes retrieved.
1424 error
= mac_vnode_check_getattr(ctx
, file_cred
, vp
, &va
);
1430 * Copy from vattr table
/* Device and inode number, width depending on the stat flavor. */
1432 if (isstat64
!= 0) {
1433 sb64
->st_dev
= va
.va_fsid
;
1434 sb64
->st_ino
= (ino64_t
)va
.va_fileid
;
1436 sb
->st_dev
= va
.va_fsid
;
1437 sb
->st_ino
= (ino_t
)va
.va_fileid
;
/*
 * NOTE(review): the switch body mapping vp->v_type to the S_IF* bits of
 * `mode` was dropped by extraction; only the header survives below.
 */
1440 switch (vp
->v_type
) {
/*
 * Copy the remaining attributes into the caller's buffer.
 * st_nlink is clamped to UINT16_MAX when the FS supports va_nlink,
 * and defaults to 1 when it does not.
 */
1467 if (isstat64
!= 0) {
1468 sb64
->st_mode
= mode
;
1469 sb64
->st_nlink
= VATTR_IS_SUPPORTED(&va
, va_nlink
) ? va
.va_nlink
> UINT16_MAX
? UINT16_MAX
: (u_int16_t
)va
.va_nlink
: 1;
1470 sb64
->st_uid
= va
.va_uid
;
1471 sb64
->st_gid
= va
.va_gid
;
1472 sb64
->st_rdev
= va
.va_rdev
;
1473 sb64
->st_size
= va
.va_data_size
;
1474 sb64
->st_atimespec
= va
.va_access_time
;
1475 sb64
->st_mtimespec
= va
.va_modify_time
;
1476 sb64
->st_ctimespec
= va
.va_change_time
;
/* Birth time only exists in stat64; zero it when the FS can't supply it. */
1477 if (VATTR_IS_SUPPORTED(&va
, va_create_time
)) {
1478 sb64
->st_birthtimespec
= va
.va_create_time
;
1480 sb64
->st_birthtimespec
.tv_sec
= sb64
->st_birthtimespec
.tv_nsec
= 0;
1482 sb64
->st_blksize
= va
.va_iosize
;
1483 sb64
->st_flags
= va
.va_flags
;
/* st_blocks is reported in 512-byte units, rounded up. */
1484 sb64
->st_blocks
= roundup(va
.va_total_alloc
, 512) / 512;
/* 32-bit stat: same copies, minus the birthtime field. */
1487 sb
->st_nlink
= VATTR_IS_SUPPORTED(&va
, va_nlink
) ? va
.va_nlink
> UINT16_MAX
? UINT16_MAX
: (u_int16_t
)va
.va_nlink
: 1;
1488 sb
->st_uid
= va
.va_uid
;
1489 sb
->st_gid
= va
.va_gid
;
1490 sb
->st_rdev
= va
.va_rdev
;
1491 sb
->st_size
= va
.va_data_size
;
1492 sb
->st_atimespec
= va
.va_access_time
;
1493 sb
->st_mtimespec
= va
.va_modify_time
;
1494 sb
->st_ctimespec
= va
.va_change_time
;
1495 sb
->st_blksize
= va
.va_iosize
;
1496 sb
->st_flags
= va
.va_flags
;
1497 sb
->st_blocks
= roundup(va
.va_total_alloc
, 512) / 512;
1500 /* if we're interested in extended security data and we got an ACL */
/*
 * If the FS supplied none of ACL/owner-UUID/group-UUID, report "no
 * extended security". Otherwise build a kauth_filesec sized for the
 * ACL entry count (0 entries when no ACL was returned).
 */
1502 if (!VATTR_IS_SUPPORTED(&va
, va_acl
) &&
1503 !VATTR_IS_SUPPORTED(&va
, va_uuuid
) &&
1504 !VATTR_IS_SUPPORTED(&va
, va_guuid
)) {
1505 *xsec
= KAUTH_FILESEC_NONE
;
1507 if (VATTR_IS_SUPPORTED(&va
, va_acl
) && (va
.va_acl
!= NULL
)) {
1508 fsec
= kauth_filesec_alloc(va
.va_acl
->acl_entrycount
);
1510 fsec
= kauth_filesec_alloc(0);
/* NOTE(review): the allocation-failure check between here and above was elided. */
1516 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
/* Owner/group GUIDs default to the null GUID when unsupported. */
1517 if (VATTR_IS_SUPPORTED(&va
, va_uuuid
)) {
1518 fsec
->fsec_owner
= va
.va_uuuid
;
1520 fsec
->fsec_owner
= kauth_null_guid
;
1522 if (VATTR_IS_SUPPORTED(&va
, va_guuid
)) {
1523 fsec
->fsec_group
= va
.va_guuid
;
1525 fsec
->fsec_group
= kauth_null_guid
;
/* Copy the ACL payload in, or mark the filesec as having no ACL. */
1527 if (VATTR_IS_SUPPORTED(&va
, va_acl
) && (va
.va_acl
!= NULL
)) {
1528 __nochk_bcopy(va
.va_acl
, &(fsec
->fsec_acl
), KAUTH_ACL_COPYSIZE(va
.va_acl
));
1530 fsec
->fsec_acl
.acl_entrycount
= KAUTH_FILESEC_NOACL
;
1536 /* Do not give the generation number out to unprivileged users */
/* NOTE(review): the bodies zeroing st_gen for non-superuser callers were elided; only the superuser copy below survives. */
1537 if (va
.va_gen
&& !vfs_context_issuser(ctx
)) {
1538 if (isstat64
!= 0) {
1544 if (isstat64
!= 0) {
1545 sb64
->st_gen
= va
.va_gen
;
1547 sb
->st_gen
= va
.va_gen
;
/* The attribute buffer owns any ACL the FS returned; release it here. */
1553 if (VATTR_IS_SUPPORTED(&va
, va_acl
) && va
.va_acl
!= NULL
) {
1554 kauth_acl_free(va
.va_acl
);
/*
 * vn_stat(): authorization-checking wrapper around vn_stat_noauth().
 * Consults MAC, then vnode_authorize() for read-attributes and
 * read-security rights, then delegates with NOCRED as the file credential.
 *
 * NOTE(review): extraction dropped the early-return lines between these
 * calls (and the `int error;` declaration); this text is not compilable.
 */
1560 vn_stat(struct vnode
*vp
, void *sb
, kauth_filesec_t
*xsec
, int isstat64
, int needsrealdev
, vfs_context_t ctx
)
/* MAC hook: give policies a chance to veto the stat. */
1565 error
= mac_vnode_check_stat(ctx
, NOCRED
, vp
);
/* Standard kauth authorization for reading attributes and security info. */
1572 if ((error
= vnode_authorize(vp
, NULL
, KAUTH_VNODE_READ_ATTRIBUTES
| KAUTH_VNODE_READ_SECURITY
, ctx
)) != 0) {
/* Authorized: do the actual attribute copy. */
1577 return vn_stat_noauth(vp
, sb
, xsec
, isstat64
, needsrealdev
, ctx
, NOCRED
);
1582 * File table vnode ioctl routine.
/*
 * vn_ioctl(): file-table ioctl routine for vnodes. Handles a few generic
 * commands inline (FIONREAD, FIODTYPE, ...), rejects some dangerous ones,
 * and passes everything else to the filesystem via VNOP_IOCTL(). On a
 * successful TIOCSCTTY it records the controlling-terminal vnode in the
 * caller's session.
 *
 * NOTE(review): extraction dropped many lines (declarations such as
 * `error`/`file_size`/`temp_nbytes`, `break`/`goto out`/return statements,
 * and several case labels of the v_type switch); treat as a fragment.
 */
1585 vn_ioctl(struct fileproc
*fp
, u_long com
, caddr_t data
, vfs_context_t ctx
)
1587 struct vnode
*vp
= ((struct vnode
*)fp
->fp_glob
->fg_data
);
1590 struct vnode
*ttyvp
;
1591 struct session
* sessp
;
/* Take an iocount on the vnode for the duration of the ioctl. */
1593 if ((error
= vnode_getwithref(vp
))) {
/* MAC hook: allow policies to veto this ioctl command. */
1598 error
= mac_vnode_check_ioctl(ctx
, vp
, com
);
/* Per-vnode-type handling; NOTE(review): case labels were elided. */
1604 switch (vp
->v_type
) {
/* FIONREAD: report bytes readable from the current file offset. */
1607 if (com
== FIONREAD
) {
1609 if ((error
= vnode_size(vp
, &file_size
, ctx
)) != 0) {
1612 temp_nbytes
= file_size
- fp
->fp_glob
->fg_offset
;
/* Clamp to INT_MAX for the int-sized result; negative handled separately (body elided). */
1613 if (temp_nbytes
> INT_MAX
) {
1614 *(int *)data
= INT_MAX
;
1615 } else if (temp_nbytes
< 0) {
1618 *(int *)data
= (int)temp_nbytes
;
1622 if (com
== FIONBIO
|| com
== FIOASYNC
) { /* XXX */
/* TIOCREVOKE variants are not honored through this path (body elided). */
1635 if (com
== TIOCREVOKE
|| com
== TIOCREVOKECLEAR
) {
1640 /* Should not be able to set block size from user space */
1641 if (com
== DKIOCSETBLOCKSIZE
) {
/* FIODTYPE: report the device type from the block/char device switch. */
1646 if (com
== FIODTYPE
) {
1647 if (vp
->v_type
== VBLK
) {
/* Bounds-check the major number against the bdevsw table. */
1648 if (major(vp
->v_rdev
) >= nblkdev
) {
1652 *(int *)data
= bdevsw
[major(vp
->v_rdev
)].d_type
;
1653 } else if (vp
->v_type
== VCHR
) {
1654 if (major(vp
->v_rdev
) >= nchrdev
) {
1658 *(int *)data
= cdevsw
[major(vp
->v_rdev
)].d_type
;
/* Everything else goes to the filesystem. */
1665 error
= VNOP_IOCTL(vp
, com
, data
, fp
->fp_glob
->fg_flag
, ctx
);
/*
 * Successful TIOCSCTTY: this vnode becomes the session's controlling
 * terminal. Record it (and its vid) under the session lock.
 * NOTE(review): disposition of the previous ttyvp was elided.
 */
1667 if (error
== 0 && com
== TIOCSCTTY
) {
1668 sessp
= proc_session(vfs_context_proc(ctx
));
1670 session_lock(sessp
);
1671 ttyvp
= sessp
->s_ttyvp
;
1672 sessp
->s_ttyvp
= vp
;
1673 sessp
->s_ttyvid
= vnode_vid(vp
);
1674 session_unlock(sessp
);
1675 session_rele(sessp
);
/* Drop the iocount taken above. */
1679 (void)vnode_put(vp
);
1684 * File table vnode select routine.
/*
 * vn_select(): file-table select routine for vnodes. Builds a local
 * vfs_context from the current thread and the fileglob's credential,
 * runs the MAC select check, then delegates to VNOP_SELECT().
 *
 * NOTE(review): extraction dropped the `int error;` declaration, the
 * CONFIG_MACF conditionals, and the return statement; also note the
 * parameter is tagged __unused yet `ctx` appears in the surviving MAC and
 * VNOP_SELECT call lines — presumably the __unused applies only in
 * non-MACF builds; confirm against the full source.
 */
1687 vn_select(struct fileproc
*fp
, int which
, void *wql
, __unused vfs_context_t ctx
)
1690 struct vnode
* vp
= (struct vnode
*)fp
->fp_glob
->fg_data
;
1691 struct vfs_context context
;
/* Take an iocount; only proceed if the vnode is still usable. */
1693 if ((error
= vnode_getwithref(vp
)) == 0) {
/* Local context: current thread plus the credential captured at open. */
1694 context
.vc_thread
= current_thread();
1695 context
.vc_ucred
= fp
->fp_glob
->fg_cred
;
1699 * XXX We should use a per thread credential here; minimally,
1700 * XXX the process credential should have a persistent
1701 * XXX reference on it before being passed in here.
1703 error
= mac_vnode_check_select(ctx
, vp
, which
);
1706 error
= VNOP_SELECT(vp
, which
, fp
->fp_glob
->fg_flag
, wql
, ctx
);
/* Drop the iocount. */
1708 (void)vnode_put(vp
);
1714 * File table vnode close routine.
/*
 * vn_closefile(): file-table close routine for vnodes. If the descriptor
 * held a flock()-style or OFD advisory lock, release it before closing,
 * then call vn_close() with the fileglob's open flags.
 *
 * NOTE(review): extraction dropped the `int error;` declaration, the
 * `struct flock lf = {` initializer header around the `.l_whence` line,
 * closing braces, and the return; treat as a fragment.
 */
1717 vn_closefile(struct fileglob
*fg
, vfs_context_t ctx
)
1719 struct vnode
*vp
= fg
->fg_data
;
/* Take an iocount; the unlock/close work needs a live vnode. */
1722 if ((error
= vnode_getwithref(vp
)) == 0) {
/* Only vnode-type globs with an outstanding advisory lock need unlocking. */
1723 if (FILEGLOB_DTYPE(fg
) == DTYPE_VNODE
&&
1724 ((fg
->fg_flag
& FWASLOCKED
) != 0 ||
1725 (fg
->fg_lflags
& FG_HAS_OFDLOCK
) != 0)) {
/* Part of a whole-file struct flock initializer (remainder elided). */
1727 .l_whence
= SEEK_SET
,
/* Release a flock()-style lock keyed on the fileglob. */
1733 if ((fg
->fg_flag
& FWASLOCKED
) != 0) {
1734 (void) VNOP_ADVLOCK(vp
, (caddr_t
)fg
,
1735 F_UNLCK
, &lf
, F_FLOCK
, ctx
, NULL
);
/* Release an OFD (open-file-description) lock, same owner key. */
1738 if ((fg
->fg_lflags
& FG_HAS_OFDLOCK
) != 0) {
1739 (void) VNOP_ADVLOCK(vp
, (caddr_t
)fg
,
1740 F_UNLCK
, &lf
, F_OFD_LOCK
, ctx
, NULL
);
/* Actual close, then drop the iocount. */
1743 error
= vn_close(vp
, fg
->fg_flag
, ctx
);
1744 (void) vnode_put(vp
);
1750 * Returns: 0 Success
/*
 * vn_pathconf(): pathconf(2) backend for vnodes. Answers a set of _PC_*
 * names generically (several with admittedly approximate values — see the
 * XXX comments), handles _PC_XATTR_SIZE_BITS by probing the mount's
 * volume capabilities, and forwards unknown names to VNOP_PATHCONF().
 *
 * NOTE(review): extraction dropped the `switch (name)` header, the
 * `int error = 0;` declaration, every `break;`, the default case, and the
 * return; treat as a fragment.
 */
1754 vn_pathconf(vnode_t vp
, int name
, int32_t *retval
, vfs_context_t ctx
)
1757 struct vfs_attr vfa
;
1760 case _PC_EXTENDED_SECURITY_NP
:
1761 *retval
= vfs_extendedsecurity(vnode_mount(vp
)) ? 1 : 0;
1763 case _PC_AUTH_OPAQUE_NP
:
1764 *retval
= vfs_authopaque(vnode_mount(vp
));
1766 case _PC_2_SYMLINKS
:
1767 *retval
= 1; /* XXX NOTSUP on MSDOS, etc. */
1769 case _PC_ALLOC_SIZE_MIN
:
1770 *retval
= 1; /* XXX lie: 1 byte */
1772 case _PC_ASYNC_IO
: /* unistd.h: _POSIX_ASYNCHRONOUS_IO */
1773 *retval
= 1; /* [AIO] option is supported */
1775 case _PC_PRIO_IO
: /* unistd.h: _POSIX_PRIORITIZED_IO */
1776 *retval
= 0; /* [PIO] option is not supported */
1778 case _PC_REC_INCR_XFER_SIZE
:
1779 *retval
= 4096; /* XXX go from MIN to MAX 4K at a time */
1781 case _PC_REC_MIN_XFER_SIZE
:
1782 *retval
= 4096; /* XXX recommend 4K minimum reads/writes */
1784 case _PC_REC_MAX_XFER_SIZE
:
1785 *retval
= 65536; /* XXX recommend 64K maximum reads/writes */
1787 case _PC_REC_XFER_ALIGN
:
1788 *retval
= 4096; /* XXX recommend page aligned buffers */
1790 case _PC_SYMLINK_MAX
:
1791 *retval
= 255; /* Minimum acceptable POSIX value */
1793 case _PC_SYNC_IO
: /* unistd.h: _POSIX_SYNCHRONIZED_IO */
1794 *retval
= 0; /* [SIO] option is not supported */
1796 case _PC_XATTR_SIZE_BITS
:
1797 /* The number of bits used to store maximum extended
1798 * attribute size in bytes. For example, if the maximum
1799 * attribute size supported by a file system is 128K, the
1800 * value returned will be 18. However a value 18 can mean
1801 * that the maximum attribute size can be anywhere from
1802 * (256KB - 1) to 128KB. As a special case, the resource
1803 * fork can have much larger size, and some file system
1804 * specific extended attributes can have smaller and preset
1805 * size; for example, Finder Info is always 32 bytes.
1807 memset(&vfa
, 0, sizeof(vfa
));
/* Ask the mount whether it natively supports extended attributes. */
1809 VFSATTR_WANTED(&vfa
, f_capabilities
);
1810 if (vfs_getattr(vnode_mount(vp
), &vfa
, ctx
) == 0 &&
1811 (VFSATTR_IS_SUPPORTED(&vfa
, f_capabilities
)) &&
1812 (vfa
.f_capabilities
.capabilities
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_EXTENDED_ATTR
) &&
1813 (vfa
.f_capabilities
.valid
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_EXTENDED_ATTR
)) {
1814 /* Supports native extended attributes */
1815 error
= VNOP_PATHCONF(vp
, name
, retval
, ctx
);
1817 /* Number of bits used to represent the maximum size of
1818 * extended attribute stored in an Apple Double file.
1820 *retval
= AD_XATTR_SIZE_BITS
;
/* Unknown name: let the filesystem answer. */
1824 error
= VNOP_PATHCONF(vp
, name
, retval
, ctx
);
/*
 * vn_kqfilter(): attach a knote to a vnode for kqueue filtering
 * (EVFILT_READ / EVFILT_WRITE / EVFILT_VNODE, per the elided switch).
 * FIFOs are only accepted when backed by our fifofs; character devices
 * are offered to spec_kqfilter() first. On success the vnode is stored
 * in kn_hook, the knote is attached to the vnode's knote list, and the
 * filter's initial state is evaluated.
 *
 * NOTE(review): extraction dropped declarations (`int error`, `int
 * result`, `struct vnode *vp`), the switch case labels, vnode lock/unlock
 * around KNOTE_ATTACH, `vnode_put` calls, and the return paths; treat as
 * a fragment.
 */
1832 vn_kqfilter(struct fileproc
*fp
, struct knote
*kn
, struct kevent_qos_s
*kev
)
1834 vfs_context_t ctx
= vfs_context_current();
1839 vp
= (struct vnode
*)fp
->fp_glob
->fg_data
;
1842 * Don't attach a knote to a dead vnode.
1844 if ((error
= vget_internal(vp
, 0, VNODE_NODEAD
)) == 0) {
/* Per-filter validation; NOTE(review): case labels elided. */
1845 switch (kn
->kn_filter
) {
1848 if (vnode_isfifo(vp
)) {
1849 /* We'll only watch FIFOs that use our fifofs */
1850 if (!(vp
->v_fifoinfo
&& vp
->v_fifoinfo
->fi_readsock
)) {
/* Non-regular files: character devices may be claimed by specfs. */
1853 } else if (!vnode_isreg(vp
)) {
1854 if (vnode_ischr(vp
)) {
1855 result
= spec_kqfilter(vp
, kn
, kev
);
1856 if ((kn
->kn_flags
& EV_ERROR
) == 0) {
1857 /* claimed by a special device */
/* MAC hook using the credential captured in the fileglob. */
1873 error
= mac_vnode_check_kqfilter(ctx
, fp
->fp_glob
->fg_cred
, kn
, vp
);
/* Wire the knote to this vnode and route events to the VN filter ops. */
1880 kn
->kn_hook
= (void*)vp
;
1881 kn
->kn_filtid
= EVFILTID_VN
;
/* Attach to the vnode's knote list and compute the initial state. */
1884 KNOTE_ATTACH(&vp
->v_knotes
, kn
);
1885 result
= filt_vnode_common(kn
, NULL
, vp
, 0);
1889 * Ask the filesystem to provide remove notifications,
1890 * but ignore failure
1892 VNOP_MONITOR(vp
, 0, VNODE_MONITOR_BEGIN
, (void*) kn
, ctx
);
/* Error path: record the error on the knote itself. */
1900 knote_set_error(kn
, error
);
/*
 * filt_vndetach(): detach an EVFILT_VNODE-family knote from its vnode.
 * Validates the vnode via its vid, removes the knote from the vnode's
 * knote list, and tells the filesystem monitoring has ended.
 *
 * NOTE(review): extraction dropped the early return when
 * vnode_getwithvid() fails, the vnode lock/unlock around KNOTE_DETACH,
 * and the final vnode_put — confirm against the full source.
 */
1907 filt_vndetach(struct knote
*kn
)
1909 vfs_context_t ctx
= vfs_context_current();
1910 struct vnode
*vp
= (struct vnode
*)kn
->kn_hook
;
1911 uint32_t vid
= vnode_vid(vp
);
/* Re-acquire the vnode by id; a mismatch means it was recycled. */
1912 if (vnode_getwithvid(vp
, vid
)) {
1917 KNOTE_DETACH(&vp
->v_knotes
, kn
);
1921 * Tell a (generally networked) filesystem that we're no longer watching
1922 * If the FS wants to track contexts, it should still be using the one from
1923 * the VNODE_MONITOR_BEGIN.
1925 VNOP_MONITOR(vp
, 0, VNODE_MONITOR_END
, (void*)kn
, ctx
);
1931 * Used for EVFILT_READ
1933 * Takes only VFIFO or VREG. vnode is locked. We handle the "poll" case
1934 * differently than the regular case for VREG files. If not in poll(),
1935 * then we need to know current fileproc offset for VREG.
/*
 * vnode_readable_data_count(): how many bytes are readable right now.
 * FIFOs report the character count from fifofs; regular files report
 * (cached file size - current_offset). Anything else panics, since only
 * VFIFO/VREG may carry an EVFILT_READ knote.
 *
 * NOTE(review): extraction dropped the `int cnt;`/`off_t amount;`
 * declarations, the fifo error branch, the clamp bodies for the
 * INT64_MAX/INT64_MIN comparisons, and the `ispoll` special case that the
 * header comment above describes — confirm against the full source.
 */
1938 vnode_readable_data_count(vnode_t vp
, off_t current_offset
, int ispoll
)
1940 if (vnode_isfifo(vp
)) {
/* Ask fifofs how many characters are buffered. */
1943 int err
= fifo_charcount(vp
, &cnt
);
1945 return (int64_t)cnt
;
1951 } else if (vnode_isreg(vp
)) {
/* Bytes between the cached ubc size and the caller's offset. */
1957 amount
= vp
->v_un
.vu_ubcinfo
->ui_size
- current_offset
;
/* Clamp to the int64 range (clamp bodies elided by extraction). */
1958 if (amount
> INT64_MAX
) {
1960 } else if (amount
< INT64_MIN
) {
1963 return (int64_t)amount
;
1966 panic("Should never have an EVFILT_READ except for reg or fifo.");
1972 * Used for EVFILT_WRITE.
1974 * For regular vnodes, we can always write (1). For named pipes,
1975 * see how much space there is in the buffer. Nothing else is covered.
/*
 * vnode_writable_space_count(): how much can be written right now.
 * FIFOs report the free space from fifofs; per the header comment above,
 * regular files are always writable. Anything else panics.
 *
 * NOTE(review): extraction dropped the `long spc;` declaration, the fifo
 * error branch, and the VREG return. Also, the panic message below says
 * EVFILT_READ although this is the write-side helper — looks like a
 * copy-paste from vnode_readable_data_count(); should presumably read
 * EVFILT_WRITE (runtime string, so not altered here) — confirm upstream.
 */
1978 vnode_writable_space_count(vnode_t vp
)
1980 if (vnode_isfifo(vp
)) {
/* Ask fifofs how much buffer space remains. */
1983 int err
= fifo_freespace(vp
, &spc
);
1985 return (intptr_t)spc
;
1991 } else if (vnode_isreg(vp
)) {
1994 panic("Should never have an EVFILT_READ except for reg or fifo.");
2000 * Determine whether this knote should be active
2002 * This is kind of subtle.
2003 * --First, notice if the vnode has been revoked: if so, override hint
2004 * --EVFILT_READ knotes are checked no matter what the hint is
2005 * --Other knotes activate based on hint.
2006 * --If hint is revoke, set special flags and activate
/*
 * filt_vnode_common(): shared activation test for all vnode knote
 * filters. Caller must hold the vnode lock (asserted below). A
 * NOTE_REVOKE hint forces EOF/ONESHOT; EVFILT_READ/WRITE activate on a
 * nonzero readable/writable count; EVFILT_VNODE activates when the
 * knote's interest mask intersects the hint. When both kev is supplied
 * and the knote is active, the kevent is filled in with `data`.
 *
 * NOTE(review): extraction dropped the `int activate`/`int64_t data`
 * declarations, the switch case labels and `break;`s, and the final
 * `return activate;` — treat as a fragment.
 */
2009 filt_vnode_common(struct knote
*kn
, struct kevent_qos_s
*kev
, vnode_t vp
, long hint
)
/* Caller contract: vnode lock is held for the whole evaluation. */
2014 lck_mtx_assert(&vp
->v_lock
, LCK_MTX_ASSERT_OWNED
);
2016 /* Special handling for vnodes that are in recycle or already gone */
2017 if (NOTE_REVOKE
== hint
) {
/* Revoked vnode: force end-of-file and one-shot delivery. */
2018 kn
->kn_flags
|= (EV_EOF
| EV_ONESHOT
);
/* Only surface NOTE_REVOKE to watchers that asked for it. */
2021 if ((kn
->kn_filter
== EVFILT_VNODE
) && (kn
->kn_sfflags
& NOTE_REVOKE
)) {
2022 kn
->kn_fflags
|= NOTE_REVOKE
;
/* Per-filter activation test; NOTE(review): case labels elided. */
2025 switch (kn
->kn_filter
) {
/* Read: active when any data is readable at the fd's current offset. */
2027 data
= vnode_readable_data_count(vp
, kn
->kn_fp
->fp_glob
->fg_offset
, (kn
->kn_flags
& EV_POLL
));
2028 activate
= (data
!= 0);
/* Write: active when any space is writable. */
2031 data
= vnode_writable_space_count(vp
);
2032 activate
= (data
!= 0);
2035 /* Check events this note matches against the hint */
2036 if (kn
->kn_sfflags
& hint
) {
2037 kn
->kn_fflags
|= hint
; /* Set which event occurred */
2039 activate
= (kn
->kn_fflags
!= 0);
2042 panic("Invalid knote filter on a vnode!\n");
/* Deliver the event data when asked to and the knote is active. */
2046 if (kev
&& activate
) {
2047 knote_fill_kevent(kn
, kev
, data
);
2054 filt_vnode(struct knote
*kn
, long hint
)
2056 vnode_t vp
= (struct vnode
*)kn
->kn_hook
;
2058 return filt_vnode_common(kn
, NULL
, vp
, hint
);
/*
 * filt_vntouch(): re-evaluate a vnode knote after userspace updates its
 * interest mask. Validates the vnode by vid, accepts the new sfflags from
 * the incoming kevent, and recomputes activation via filt_vnode_common().
 *
 * NOTE(review): extraction dropped the `int activate`/`hint`
 * declarations, the vnode lock/unlock, the body of the getiocount-failure
 * branch (which presumably sets hint to NOTE_REVOKE rather than taking an
 * iocount — confirm), and the `return activate;` — treat as a fragment.
 */
2062 filt_vntouch(struct knote
*kn
, struct kevent_qos_s
*kev
)
2064 vnode_t vp
= (struct vnode
*)kn
->kn_hook
;
2065 uint32_t vid
= vnode_vid(vp
);
/* Vid mismatch / dead vnode: branch body elided by extraction. */
2070 if (vnode_getiocount(vp
, vid
, VNODE_NODEAD
| VNODE_WITHID
) != 0) {
2075 /* accept new input fflags mask */
2076 kn
->kn_sfflags
= kev
->fflags
;
/* Recompute activation with the updated interest mask. */
2078 activate
= filt_vnode_common(kn
, NULL
, vp
, hint
);
/* Drop the iocount taken above (lock already held at this point). */
2081 vnode_put_locked(vp
);
2089 filt_vnprocess(struct knote
*kn
, struct kevent_qos_s
*kev
)
2091 vnode_t vp
= (struct vnode
*)kn
->kn_hook
;
2092 uint32_t vid
= vnode_vid(vp
);
2097 if (vnode_getiocount(vp
, vid
, VNODE_NODEAD
| VNODE_WITHID
) != 0) {
2101 activate
= filt_vnode_common(kn
, kev
, vp
, hint
);
2103 /* Definitely need to unlock, may need to put */
2105 vnode_put_locked(vp
);