2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1982, 1986, 1989, 1993
31 * The Regents of the University of California. All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95
70 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
71 * support for mandatory and extensible security protections. This notice
72 * is included in support of clause 2.2 (b) of the Apple Public License,
76 #include <sys/param.h>
77 #include <sys/types.h>
78 #include <sys/systm.h>
79 #include <sys/kernel.h>
80 #include <sys/file_internal.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount_internal.h>
85 #include <sys/namei.h>
86 #include <sys/vnode_internal.h>
87 #include <sys/ioctl.h>
89 /* Temporary workaround for ubc.h until <rdar://4714366 is resolved */
90 #define ubc_setcred ubc_setcred_deprecated
93 int ubc_setcred(struct vnode
*, struct proc
*);
96 #include <sys/fsevents.h>
97 #include <sys/kdebug.h>
98 #include <sys/xattr.h>
99 #include <sys/ubc_internal.h>
100 #include <sys/uio_internal.h>
101 #include <sys/resourcevar.h>
102 #include <sys/signalvar.h>
104 #include <vm/vm_kern.h>
105 #include <vm/vm_map.h>
107 #include <miscfs/specfs/specdev.h>
108 #include <miscfs/fifofs/fifo.h>
111 #include <security/mac_framework.h>
114 #include <IOKit/IOBSD.h>
115 #include <libkern/section_keywords.h>
117 static int vn_closefile(struct fileglob
*fp
, vfs_context_t ctx
);
118 static int vn_ioctl(struct fileproc
*fp
, u_long com
, caddr_t data
,
120 static int vn_read(struct fileproc
*fp
, struct uio
*uio
, int flags
,
122 static int vn_write(struct fileproc
*fp
, struct uio
*uio
, int flags
,
124 static int vn_select( struct fileproc
*fp
, int which
, void * wql
,
126 static int vn_kqfilt_add(struct fileproc
*fp
, struct knote
*kn
,
127 struct kevent_internal_s
*kev
, vfs_context_t ctx
);
128 static void filt_vndetach(struct knote
*kn
);
129 static int filt_vnode(struct knote
*kn
, long hint
);
130 static int filt_vnode_common(struct knote
*kn
, vnode_t vp
, long hint
);
131 static int vn_open_auth_finish(vnode_t vp
, int fmode
, vfs_context_t ctx
);
133 static int vn_kqfilt_remove(struct vnode
*vp
, uintptr_t ident
,
137 const struct fileops vnops
= {
138 .fo_type
= DTYPE_VNODE
,
140 .fo_write
= vn_write
,
141 .fo_ioctl
= vn_ioctl
,
142 .fo_select
= vn_select
,
143 .fo_close
= vn_closefile
,
144 .fo_kqfilter
= vn_kqfilt_add
,
148 static int filt_vntouch(struct knote
*kn
, struct kevent_internal_s
*kev
);
149 static int filt_vnprocess(struct knote
*kn
, struct filt_process_s
*data
, struct kevent_internal_s
*kev
);
151 SECURITY_READ_ONLY_EARLY(struct filterops
) vnode_filtops
= {
154 .f_detach
= filt_vndetach
,
155 .f_event
= filt_vnode
,
156 .f_touch
= filt_vntouch
,
157 .f_process
= filt_vnprocess
,
161 * Common code for vnode open operations.
162 * Check permissions, and call the VNOP_OPEN or VNOP_CREATE routine.
164 * XXX the profusion of interfaces here is probably a bad thing.
/*
 * vn_open: common entry point for vnode open operations.
 *
 * Thin convenience wrapper: passes 'fmode' by address so that the
 * lower layer may update the open-mode flags, and defers all real
 * work (permission checks, VNOP_OPEN/VNOP_CREATE) to
 * vn_open_modflags().
 *
 * Returns: 0 on success, otherwise an errno value.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	return vn_open_modflags(ndp, &fmode, cmode);
}
173 vn_open_modflags(struct nameidata
*ndp
, int *fmodep
, int cmode
)
175 struct vnode_attr va
;
178 VATTR_SET(&va
, va_mode
, cmode
);
180 return vn_open_auth(ndp
, fmodep
, &va
);
184 vn_open_auth_finish(vnode_t vp
, int fmode
, vfs_context_t ctx
)
188 if ((error
= vnode_ref_ext(vp
, fmode
, 0)) != 0) {
192 /* Call out to allow 3rd party notification of open.
193 * Ignore result of kauth_authorize_fileop call.
196 mac_vnode_notify_open(ctx
, vp
, fmode
);
198 kauth_authorize_fileop(vfs_context_ucred(ctx
), KAUTH_FILEOP_OPEN
,
208 * May do nameidone() to allow safely adding an FSEvent. Cue off of ni_dvp to
209 * determine whether that has happened.
212 vn_open_auth_do_create(struct nameidata
*ndp
, struct vnode_attr
*vap
, int fmode
, boolean_t
*did_create
, boolean_t
*did_open
, vfs_context_t ctx
)
215 vnode_t dvp
= ndp
->ni_dvp
;
220 batched
= vnode_compound_open_available(ndp
->ni_dvp
);
223 VATTR_SET(vap
, va_type
, VREG
);
224 if (fmode
& O_EXCL
) {
225 vap
->va_vaflags
|= VA_EXCLUSIVE
;
229 if (ndp
->ni_cnd
.cn_flags
& CN_WANTSRSRCFORK
) {
230 if ((error
= vn_authorize_create(dvp
, &ndp
->ni_cnd
, vap
, ctx
, NULL
)) != 0) {
233 if ((error
= vnode_makenamedstream(dvp
, &ndp
->ni_vp
, XATTR_RESOURCEFORK_NAME
, 0, ctx
)) != 0) {
240 if ((error
= vn_authorize_create(dvp
, &ndp
->ni_cnd
, vap
, ctx
, NULL
)) != 0) {
245 error
= vn_create(dvp
, &ndp
->ni_vp
, ndp
, vap
, VN_CREATE_DOOPEN
, fmode
, &status
, ctx
);
248 *did_create
= (status
& COMPOUND_OPEN_STATUS_DID_CREATE
) ? TRUE
: FALSE
;
253 if (error
== EKEEPLOOKING
) {
255 panic("EKEEPLOOKING, but we did a create?");
258 panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?");
260 if ((ndp
->ni_flag
& NAMEI_CONTLOOKUP
) == 0) {
261 panic("EKEEPLOOKING, but continue flag not set?");
265 * Do NOT drop the dvp: we need everything to continue the lookup.
271 *did_create
= (status
& COMPOUND_OPEN_STATUS_DID_CREATE
) ? 1 : 0;
284 int update_flags
= 0;
286 // Make sure the name & parent pointers are hooked up
287 if (vp
->v_name
== NULL
) {
288 update_flags
|= VNODE_UPDATE_NAME
;
290 if (vp
->v_parent
== NULLVP
) {
291 update_flags
|= VNODE_UPDATE_PARENT
;
295 vnode_update_identity(vp
, dvp
, ndp
->ni_cnd
.cn_nameptr
, ndp
->ni_cnd
.cn_namelen
, ndp
->ni_cnd
.cn_hash
, update_flags
);
299 ndp
->ni_dvp
= NULLVP
;
302 if (need_fsevent(FSE_CREATE_FILE
, vp
)) {
303 add_fsevent(FSE_CREATE_FILE
, ctx
,
310 if (ndp
->ni_dvp
!= NULLVP
) {
312 ndp
->ni_dvp
= NULLVP
;
319 * This is the number of times we'll loop in vn_open_auth without explicitly
320 * yielding the CPU when we determine we have to retry.
322 #define RETRY_NO_YIELD_COUNT 5
325 * Open a file with authorization, updating the contents of the structures
326 * pointed to by ndp, fmodep, and vap as necessary to perform the requested
327 * operation. This function is used for both opens of existing files, and
328 * creation of new files.
330 * Parameters: ndp The nami data pointer describing the
332 * fmodep A pointer to an int containg the mode
333 * information to be used for the open
334 * vap A pointer to the vnode attribute
335 * descriptor to be used for the open
337 * Indirect: * Contents of the data structures pointed
338 * to by the parameters are modified as
339 * necessary to the requested operation.
344 * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order.
346 * The contents of '*ndp' will be modified, based on the other
347 * arguments to this function, and to return file and directory
348 * data necessary to satisfy the requested operation.
350 * If the file does not exist and we are creating it, then the
351 * O_TRUNC flag will be cleared in '*fmodep' to indicate to the
352 * caller that the file was not truncated.
354 * If the file exists and the O_EXCL flag was not specified, then
355 * the O_CREAT flag will be cleared in '*fmodep' to indicate to
356 * the caller that the existing file was merely opened rather
359 * The contents of '*vap' will be modified as necessary to
360 * complete the operation, including setting of supported
361 * attribute, clearing of fields containing unsupported attributes
362 * in the request, if the request proceeds without them, etc..
364 * XXX: This function is too complicated in actings on its arguments
366 * XXX: We should enummerate the possible errno values here, and where
367 * in the code they originated.
370 vn_open_auth(struct nameidata
*ndp
, int *fmodep
, struct vnode_attr
*vap
)
374 vfs_context_t ctx
= ndp
->ni_cnd
.cn_context
;
377 uint32_t origcnflags
;
378 boolean_t did_create
;
380 boolean_t need_vnop_open
;
382 boolean_t ref_failed
;
390 need_vnop_open
= TRUE
;
393 origcnflags
= ndp
->ni_cnd
.cn_flags
;
395 // If raw encrypted mode is requested, handle that here
396 if (VATTR_IS_ACTIVE(vap
, va_dataprotect_flags
)
397 && ISSET(vap
->va_dataprotect_flags
, VA_DP_RAWENCRYPTED
)) {
404 if (fmode
& O_CREAT
) {
405 if ((fmode
& O_DIRECTORY
)) {
409 ndp
->ni_cnd
.cn_nameiop
= CREATE
;
411 ndp
->ni_op
= OP_LINK
;
413 /* Inherit USEDVP, vnode_open() supported flags only */
414 ndp
->ni_cnd
.cn_flags
&= (USEDVP
| NOCROSSMOUNT
);
415 ndp
->ni_cnd
.cn_flags
|= LOCKPARENT
| LOCKLEAF
| AUDITVNPATH1
;
416 ndp
->ni_flag
= NAMEI_COMPOUNDOPEN
;
418 /* open calls are allowed for resource forks. */
419 ndp
->ni_cnd
.cn_flags
|= CN_ALLOWRSRCFORK
;
421 if ((fmode
& O_EXCL
) == 0 && (fmode
& O_NOFOLLOW
) == 0 && (origcnflags
& FOLLOW
) != 0) {
422 ndp
->ni_cnd
.cn_flags
|= FOLLOW
;
425 continue_create_lookup
:
426 if ((error
= namei(ndp
))) {
433 batched
= vnode_compound_open_available(dvp
);
435 /* not found, create */
437 /* must have attributes for a new file */
444 * Attempt a create. For a system supporting compound VNOPs, we may
445 * find an existing file or create one; in either case, we will already
446 * have the file open and no VNOP_OPEN() will be needed.
448 error
= vn_open_auth_do_create(ndp
, vap
, fmode
, &did_create
, &did_open
, ctx
);
454 * Detected a node that the filesystem couldn't handle. Don't call
455 * nameidone() yet, because we need that path buffer.
457 if (error
== EKEEPLOOKING
) {
459 panic("EKEEPLOOKING from a filesystem that doesn't support compound VNOPs?");
461 goto continue_create_lookup
;
466 panic("Shouldn't have a dvp here.");
471 * Check for a create race.
473 if ((error
== EEXIST
) && !(fmode
& O_EXCL
)) {
482 need_vnop_open
= !did_open
;
484 if (fmode
& O_EXCL
) {
489 * We have a vnode. Use compound open if available
490 * or else fall through to "traditional" path. Note: can't
491 * do a compound open for root, because the parent belongs
494 if (error
== 0 && batched
&& (vnode_mount(dvp
) == vnode_mount(vp
))) {
495 error
= VNOP_COMPOUND_OPEN(dvp
, &ndp
->ni_vp
, ndp
, 0, fmode
, NULL
, NULL
, ctx
);
499 need_vnop_open
= FALSE
;
500 } else if (error
== EKEEPLOOKING
) {
501 if ((ndp
->ni_flag
& NAMEI_CONTLOOKUP
) == 0) {
502 panic("EKEEPLOOKING, but continue flag not set?");
504 goto continue_create_lookup
;
509 ndp
->ni_dvp
= NULLVP
;
523 ndp
->ni_cnd
.cn_nameiop
= LOOKUP
;
524 /* Inherit USEDVP, vnode_open() supported flags only */
525 ndp
->ni_cnd
.cn_flags
&= (USEDVP
| NOCROSSMOUNT
);
526 ndp
->ni_cnd
.cn_flags
|= FOLLOW
| LOCKLEAF
| AUDITVNPATH1
| WANTPARENT
;
528 /* open calls are allowed for resource forks. */
529 ndp
->ni_cnd
.cn_flags
|= CN_ALLOWRSRCFORK
;
531 if (fmode
& FENCRYPTED
) {
532 ndp
->ni_cnd
.cn_flags
|= CN_RAW_ENCRYPTED
| CN_SKIPNAMECACHE
;
534 ndp
->ni_flag
= NAMEI_COMPOUNDOPEN
;
536 /* preserve NOFOLLOW from vnode_open() */
537 if (fmode
& O_NOFOLLOW
|| fmode
& O_SYMLINK
|| (origcnflags
& FOLLOW
) == 0) {
538 ndp
->ni_cnd
.cn_flags
&= ~FOLLOW
;
541 /* Do a lookup, possibly going directly to filesystem for compound operation */
543 if ((error
= namei(ndp
))) {
549 /* Check for batched lookup-open */
550 batched
= vnode_compound_open_available(dvp
);
551 if (batched
&& ((vp
== NULLVP
) || (vnode_mount(dvp
) == vnode_mount(vp
)))) {
552 error
= VNOP_COMPOUND_OPEN(dvp
, &ndp
->ni_vp
, ndp
, 0, fmode
, NULL
, NULL
, ctx
);
555 need_vnop_open
= FALSE
;
556 } else if (error
== EKEEPLOOKING
) {
557 if ((ndp
->ni_flag
& NAMEI_CONTLOOKUP
) == 0) {
558 panic("EKEEPLOOKING, but continue flag not set?");
562 } while (error
== EKEEPLOOKING
);
566 ndp
->ni_dvp
= NULLVP
;
574 * By this point, nameidone() is called, dvp iocount is dropped,
575 * and dvp pointer is cleared.
577 if (ndp
->ni_dvp
!= NULLVP
) {
578 panic("Haven't cleaned up adequately in vn_open_auth()");
581 #if DEVELOPMENT || DEBUG
583 * XXX VSWAP: Check for entitlements or special flag here
584 * so we can restrict access appropriately.
586 #else /* DEVELOPMENT || DEBUG */
588 if (vnode_isswap(vp
) && (fmode
& (FWRITE
| O_TRUNC
)) && (ctx
!= vfs_context_kernel())) {
592 #endif /* DEVELOPMENT || DEBUG */
595 * Expect to use this code for filesystems without compound VNOPs, for the root
596 * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
597 * and for shadow files, which do not live on the same filesystems as their "parents."
599 if (need_vnop_open
) {
600 if (batched
&& !vnode_isvroot(vp
) && !vnode_isnamedstream(vp
)) {
601 panic("Why am I trying to use VNOP_OPEN() on anything other than the root or a named stream?");
605 error
= vn_authorize_open_existing(vp
, &ndp
->ni_cnd
, fmode
, ctx
, NULL
);
611 if (VATTR_IS_ACTIVE(vap
, va_dataprotect_flags
)
612 && ISSET(vap
->va_dataprotect_flags
, VA_DP_RAWUNENCRYPTED
)) {
613 /* Don't allow unencrypted io request from user space unless entitled */
614 boolean_t entitled
= FALSE
;
616 entitled
= IOTaskHasEntitlement(current_task(), "com.apple.private.security.file-unencrypt-access");
622 fmode
|= FUNENCRYPTED
;
625 error
= VNOP_OPEN(vp
, fmode
, ctx
);
629 need_vnop_open
= FALSE
;
632 // if the vnode is tagged VOPENEVT and the current process
633 // has the P_CHECKOPENEVT flag set, then we or in the O_EVTONLY
634 // flag to the open mode so that this open won't count against
635 // the vnode when carbon delete() does a vnode_isinuse() to see
636 // if a file is currently in use. this allows spotlight
637 // importers to not interfere with carbon apps that depend on
638 // the no-delete-if-busy semantics of carbon delete().
640 if (!did_create
&& (vp
->v_flag
& VOPENEVT
) && (current_proc()->p_flag
& P_CHECKOPENEVT
)) {
645 * Grab reference, etc.
647 error
= vn_open_auth_finish(vp
, fmode
, ctx
);
653 /* Compound VNOP open is responsible for doing the truncate */
654 if (batched
|| did_create
) {
662 /* Opened either explicitly or by a batched create */
663 if (!need_vnop_open
) {
664 VNOP_CLOSE(vp
, fmode
, ctx
);
670 /* Aggressively recycle shadow files if we error'd out during open() */
671 if ((vnode_isnamedstream(vp
)) &&
672 (vp
->v_parent
!= NULLVP
) &&
673 (vnode_isshadow(vp
))) {
679 * Check for a race against unlink. We had a vnode
680 * but according to vnode_authorize or VNOP_OPEN it
683 * EREDRIVEOPEN: means that we were hit by the tty allocation race.
685 if (((error
== ENOENT
) && (*fmodep
& O_CREAT
)) || (error
== EREDRIVEOPEN
) || ref_failed
) {
687 * We'll retry here but it may be possible that we get
688 * into a retry "spin" inside the kernel and not allow
689 * threads, which need to run in order for the retry
690 * loop to end, to run. An example is an open of a
691 * terminal which is getting revoked and we spin here
692 * without yielding becasue namei and VNOP_OPEN are
693 * successful but vnode_ref fails. The revoke needs
694 * threads with an iocount to run but if spin here we
695 * may possibly be blcoking other threads from running.
697 * We start yielding the CPU after some number of
698 * retries for increasing durations. Note that this is
699 * still a loop without an exit condition.
702 if (nretries
> RETRY_NO_YIELD_COUNT
) {
703 /* Every hz/100 secs is 10 msecs ... */
704 tsleep(&nretries
, PVFS
, "vn_open_auth_retry",
705 MIN((nretries
* (hz
/ 100)), hz
));
715 #if vn_access_DEPRECATED
717 * Authorize an action against a vnode. This has been the canonical way to
718 * ensure that the credential/process/etc. referenced by a vfs_context
719 * is granted the rights called out in 'mode' against the vnode 'vp'.
721 * Unfortunately, the use of VREAD/VWRITE/VEXEC makes it very difficult
722 * to add support for more rights. As such, this interface will be deprecated
723 * and callers will use vnode_authorize instead.
726 vn_access(vnode_t vp
, int mode
, vfs_context_t context
)
728 kauth_action_t action
;
732 action
|= KAUTH_VNODE_READ_DATA
;
735 action
|= KAUTH_VNODE_WRITE_DATA
;
738 action
|= KAUTH_VNODE_EXECUTE
;
741 return vnode_authorize(vp
, NULL
, action
, context
);
743 #endif /* vn_access_DEPRECATED */
749 vn_close(struct vnode
*vp
, int flags
, vfs_context_t ctx
)
755 /* Sync data from resource fork shadow file if needed. */
756 if ((vp
->v_flag
& VISNAMEDSTREAM
) &&
757 (vp
->v_parent
!= NULLVP
) &&
758 vnode_isshadow(vp
)) {
759 if (flags
& FWASWRITTEN
) {
760 flusherror
= vnode_flushnamedstream(vp
->v_parent
, vp
, ctx
);
765 /* work around for foxhound */
766 if (vnode_isspec(vp
)) {
767 (void)vnode_rele_ext(vp
, flags
, 0);
771 * On HFS, we flush when the last writer closes. We do this
772 * because resource fork vnodes hold a reference on data fork
773 * vnodes and that will prevent them from getting VNOP_INACTIVE
774 * which will delay when we flush cached data. In future, we
775 * might find it beneficial to do this for all file systems.
776 * Note that it's OK to access v_writecount without the lock
779 if (vp
->v_tag
== VT_HFS
&& (flags
& FWRITE
) && vp
->v_writecount
== 1) {
780 VNOP_FSYNC(vp
, MNT_NOWAIT
, ctx
);
783 error
= VNOP_CLOSE(vp
, flags
, ctx
);
786 if (flags
& FWASWRITTEN
) {
787 if (need_fsevent(FSE_CONTENT_MODIFIED
, vp
)) {
788 add_fsevent(FSE_CONTENT_MODIFIED
, ctx
,
795 if (!vnode_isspec(vp
)) {
796 (void)vnode_rele_ext(vp
, flags
, 0);
811 off_t swap_count
, this_count
;
812 off_t file_end
, read_end
;
817 * Reading from a swap file will get you zeroes.
822 swap_count
= uio_resid(uio
);
824 file_end
= ubc_getsize(vp
);
825 read_end
= uio
->uio_offset
+ uio_resid(uio
);
826 if (uio
->uio_offset
>= file_end
) {
827 /* uio starts after end of file: nothing to read */
829 } else if (read_end
> file_end
) {
830 /* uio extends beyond end of file: stop before that */
831 swap_count
-= (read_end
- file_end
);
834 while (swap_count
> 0) {
835 if (my_swap_page
== NULL
) {
836 MALLOC(my_swap_page
, char *, PAGE_SIZE
,
838 memset(my_swap_page
, '\0', PAGE_SIZE
);
839 /* add an end-of-line to keep line counters happy */
840 my_swap_page
[PAGE_SIZE
- 1] = '\n';
842 this_count
= swap_count
;
843 if (this_count
> PAGE_SIZE
) {
844 this_count
= PAGE_SIZE
;
847 prev_resid
= uio_resid(uio
);
848 error
= uiomove((caddr_t
) my_swap_page
,
854 swap_count
-= (prev_resid
- uio_resid(uio
));
856 if (my_swap_page
!= NULL
) {
857 FREE(my_swap_page
, M_TEMP
);
864 * Package up an I/O request on a vnode into a uio and do it.
882 result
= vn_rdwr_64(rw
,
884 (uint64_t)(uintptr_t)base
,
893 /* "resid" should be bounded above by "len," which is an int */
894 if (aresid
!= NULL
) {
917 struct vfs_context context
;
919 char uio_buf
[UIO_SIZEOF(1)];
921 context
.vc_thread
= current_thread();
922 context
.vc_ucred
= cred
;
924 if (UIO_SEG_IS_USER_SPACE(segflg
)) {
925 spacetype
= proc_is64bit(p
) ? UIO_USERSPACE64
: UIO_USERSPACE32
;
927 spacetype
= UIO_SYSSPACE
;
929 auio
= uio_createwithbuffer(1, offset
, spacetype
, rw
,
930 &uio_buf
[0], sizeof(uio_buf
));
931 uio_addiov(auio
, base
, len
);
935 * IO_NOAUTH should be re-examined.
936 * Likely that mediation should be performed in caller.
938 if ((ioflg
& IO_NOAUTH
) == 0) {
939 /* passed cred is fp->f_cred */
940 if (rw
== UIO_READ
) {
941 error
= mac_vnode_check_read(&context
, cred
, vp
);
943 error
= mac_vnode_check_write(&context
, cred
, vp
);
949 if (rw
== UIO_READ
) {
950 if (vnode_isswap(vp
) && ((ioflg
& IO_SWAP_DISPATCH
) == 0)) {
951 error
= vn_read_swapfile(vp
, auio
);
953 error
= VNOP_READ(vp
, auio
, ioflg
, &context
);
956 #if DEVELOPMENT || DEBUG
958 * XXX VSWAP: Check for entitlements or special flag here
959 * so we can restrict access appropriately.
961 error
= VNOP_WRITE(vp
, auio
, ioflg
, &context
);
962 #else /* DEVELOPMENT || DEBUG */
964 if (vnode_isswap(vp
) && ((ioflg
& (IO_SWAP_DISPATCH
| IO_SKIP_ENCRYPTION
)) == 0)) {
967 error
= VNOP_WRITE(vp
, auio
, ioflg
, &context
);
969 #endif /* DEVELOPMENT || DEBUG */
974 *aresid
= uio_resid(auio
);
975 } else if (uio_resid(auio
) && error
== 0) {
982 vn_offset_lock(struct fileglob
*fg
)
984 lck_mtx_lock_spin(&fg
->fg_lock
);
985 while (fg
->fg_lflags
& FG_OFF_LOCKED
) {
986 fg
->fg_lflags
|= FG_OFF_LOCKWANT
;
987 msleep(&fg
->fg_lflags
, &fg
->fg_lock
, PVFS
| PSPIN
,
988 "fg_offset_lock_wait", 0);
990 fg
->fg_lflags
|= FG_OFF_LOCKED
;
991 lck_mtx_unlock(&fg
->fg_lock
);
995 vn_offset_unlock(struct fileglob
*fg
)
999 lck_mtx_lock_spin(&fg
->fg_lock
);
1000 if (fg
->fg_lflags
& FG_OFF_LOCKWANT
) {
1003 fg
->fg_lflags
&= ~(FG_OFF_LOCKED
| FG_OFF_LOCKWANT
);
1004 lck_mtx_unlock(&fg
->fg_lock
);
1006 wakeup(&fg
->fg_lflags
);
1011 * File table vnode read routine.
1014 vn_read(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
1020 int offset_locked
= 0;
1022 vp
= (struct vnode
*)fp
->f_fglob
->fg_data
;
1023 if ((error
= vnode_getwithref(vp
))) {
1028 error
= mac_vnode_check_read(ctx
, vfs_context_ucred(ctx
), vp
);
1030 (void)vnode_put(vp
);
1035 /* This signals to VNOP handlers that this read came from a file table read */
1036 ioflag
= IO_SYSCALL_DISPATCH
;
1038 if (fp
->f_fglob
->fg_flag
& FNONBLOCK
) {
1039 ioflag
|= IO_NDELAY
;
1041 if ((fp
->f_fglob
->fg_flag
& FNOCACHE
) || vnode_isnocache(vp
)) {
1042 ioflag
|= IO_NOCACHE
;
1044 if (fp
->f_fglob
->fg_flag
& FENCRYPTED
) {
1045 ioflag
|= IO_ENCRYPTED
;
1047 if (fp
->f_fglob
->fg_flag
& FUNENCRYPTED
) {
1048 ioflag
|= IO_SKIP_ENCRYPTION
;
1050 if (fp
->f_fglob
->fg_flag
& O_EVTONLY
) {
1051 ioflag
|= IO_EVTONLY
;
1053 if (fp
->f_fglob
->fg_flag
& FNORDAHEAD
) {
1057 if ((flags
& FOF_OFFSET
) == 0) {
1058 if ((vnode_vtype(vp
) == VREG
) && !vnode_isswap(vp
)) {
1059 vn_offset_lock(fp
->f_fglob
);
1062 uio
->uio_offset
= fp
->f_fglob
->fg_offset
;
1064 count
= uio_resid(uio
);
1066 if (vnode_isswap(vp
) && !(IO_SKIP_ENCRYPTION
& ioflag
)) {
1067 /* special case for swap files */
1068 error
= vn_read_swapfile(vp
, uio
);
1070 error
= VNOP_READ(vp
, uio
, ioflag
, ctx
);
1073 if ((flags
& FOF_OFFSET
) == 0) {
1074 fp
->f_fglob
->fg_offset
+= count
- uio_resid(uio
);
1075 if (offset_locked
) {
1076 vn_offset_unlock(fp
->f_fglob
);
1081 (void)vnode_put(vp
);
1087 * File table vnode write routine.
1090 vn_write(struct fileproc
*fp
, struct uio
*uio
, int flags
, vfs_context_t ctx
)
1095 int clippedsize
= 0;
1096 int partialwrite
= 0;
1097 int residcount
, oldcount
;
1098 int offset_locked
= 0;
1099 proc_t p
= vfs_context_proc(ctx
);
1102 vp
= (struct vnode
*)fp
->f_fglob
->fg_data
;
1103 if ((error
= vnode_getwithref(vp
))) {
1107 #if DEVELOPMENT || DEBUG
1109 * XXX VSWAP: Check for entitlements or special flag here
1110 * so we can restrict access appropriately.
1112 #else /* DEVELOPMENT || DEBUG */
1114 if (vnode_isswap(vp
)) {
1115 (void)vnode_put(vp
);
1119 #endif /* DEVELOPMENT || DEBUG */
1123 error
= mac_vnode_check_write(ctx
, vfs_context_ucred(ctx
), vp
);
1125 (void)vnode_put(vp
);
1131 * IO_SYSCALL_DISPATCH signals to VNOP handlers that this write came from
1132 * a file table write
1134 ioflag
= (IO_UNIT
| IO_SYSCALL_DISPATCH
);
1136 if (vp
->v_type
== VREG
&& (fp
->f_fglob
->fg_flag
& O_APPEND
)) {
1137 ioflag
|= IO_APPEND
;
1139 if (fp
->f_fglob
->fg_flag
& FNONBLOCK
) {
1140 ioflag
|= IO_NDELAY
;
1142 if ((fp
->f_fglob
->fg_flag
& FNOCACHE
) || vnode_isnocache(vp
)) {
1143 ioflag
|= IO_NOCACHE
;
1145 if (fp
->f_fglob
->fg_flag
& FNODIRECT
) {
1146 ioflag
|= IO_NODIRECT
;
1148 if (fp
->f_fglob
->fg_flag
& FSINGLE_WRITER
) {
1149 ioflag
|= IO_SINGLE_WRITER
;
1151 if (fp
->f_fglob
->fg_flag
& O_EVTONLY
) {
1152 ioflag
|= IO_EVTONLY
;
1156 * Treat synchronous mounts and O_FSYNC on the fd as equivalent.
1158 * XXX We treat O_DSYNC as O_FSYNC for now, since we can not delay
1159 * XXX the non-essential metadata without some additional VFS work;
1160 * XXX the intent at this point is to plumb the interface for it.
1162 if ((fp
->f_fglob
->fg_flag
& (O_FSYNC
| O_DSYNC
)) ||
1163 (vp
->v_mount
&& (vp
->v_mount
->mnt_flag
& MNT_SYNCHRONOUS
))) {
1167 if ((flags
& FOF_OFFSET
) == 0) {
1168 if ((vnode_vtype(vp
) == VREG
) && !vnode_isswap(vp
)) {
1169 vn_offset_lock(fp
->f_fglob
);
1172 uio
->uio_offset
= fp
->f_fglob
->fg_offset
;
1173 count
= uio_resid(uio
);
1175 if (((flags
& FOF_OFFSET
) == 0) &&
1176 vfs_context_proc(ctx
) && (vp
->v_type
== VREG
) &&
1177 (((rlim_t
)(uio
->uio_offset
+ uio_resid(uio
)) > p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
) ||
1178 ((rlim_t
)uio_resid(uio
) > (p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
- uio
->uio_offset
)))) {
1180 * If the requested residual would cause us to go past the
1181 * administrative limit, then we need to adjust the residual
1182 * down to cause fewer bytes than requested to be written. If
1183 * we can't do that (e.g. the residual is already 1 byte),
1184 * then we fail the write with EFBIG.
1186 residcount
= uio_resid(uio
);
1187 if ((rlim_t
)(uio
->uio_offset
+ uio_resid(uio
)) > p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
) {
1188 clippedsize
= (uio
->uio_offset
+ uio_resid(uio
)) - p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
;
1189 } else if ((rlim_t
)uio_resid(uio
) > (p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
- uio
->uio_offset
)) {
1190 clippedsize
= (p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
- uio
->uio_offset
);
1192 if (clippedsize
>= residcount
) {
1193 psignal(p
, SIGXFSZ
);
1198 uio_setresid(uio
, residcount
- clippedsize
);
1200 if ((flags
& FOF_OFFSET
) != 0) {
1201 /* for pwrite, append should be ignored */
1202 ioflag
&= ~IO_APPEND
;
1203 if (p
&& (vp
->v_type
== VREG
) &&
1204 ((rlim_t
)uio
->uio_offset
>= p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
)) {
1205 psignal(p
, SIGXFSZ
);
1209 if (p
&& (vp
->v_type
== VREG
) &&
1210 ((rlim_t
)(uio
->uio_offset
+ uio_resid(uio
)) > p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
)) {
1211 //Debugger("vn_bwrite:overstepping the bounds");
1212 residcount
= uio_resid(uio
);
1213 clippedsize
= (uio
->uio_offset
+ uio_resid(uio
)) - p
->p_rlimit
[RLIMIT_FSIZE
].rlim_cur
;
1215 uio_setresid(uio
, residcount
- clippedsize
);
1219 error
= VNOP_WRITE(vp
, uio
, ioflag
, ctx
);
1222 oldcount
= uio_resid(uio
);
1223 uio_setresid(uio
, oldcount
+ clippedsize
);
1226 if ((flags
& FOF_OFFSET
) == 0) {
1227 if (ioflag
& IO_APPEND
) {
1228 fp
->f_fglob
->fg_offset
= uio
->uio_offset
;
1230 fp
->f_fglob
->fg_offset
+= count
- uio_resid(uio
);
1232 if (offset_locked
) {
1233 vn_offset_unlock(fp
->f_fglob
);
1239 * Set the credentials on successful writes
1241 if ((error
== 0) && (vp
->v_tag
== VT_NFS
) && (UBCINFOEXISTS(vp
))) {
1243 * When called from aio subsystem, we only have the proc from
1244 * which to get the credential, at this point, so use that
1245 * instead. This means aio functions are incompatible with
1246 * per-thread credentials (aio operations are proxied). We
1247 * can't easily correct the aio vs. settid race in this case
1248 * anyway, so we disallow it.
1250 if ((flags
& FOF_PCRED
) == 0) {
1251 ubc_setthreadcred(vp
, p
, current_thread());
1256 (void)vnode_put(vp
);
1260 if (offset_locked
) {
1261 vn_offset_unlock(fp
->f_fglob
);
1263 (void)vnode_put(vp
);
/*
 * NOTE(review): this chunk is a garbled extraction of xnu bsd/vfs/vfs_vnops.c.
 * The file's original line numbers are fused into the text and many source
 * lines are missing (gaps in the embedded numbering), so the statements below
 * are fragments — confirm against the upstream file before relying on them.
 *
 * vn_stat_noauth(): fill a caller-supplied stat buffer from a vnode's
 * attributes WITHOUT performing an authorization check (the caller is
 * expected to have done so, e.g. vn_stat()).
 *
 * Parameters (as visible here):
 *   vp        - vnode to stat
 *   sbptr     - points at either a struct stat or struct stat64;
 *               isstat64 selects which (see the two casts below)
 *   xsec      - out: extended security data (ACL / owner / group GUIDs),
 *               or KAUTH_FILESEC_NONE when the FS supports none of them
 *   isstat64  - nonzero => sbptr is a struct stat64
 *   ctx       - VFS context for the attribute fetch and MAC check
 *   file_cred - credential handed to the MAC getattr check
 */
1268 * File table vnode stat routine.
1270 * Returns: 0 Success
1276 vn_stat_noauth(struct vnode
*vp
, void *sbptr
, kauth_filesec_t
*xsec
, int isstat64
,
1277 vfs_context_t ctx
, struct ucred
*file_cred
)
1279 struct vnode_attr va
;
1282 kauth_filesec_t fsec
;
1283 struct stat
*sb
= (struct stat
*)0; /* warning avoidance ; protected by isstat64 */
1284 struct stat64
* sb64
= (struct stat64
*)0; /* warning avoidance ; protected by isstat64 */
/* Exactly one of sb/sb64 becomes non-NULL, selected by isstat64. */
1286 if (isstat64
!= 0) {
1287 sb64
= (struct stat64
*)sbptr
;
1289 sb
= (struct stat
*)sbptr
;
/*
 * Build the attribute request: everything stat(2) reports, plus the
 * extended-security attributes (uuuid/guuid/acl) used to populate *xsec.
 */
1291 memset(&va
, 0, sizeof(va
));
1293 VATTR_WANTED(&va
, va_fsid
);
1294 VATTR_WANTED(&va
, va_fileid
);
1295 VATTR_WANTED(&va
, va_mode
);
1296 VATTR_WANTED(&va
, va_type
);
1297 VATTR_WANTED(&va
, va_nlink
);
1298 VATTR_WANTED(&va
, va_uid
);
1299 VATTR_WANTED(&va
, va_gid
);
1300 VATTR_WANTED(&va
, va_rdev
);
1301 VATTR_WANTED(&va
, va_data_size
);
1302 VATTR_WANTED(&va
, va_access_time
);
1303 VATTR_WANTED(&va
, va_modify_time
);
1304 VATTR_WANTED(&va
, va_change_time
);
1305 VATTR_WANTED(&va
, va_create_time
);
1306 VATTR_WANTED(&va
, va_flags
);
1307 VATTR_WANTED(&va
, va_gen
);
1308 VATTR_WANTED(&va
, va_iosize
);
1309 /* lower layers will synthesise va_total_alloc from va_data_size if required */
1310 VATTR_WANTED(&va
, va_total_alloc
);
1312 VATTR_WANTED(&va
, va_uuuid
);
1313 VATTR_WANTED(&va
, va_guuid
);
1314 VATTR_WANTED(&va
, va_acl
);
/* Fetch the attributes from the filesystem. (Error-check lines are missing
 * from this extraction — TODO confirm against upstream.) */
1316 error
= vnode_getattr(vp
, &va
, ctx
);
1322 * Give MAC policies a chance to reject or filter the attributes
1323 * returned by the filesystem. Note that MAC policies are consulted
1324 * *after* calling the filesystem because filesystems can return more
1325 * attributes than were requested so policies wouldn't be authoritative
1326 * if consulted beforehand. This also gives policies an opportunity
1327 * to change the values of attributes retrieved.
1329 error
= mac_vnode_check_getattr(ctx
, file_cred
, vp
, &va
);
1335 * Copy from vattr table
/* Device and inode numbers; note the ino64_t vs ino_t cast per buffer type. */
1337 if (isstat64
!= 0) {
1338 sb64
->st_dev
= va
.va_fsid
;
1339 sb64
->st_ino
= (ino64_t
)va
.va_fileid
;
1341 sb
->st_dev
= va
.va_fsid
;
1342 sb
->st_ino
= (ino_t
)va
.va_fileid
;
/* The per-v_type cases of this switch (building `mode`) are missing from
 * this extraction — original lines 1346..1371 were dropped. */
1345 switch (vp
->v_type
) {
/*
 * Bulk attribute copy into the selected buffer. st_nlink is clamped to
 * UINT16_MAX and defaults to 1 when the FS does not report va_nlink.
 */
1372 if (isstat64
!= 0) {
1373 sb64
->st_mode
= mode
;
1374 sb64
->st_nlink
= VATTR_IS_SUPPORTED(&va
, va_nlink
) ? va
.va_nlink
> UINT16_MAX
? UINT16_MAX
: (u_int16_t
)va
.va_nlink
: 1;
1375 sb64
->st_uid
= va
.va_uid
;
1376 sb64
->st_gid
= va
.va_gid
;
1377 sb64
->st_rdev
= va
.va_rdev
;
1378 sb64
->st_size
= va
.va_data_size
;
1379 sb64
->st_atimespec
= va
.va_access_time
;
1380 sb64
->st_mtimespec
= va
.va_modify_time
;
1381 sb64
->st_ctimespec
= va
.va_change_time
;
/* Birth time is stat64-only; zeroed when the FS cannot supply it. */
1382 if (VATTR_IS_SUPPORTED(&va
, va_create_time
)) {
1383 sb64
->st_birthtimespec
= va
.va_create_time
;
1385 sb64
->st_birthtimespec
.tv_sec
= sb64
->st_birthtimespec
.tv_nsec
= 0;
1387 sb64
->st_blksize
= va
.va_iosize
;
1388 sb64
->st_flags
= va
.va_flags
;
/* st_blocks is in 512-byte units, rounded up from the allocated size. */
1389 sb64
->st_blocks
= roundup(va
.va_total_alloc
, 512) / 512;
/* 32-bit stat path — same copies, minus birthtime. (The sb->st_mode
 * assignment is missing from this extraction — TODO confirm upstream.) */
1392 sb
->st_nlink
= VATTR_IS_SUPPORTED(&va
, va_nlink
) ? va
.va_nlink
> UINT16_MAX
? UINT16_MAX
: (u_int16_t
)va
.va_nlink
: 1;
1393 sb
->st_uid
= va
.va_uid
;
1394 sb
->st_gid
= va
.va_gid
;
1395 sb
->st_rdev
= va
.va_rdev
;
1396 sb
->st_size
= va
.va_data_size
;
1397 sb
->st_atimespec
= va
.va_access_time
;
1398 sb
->st_mtimespec
= va
.va_modify_time
;
1399 sb
->st_ctimespec
= va
.va_change_time
;
1400 sb
->st_blksize
= va
.va_iosize
;
1401 sb
->st_flags
= va
.va_flags
;
1402 sb
->st_blocks
= roundup(va
.va_total_alloc
, 512) / 512;
1405 /* if we're interested in extended security data and we got an ACL */
/* No extended security attributes at all => report KAUTH_FILESEC_NONE. */
1407 if (!VATTR_IS_SUPPORTED(&va
, va_acl
) &&
1408 !VATTR_IS_SUPPORTED(&va
, va_uuuid
) &&
1409 !VATTR_IS_SUPPORTED(&va
, va_guuid
)) {
1410 *xsec
= KAUTH_FILESEC_NONE
;
/* Otherwise allocate a filesec sized for the ACL (0 entries if none).
 * Ownership of fsec presumably transfers to the caller via *xsec —
 * the assignment line is missing here; TODO confirm upstream. */
1412 if (VATTR_IS_SUPPORTED(&va
, va_acl
) && (va
.va_acl
!= NULL
)) {
1413 fsec
= kauth_filesec_alloc(va
.va_acl
->acl_entrycount
);
1415 fsec
= kauth_filesec_alloc(0);
1421 fsec
->fsec_magic
= KAUTH_FILESEC_MAGIC
;
/* Owner / group GUIDs: use the FS-supplied value or the null GUID. */
1422 if (VATTR_IS_SUPPORTED(&va
, va_uuuid
)) {
1423 fsec
->fsec_owner
= va
.va_uuuid
;
1425 fsec
->fsec_owner
= kauth_null_guid
;
1427 if (VATTR_IS_SUPPORTED(&va
, va_guuid
)) {
1428 fsec
->fsec_group
= va
.va_guuid
;
1430 fsec
->fsec_group
= kauth_null_guid
;
/* Copy the ACL body into the filesec, or mark it as having no ACL. */
1432 if (VATTR_IS_SUPPORTED(&va
, va_acl
) && (va
.va_acl
!= NULL
)) {
1433 bcopy(va
.va_acl
, &(fsec
->fsec_acl
), KAUTH_ACL_COPYSIZE(va
.va_acl
));
1435 fsec
->fsec_acl
.acl_entrycount
= KAUTH_FILESEC_NOACL
;
1441 /* Do not give the generation number out to unprivileged users */
/* (The zeroing branch bodies for the unprivileged case are missing from
 * this extraction — original lines between 1443 and 1449 were dropped.) */
1442 if (va
.va_gen
&& !vfs_context_issuser(ctx
)) {
1443 if (isstat64
!= 0) {
1449 if (isstat64
!= 0) {
1450 sb64
->st_gen
= va
.va_gen
;
1452 sb
->st_gen
= va
.va_gen
;
/* Release the kernel-allocated ACL returned by vnode_getattr(). */
1458 if (VATTR_IS_SUPPORTED(&va
, va_acl
) && va
.va_acl
!= NULL
) {
1459 kauth_acl_free(va
.va_acl
);
/*
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text and some lines are missing; confirm against upstream xnu.
 *
 * vn_stat(): authorized stat entry point.  Runs the MAC stat check and
 * vnode_authorize() for READ_ATTRIBUTES|READ_SECURITY, then delegates the
 * actual work to vn_stat_noauth().  NOCRED is passed as the file credential.
 */
1465 vn_stat(struct vnode
*vp
, void *sb
, kauth_filesec_t
*xsec
, int isstat64
, vfs_context_t ctx
)
/* MAC policy gate first (error-handling lines missing from this extraction). */
1470 error
= mac_vnode_check_stat(ctx
, NOCRED
, vp
);
/* Then the kauth authorization; failure presumably returns error — the
 * return line inside this branch is missing here. */
1477 if ((error
= vnode_authorize(vp
, NULL
, KAUTH_VNODE_READ_ATTRIBUTES
| KAUTH_VNODE_READ_SECURITY
, ctx
)) != 0) {
/* Authorized: do the unauthenticated stat fill-in. */
1482 return vn_stat_noauth(vp
, sb
, xsec
, isstat64
, ctx
, NOCRED
);
/*
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text and many lines (returns, braces, case labels) are missing.
 *
 * vn_ioctl(): file-table ioctl routine for vnodes.
 * Visible behavior: takes an iocount on the vnode, runs the MAC ioctl
 * check, handles a few commands generically (FIONREAD, FIONBIO/FIOASYNC,
 * FIODTYPE; DKIOCSETBLOCKSIZE is rejected from user space), forwards the
 * rest to VNOP_IOCTL(), and on a successful TIOCSCTTY records this vnode
 * as the session's controlling terminal.
 */
1487 * File table vnode ioctl routine.
1490 vn_ioctl(struct fileproc
*fp
, u_long com
, caddr_t data
, vfs_context_t ctx
)
1492 struct vnode
*vp
= ((struct vnode
*)fp
->f_fglob
->fg_data
);
1495 struct vnode
*ttyvp
;
1496 struct session
* sessp
;
/* Take an iocount; drop-through on failure (return line missing here). */
1498 if ((error
= vnode_getwithref(vp
))) {
1503 error
= mac_vnode_check_ioctl(ctx
, vp
, com
);
/* Dispatch on vnode type; the case labels are missing from this extraction. */
1509 switch (vp
->v_type
) {
/* FIONREAD: bytes remaining = file size minus the fileglob's offset. */
1512 if (com
== FIONREAD
) {
1513 if ((error
= vnode_size(vp
, &file_size
, ctx
)) != 0) {
1516 *(int *)data
= file_size
- fp
->f_fglob
->fg_offset
;
1519 if (com
== FIONBIO
|| com
== FIOASYNC
) { /* XXX */
1532 /* Should not be able to set block size from user space */
1533 if (com
== DKIOCSETBLOCKSIZE
) {
/* FIODTYPE: report the device switch d_type after validating the major
 * number against the block/char device tables. */
1538 if (com
== FIODTYPE
) {
1539 if (vp
->v_type
== VBLK
) {
1540 if (major(vp
->v_rdev
) >= nblkdev
) {
1544 *(int *)data
= bdevsw
[major(vp
->v_rdev
)].d_type
;
1545 } else if (vp
->v_type
== VCHR
) {
1546 if (major(vp
->v_rdev
) >= nchrdev
) {
1550 *(int *)data
= cdevsw
[major(vp
->v_rdev
)].d_type
;
/* Everything else goes to the filesystem. */
1557 error
= VNOP_IOCTL(vp
, com
, data
, fp
->f_fglob
->fg_flag
, ctx
);
/*
 * TIOCSCTTY succeeded: make this vnode the session's controlling tty.
 * The old s_ttyvp is saved in ttyvp; its release presumably happens in
 * lines missing from this extraction — TODO confirm upstream.
 */
1559 if (error
== 0 && com
== TIOCSCTTY
) {
1560 sessp
= proc_session(vfs_context_proc(ctx
));
1562 session_lock(sessp
);
1563 ttyvp
= sessp
->s_ttyvp
;
1564 sessp
->s_ttyvp
= vp
;
1565 sessp
->s_ttyvid
= vnode_vid(vp
);
1566 session_unlock(sessp
);
1567 session_rele(sessp
);
/* Drop the iocount taken by vnode_getwithref(). */
1571 (void)vnode_put(vp
);
/*
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text and some lines are missing; confirm against upstream xnu.
 *
 * vn_select(): file-table select routine for vnodes.
 * Takes an iocount, builds a local vfs_context from the current thread and
 * the fileglob's credential, runs the MAC select check, forwards to
 * VNOP_SELECT(), then drops the iocount.  Note the local `context` is
 * built but VNOP_SELECT is called with the (nominally __unused) ctx
 * parameter — this matches the visible fragments; confirm upstream.
 */
1576 * File table vnode select routine.
1579 vn_select(struct fileproc
*fp
, int which
, void *wql
, __unused vfs_context_t ctx
)
1582 struct vnode
* vp
= (struct vnode
*)fp
->f_fglob
->fg_data
;
1583 struct vfs_context context
;
1585 if ((error
= vnode_getwithref(vp
)) == 0) {
1586 context
.vc_thread
= current_thread();
1587 context
.vc_ucred
= fp
->f_fglob
->fg_cred
;
1591 * XXX We should use a per thread credential here; minimally,
1592 * XXX the process credential should have a persistent
1593 * XXX reference on it before being passed in here.
1595 error
= mac_vnode_check_select(ctx
, vp
, which
);
1598 error
= VNOP_SELECT(vp
, which
, fp
->f_fglob
->fg_flag
, wql
, ctx
);
1600 (void)vnode_put(vp
);
/*
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text and some lines are missing (e.g. most of the struct flock
 * initializer at original line ~1619); confirm against upstream xnu.
 *
 * vn_closefile(): file-table close routine.  If the fileglob holds a
 * flock(2)-style lock (FHASLOCK) or an OFD lock (FG_HAS_OFDLOCK), release
 * them via VNOP_ADVLOCK (errors deliberately ignored), then perform the
 * real close with vn_close() and drop the iocount.
 */
1606 * File table vnode close routine.
1609 vn_closefile(struct fileglob
*fg
, vfs_context_t ctx
)
1611 struct vnode
*vp
= fg
->fg_data
;
1614 if ((error
= vnode_getwithref(vp
)) == 0) {
1615 if (FILEGLOB_DTYPE(fg
) == DTYPE_VNODE
&&
1616 ((fg
->fg_flag
& FHASLOCK
) != 0 ||
1617 (fg
->fg_lflags
& FG_HAS_OFDLOCK
) != 0)) {
/* Fragment of the whole-file struct flock initializer (`lf`); the
 * surrounding fields are missing from this extraction. */
1619 .l_whence
= SEEK_SET
,
/* Release a flock(2)-style lock keyed on the fileglob. */
1625 if ((fg
->fg_flag
& FHASLOCK
) != 0) {
1626 (void) VNOP_ADVLOCK(vp
, (caddr_t
)fg
,
1627 F_UNLCK
, &lf
, F_FLOCK
, ctx
, NULL
);
/* Release an OFD (open-file-description) lock, likewise keyed on fg. */
1630 if ((fg
->fg_lflags
& FG_HAS_OFDLOCK
) != 0) {
1631 (void) VNOP_ADVLOCK(vp
, (caddr_t
)fg
,
1632 F_UNLCK
, &lf
, F_OFD_LOCK
, ctx
, NULL
);
/* The actual close, with the open flags recorded in the fileglob. */
1635 error
= vn_close(vp
, fg
->fg_flag
, ctx
);
1636 (void) vnode_put(vp
);
/*
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text; the switch header, break statements, and default case are
 * missing from this view.  Confirm against upstream xnu.
 *
 * vn_pathconf(): pathconf(2) backend for vnodes.  Answers a set of _PC_*
 * names generically (mostly hard-coded or XXX-estimated values) and falls
 * through to VNOP_PATHCONF() for names the VFS layer cannot answer itself.
 */
1642 * Returns: 0 Success
1646 vn_pathconf(vnode_t vp
, int name
, int32_t *retval
, vfs_context_t ctx
)
1649 struct vfs_attr vfa
;
1652 case _PC_EXTENDED_SECURITY_NP
:
1653 *retval
= vfs_extendedsecurity(vnode_mount(vp
)) ? 1 : 0;
1655 case _PC_AUTH_OPAQUE_NP
:
1656 *retval
= vfs_authopaque(vnode_mount(vp
));
1658 case _PC_2_SYMLINKS
:
1659 *retval
= 1; /* XXX NOTSUP on MSDOS, etc. */
1661 case _PC_ALLOC_SIZE_MIN
:
1662 *retval
= 1; /* XXX lie: 1 byte */
1664 case _PC_ASYNC_IO
: /* unistd.h: _POSIX_ASYNCHRONUS_IO */
1665 *retval
= 1; /* [AIO] option is supported */
1667 case _PC_PRIO_IO
: /* unistd.h: _POSIX_PRIORITIZED_IO */
1668 *retval
= 0; /* [PIO] option is not supported */
1670 case _PC_REC_INCR_XFER_SIZE
:
1671 *retval
= 4096; /* XXX go from MIN to MAX 4K at a time */
1673 case _PC_REC_MIN_XFER_SIZE
:
1674 *retval
= 4096; /* XXX recommend 4K minimum reads/writes */
1676 case _PC_REC_MAX_XFER_SIZE
:
1677 *retval
= 65536; /* XXX recommend 64K maximum reads/writes */
1679 case _PC_REC_XFER_ALIGN
:
1680 *retval
= 4096; /* XXX recommend page aligned buffers */
1682 case _PC_SYMLINK_MAX
:
1683 *retval
= 255; /* Minimum acceptable POSIX value */
1685 case _PC_SYNC_IO
: /* unistd.h: _POSIX_SYNCHRONIZED_IO */
1686 *retval
= 0; /* [SIO] option is not supported */
1688 case _PC_XATTR_SIZE_BITS
:
1689 /* The number of bits used to store maximum extended
1690 * attribute size in bytes. For example, if the maximum
1691 * attribute size supported by a file system is 128K, the
1692 * value returned will be 18. However a value 18 can mean
1693 * that the maximum attribute size can be anywhere from
1694 * (256KB - 1) to 128KB. As a special case, the resource
1695 * fork can have much larger size, and some file system
1696 * specific extended attributes can have smaller and preset
1697 * size; for example, Finder Info is always 32 bytes.
1699 memset(&vfa
, 0, sizeof(vfa
));
1701 VFSATTR_WANTED(&vfa
, f_capabilities
);
/* Only trust VOL_CAP_INT_EXTENDED_ATTR if the FS both reports the bit
 * as valid AND sets it. */
1702 if (vfs_getattr(vnode_mount(vp
), &vfa
, ctx
) == 0 &&
1703 (VFSATTR_IS_SUPPORTED(&vfa
, f_capabilities
)) &&
1704 (vfa
.f_capabilities
.capabilities
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_EXTENDED_ATTR
) &&
1705 (vfa
.f_capabilities
.valid
[VOL_CAPABILITIES_INTERFACES
] & VOL_CAP_INT_EXTENDED_ATTR
)) {
1706 /* Supports native extended attributes */
1707 error
= VNOP_PATHCONF(vp
, name
, retval
, ctx
);
1709 /* Number of bits used to represent the maximum size of
1710 * extended attribute stored in an Apple Double file.
1712 *retval
= AD_XATTR_SIZE_BITS
;
/* Default: let the filesystem answer (presumably the switch default —
 * the surrounding label is missing from this extraction). */
1716 error
= VNOP_PATHCONF(vp
, name
, retval
, ctx
);
/*
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the text and many lines (filter case labels, error assignments, lock
 * calls) are missing; confirm against upstream xnu.
 *
 * vn_kqfilt_add(): attach a knote to the vnode backing a fileproc.
 * Visible behavior: take an iocount refusing dead vnodes, vet the vnode
 * type per filter (FIFOs must use fifofs; character devices are handed to
 * spec_kqfilter; non-regular otherwise rejected), run the MAC kqfilter
 * check, wire the knote (kn_hook = vp, kn_hookid = vid, EVFILTID_VN),
 * attach it to v_knotes, compute the initial state via filt_vnode_common,
 * and ask the FS for remove notifications via VNOP_MONITOR (failure
 * ignored).  On error the knote is marked EV_ERROR with the code in
 * kn_data.
 */
1724 vn_kqfilt_add(struct fileproc
*fp
, struct knote
*kn
,
1725 struct kevent_internal_s
*kev
, vfs_context_t ctx
)
1731 vp
= (struct vnode
*)fp
->f_fglob
->fg_data
;
1734 * Don't attach a knote to a dead vnode.
1736 if ((error
= vget_internal(vp
, 0, VNODE_NODEAD
)) == 0) {
/* Per-filter validation; the case labels are missing from this view. */
1737 switch (kn
->kn_filter
) {
1740 if (vnode_isfifo(vp
)) {
1741 /* We'll only watch FIFOs that use our fifofs */
1742 if (!(vp
->v_fifoinfo
&& vp
->v_fifoinfo
->fi_readsock
)) {
1745 } else if (!vnode_isreg(vp
)) {
/* Character devices get a chance to claim the knote themselves. */
1746 if (vnode_ischr(vp
)) {
1747 result
= spec_kqfilter(vp
, kn
, kev
);
1748 if ((kn
->kn_flags
& EV_ERROR
) == 0) {
1749 /* claimed by a special device */
1765 error
= mac_vnode_check_kqfilter(ctx
, fp
->f_fglob
->fg_cred
, kn
, vp
);
/* Wire the knote to this vnode; kn_hookid pins the vnode generation so
 * detach/touch can detect recycling. */
1772 kn
->kn_hook
= (void*)vp
;
1773 kn
->kn_hookid
= vnode_vid(vp
);
1774 kn
->kn_filtid
= EVFILTID_VN
;
1777 KNOTE_ATTACH(&vp
->v_knotes
, kn
);
1778 result
= filt_vnode_common(kn
, vp
, 0);
1782 * Ask the filesystem to provide remove notifications,
1783 * but ignore failure
1785 VNOP_MONITOR(vp
, 0, VNODE_MONITOR_BEGIN
, (void*) kn
, ctx
);
/* Error path: report through the knote itself. */
1793 kn
->kn_flags
= EV_ERROR
;
1794 kn
->kn_data
= error
;
/*
 * NOTE(review): garbled extraction — some lines (locking, vnode_put) are
 * missing from this view; confirm against upstream xnu.
 *
 * filt_vndetach(): detach a vnode knote.  Re-acquires the vnode via the
 * vid stashed in kn_hookid (a recycled vnode means there is nothing to
 * detach from), removes the knote from v_knotes, and tells the FS to stop
 * monitoring via VNOP_MONITOR(..., VNODE_MONITOR_END, ...).
 */
1801 filt_vndetach(struct knote
*kn
)
1803 vfs_context_t ctx
= vfs_context_current();
1805 vp
= (struct vnode
*)kn
->kn_hook
;
/* Vid mismatch => vnode was recycled since attach; bail out. */
1806 if (vnode_getwithvid(vp
, kn
->kn_hookid
)) {
1811 KNOTE_DETACH(&vp
->v_knotes
, kn
);
1815 * Tell a (generally networked) filesystem that we're no longer watching
1816 * If the FS wants to track contexts, it should still be using the one from
1817 * the VNODE_MONITOR_BEGIN.
1819 VNOP_MONITOR(vp
, 0, VNODE_MONITOR_END
, (void*)kn
, ctx
);
/*
 * NOTE(review): garbled extraction — the error branches and the VREG
 * poll-case lines are missing from this view; confirm against upstream.
 *
 * vnode_readable_data_count(): byte count reported to EVFILT_READ knotes.
 * FIFO: ask fifofs for the buffered character count.  VREG: remaining
 * bytes = ubc-cached file size minus current_offset, clamped into int64
 * range (the clamp bodies are missing here).  Anything else panics.
 */
1825 * Used for EVFILT_READ
1827 * Takes only VFIFO or VREG. vnode is locked. We handle the "poll" case
1828 * differently than the regular case for VREG files. If not in poll(),
1829 * then we need to know current fileproc offset for VREG.
1832 vnode_readable_data_count(vnode_t vp
, off_t current_offset
, int ispoll
)
1834 if (vnode_isfifo(vp
)) {
1837 int err
= fifo_charcount(vp
, &cnt
);
1839 return (int64_t)cnt
;
1845 } else if (vnode_isreg(vp
)) {
/* Bytes between the fileproc offset and EOF per the ubc info. */
1851 amount
= vp
->v_un
.vu_ubcinfo
->ui_size
- current_offset
;
/* Clamp to the representable int64 range (clamp values missing here). */
1852 if (amount
> INT64_MAX
) {
1854 } else if (amount
< INT64_MIN
) {
1857 return (int64_t)amount
;
1860 panic("Should never have an EVFILT_READ except for reg or fifo.");
/*
 * NOTE(review): garbled extraction — error branches and the VREG return
 * are missing from this view; confirm against upstream xnu.
 *
 * vnode_writable_space_count(): space reported to EVFILT_WRITE knotes.
 * FIFO: free space in the pipe buffer from fifofs.  VREG: always writable
 * (the `return 1` presumably sits in the missing lines).  Anything else
 * panics — note the panic string says EVFILT_READ although this is the
 * write path; that wording matches the visible source (upstream quirk).
 */
1866 * Used for EVFILT_WRITE.
1868 * For regular vnodes, we can always write (1). For named pipes,
1869 * see how much space there is in the buffer. Nothing else is covered.
1872 vnode_writable_space_count(vnode_t vp
)
1874 if (vnode_isfifo(vp
)) {
1877 int err
= fifo_freespace(vp
, &spc
);
1879 return (intptr_t)spc
;
1885 } else if (vnode_isreg(vp
)) {
1888 panic("Should never have an EVFILT_READ except for reg or fifo.");
/*
 * NOTE(review): garbled extraction — case labels, return statements and
 * several branch bodies are missing from this view; confirm upstream.
 *
 * filt_vnode_common(): decide whether a vnode knote should fire.
 * Caller must hold the vnode lock (asserted below).  A NOTE_REVOKE hint
 * marks the knote EV_EOF|EV_ONESHOT and, for EVFILT_VNODE knotes that
 * asked for it, latches NOTE_REVOKE into kn_fflags.  EVFILT_READ/WRITE
 * compute kn_data from the readable/writable count helpers and activate
 * when nonzero; EVFILT_VNODE activates when any requested event bit in
 * the hint has been latched into kn_fflags.
 */
1894 * Determine whether this knote should be active
1896 * This is kind of subtle.
1897 * --First, notice if the vnode has been revoked: in so, override hint
1898 * --EVFILT_READ knotes are checked no matter what the hint is
1899 * --Other knotes activate based on hint.
1900 * --If hint is revoke, set special flags and activate
1903 filt_vnode_common(struct knote
*kn
, vnode_t vp
, long hint
)
/* Contract: vnode lock held by the caller. */
1907 lck_mtx_assert(&vp
->v_lock
, LCK_MTX_ASSERT_OWNED
);
1909 /* Special handling for vnodes that are in recycle or already gone */
1910 if (NOTE_REVOKE
== hint
) {
1911 kn
->kn_flags
|= (EV_EOF
| EV_ONESHOT
);
/* Only latch NOTE_REVOKE for EVFILT_VNODE knotes that subscribed to it. */
1914 if ((kn
->kn_filter
== EVFILT_VNODE
) && (kn
->kn_sfflags
& NOTE_REVOKE
)) {
1915 kn
->kn_fflags
|= NOTE_REVOKE
;
/* Per-filter activation check; case labels missing from this view. */
1918 switch (kn
->kn_filter
) {
/* EVFILT_READ: readable byte count given the fileproc's offset; the
 * EV_POLL flag selects the poll()-style computation. */
1920 kn
->kn_data
= vnode_readable_data_count(vp
, kn
->kn_fp
->f_fglob
->fg_offset
, (kn
->kn_flags
& EV_POLL
));
1922 if (kn
->kn_data
!= 0) {
/* EVFILT_WRITE: active when there is writable space. */
1927 kn
->kn_data
= vnode_writable_space_count(vp
);
1929 if (kn
->kn_data
!= 0) {
1934 /* Check events this note matches against the hint */
1935 if (kn
->kn_sfflags
& hint
) {
1936 kn
->kn_fflags
|= hint
; /* Set which event occurred */
1938 if (kn
->kn_fflags
!= 0) {
/* Unknown filter on a vnode knote is a programming error. */
1943 panic("Invalid knote filter on a vnode!\n");
/*
 * filt_vnode(): thin event-filter entry point — recover the vnode from
 * kn_hook and delegate the activation decision to filt_vnode_common().
 * (NOTE(review): garbled extraction; line numbers are fused into the text.)
 */
1950 filt_vnode(struct knote
*kn
, long hint
)
1952 vnode_t vp
= (struct vnode
*)kn
->kn_hook
;
1954 return filt_vnode_common(kn
, vp
, hint
);
/*
 * NOTE(review): garbled extraction — locking lines, the `hint` setup and
 * the return are missing from this view; confirm against upstream xnu.
 *
 * filt_vntouch(): refresh a vnode knote with new registration data.
 * Re-acquires the vnode by iocount+vid (refusing dead/recycled vnodes —
 * the failure branch body is missing here), accepts the caller's new
 * fflags mask, re-evaluates activation via filt_vnode_common(), then
 * drops the iocount.
 */
1958 filt_vntouch(struct knote
*kn
, struct kevent_internal_s
*kev
)
1960 vnode_t vp
= (struct vnode
*)kn
->kn_hook
;
/* Vid check guards against the vnode having been recycled since attach. */
1965 if (vnode_getiocount(vp
, kn
->kn_hookid
, VNODE_NODEAD
| VNODE_WITHID
) != 0) {
1970 /* accept new input fflags mask */
1971 kn
->kn_sfflags
= kev
->fflags
;
1973 activate
= filt_vnode_common(kn
, vp
, hint
);
1976 vnode_put_locked(vp
);
/*
 * NOTE(review): garbled extraction — locking lines, the `hint` setup, the
 * EV_CLEAR branch body and the return are missing from this view; confirm
 * against upstream xnu.
 *
 * filt_vnprocess(): deliver a vnode knote's event.  Re-acquires the vnode
 * by iocount+vid (dead/recycled vnodes are refused — failure branch body
 * missing here), re-evaluates activation, copies the kevent out to *kev,
 * and for EV_CLEAR knotes presumably resets the latched state (the branch
 * body is missing).  Finishes by dropping the iocount.
 */
1984 filt_vnprocess(struct knote
*kn
, struct filt_process_s
*data
, struct kevent_internal_s
*kev
)
1986 #pragma unused(data)
1987 vnode_t vp
= (struct vnode
*)kn
->kn_hook
;
/* Vid check guards against the vnode having been recycled since attach. */
1992 if (vnode_getiocount(vp
, kn
->kn_hookid
, VNODE_NODEAD
| VNODE_WITHID
) != 0) {
1996 activate
= filt_vnode_common(kn
, vp
, hint
);
/* Hand the event to the caller. */
1998 *kev
= kn
->kn_kevent
;
1999 if (kn
->kn_flags
& EV_CLEAR
) {
2005 /* Definitely need to unlock, may need to put */
2007 vnode_put_locked(vp
);