/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *    The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
/* Temporary workaround for ubc.h until <rdar://4714366> is resolved */
#define ubc_setcred ubc_setcred_deprecated
#include <sys/ubc.h>
#undef ubc_setcred
int ubc_setcred(struct vnode *, struct proc *);
#include <sys/fsevents.h>
#include <sys/kdebug.h>
#include <sys/xattr.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <security/mac_framework.h>

#include <IOKit/IOBSD.h>
#include <libkern/section_keywords.h>
static int vn_closefile(struct fileglob *fp, vfs_context_t ctx);
static int vn_ioctl(struct fileproc *fp, u_long com, caddr_t data,
    vfs_context_t ctx);
static int vn_read(struct fileproc *fp, struct uio *uio, int flags,
    vfs_context_t ctx);
static int vn_write(struct fileproc *fp, struct uio *uio, int flags,
    vfs_context_t ctx);
static int vn_select(struct fileproc *fp, int which, void *wql,
    vfs_context_t ctx);
static int vn_kqfilter(struct fileproc *fp, struct knote *kn,
    struct kevent_qos_s *kev);
static void filt_vndetach(struct knote *kn);
static int filt_vnode(struct knote *kn, long hint);
static int filt_vnode_common(struct knote *kn, struct kevent_qos_s *kev,
    vnode_t vp, long hint);
static int vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx);
const struct fileops vnops = {
    .fo_type     = DTYPE_VNODE,
    .fo_read     = vn_read,
    .fo_write    = vn_write,
    .fo_ioctl    = vn_ioctl,
    .fo_select   = vn_select,
    .fo_close    = vn_closefile,
    .fo_drain    = fo_no_drain,
    .fo_kqfilter = vn_kqfilter,
};
static int filt_vntouch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_vnprocess(struct knote *kn, struct kevent_qos_s *kev);
SECURITY_READ_ONLY_EARLY(struct filterops) vnode_filtops = {
    .f_detach  = filt_vndetach,
    .f_event   = filt_vnode,
    .f_touch   = filt_vntouch,
    .f_process = filt_vnprocess,
};
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VNOP_OPEN or VNOP_CREATE routine.
 *
 * XXX the profusion of interfaces here is probably a bad thing.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
    return vn_open_modflags(ndp, &fmode, cmode);
}
int
vn_open_modflags(struct nameidata *ndp, int *fmodep, int cmode)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, cmode);

    return vn_open_auth(ndp, fmodep, &va);
}
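
/*
 * Illustrative usage sketch (not part of the original source): a minimal
 * kernel-internal open built on vn_open(), loosely modeled on what
 * vnode_open() in kpi_vfs.c does.  The helper name example_vn_open() and the
 * "/tmp/example" path are assumptions for illustration only.
 */
#if 0   /* example only, not compiled */
static int
example_vn_open(vfs_context_t ctx, vnode_t *vpp)
{
    struct nameidata nd;
    int fmode = O_CREAT | FREAD | FWRITE;
    int error;

    NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
        CAST_USER_ADDR_T("/tmp/example"), ctx);
    error = vn_open(&nd, fmode, 0600);
    if (error == 0) {
        *vpp = nd.ni_vp;        /* returned with an iocount and a usecount */
    }
    return error;
}
#endif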
static int
vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx)
{
    if ((error = vnode_ref_ext(vp, fmode, 0)) != 0) {

    /* Call out to allow 3rd party notification of open.
     * Ignore result of kauth_authorize_fileop call.
     */
    mac_vnode_notify_open(ctx, vp, fmode);

    kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
/*
 * May do nameidone() to allow safely adding an FSEvent.  Cue off of ni_dvp to
 * determine whether that has happened.
 */
static int
vn_open_auth_do_create(struct nameidata *ndp, struct vnode_attr *vap, int fmode,
    boolean_t *did_create, boolean_t *did_open, vfs_context_t ctx)
{
    vnode_t dvp = ndp->ni_dvp;

    batched = vnode_compound_open_available(ndp->ni_dvp);

    VATTR_SET(vap, va_type, VREG);
    if (fmode & O_EXCL) {
        vap->va_vaflags |= VA_EXCLUSIVE;
    }

    if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK) {
        if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0) {

        if ((error = vnode_makenamedstream(dvp, &ndp->ni_vp, XATTR_RESOURCEFORK_NAME, 0, ctx)) != 0) {

        if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0) {

        error = vn_create(dvp, &ndp->ni_vp, ndp, vap, VN_CREATE_DOOPEN, fmode, &status, ctx);

        *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? TRUE : FALSE;

        if (error == EKEEPLOOKING) {
            panic("EKEEPLOOKING, but we did a create?");

            panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?");

            if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
                panic("EKEEPLOOKING, but continue flag not set?");
            }

            /*
             * Do NOT drop the dvp: we need everything to continue the lookup.
             */

        *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? 1 : 0;

        int update_flags = 0;

        // Make sure the name & parent pointers are hooked up
        if (vp->v_name == NULL) {
            update_flags |= VNODE_UPDATE_NAME;
        }
        if (vp->v_parent == NULLVP) {
            update_flags |= VNODE_UPDATE_PARENT;
        }

        vnode_update_identity(vp, dvp, ndp->ni_cnd.cn_nameptr, ndp->ni_cnd.cn_namelen, ndp->ni_cnd.cn_hash, update_flags);

        ndp->ni_dvp = NULLVP;

        if (need_fsevent(FSE_CREATE_FILE, vp)) {
            add_fsevent(FSE_CREATE_FILE, ctx,

    if (ndp->ni_dvp != NULLVP) {
        ndp->ni_dvp = NULLVP;

/*
 * This is the number of times we'll loop in vn_open_auth without explicitly
 * yielding the CPU when we determine we have to retry.
 */
#define RETRY_NO_YIELD_COUNT 5
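
/*
 * Added note (not in the original file): with the BSD default of hz == 100,
 * each retry past RETRY_NO_YIELD_COUNT sleeps for roughly nretries * 10 ms,
 * capped at one second, before the lookup/open is redriven; see the tsleep()
 * call in the retry path of vn_open_auth() below.
 */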
/*
 * Open a file with authorization, updating the contents of the structures
 * pointed to by ndp, fmodep, and vap as necessary to perform the requested
 * operation.  This function is used for both opens of existing files, and
 * creation of new files.
 *
 * Parameters:  ndp                     The nameidata pointer describing the
 *                                      file
 *              fmodep                  A pointer to an int containing the mode
 *                                      information to be used for the open
 *              vap                     A pointer to the vnode attribute
 *                                      descriptor to be used for the open
 *
 * Indirect:    *                       Contents of the data structures pointed
 *                                      to by the parameters are modified as
 *                                      necessary to the requested operation.
 *
 * Notes:       The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *              The contents of '*ndp' will be modified, based on the other
 *              arguments to this function, and to return file and directory
 *              data necessary to satisfy the requested operation.
 *
 *              If the file does not exist and we are creating it, then the
 *              O_TRUNC flag will be cleared in '*fmodep' to indicate to the
 *              caller that the file was not truncated.
 *
 *              If the file exists and the O_EXCL flag was not specified, then
 *              the O_CREAT flag will be cleared in '*fmodep' to indicate to
 *              the caller that the existing file was merely opened rather
 *              than created.
 *
 *              The contents of '*vap' will be modified as necessary to
 *              complete the operation, including setting of supported
 *              attribute, clearing of fields containing unsupported attributes
 *              in the request, if the request proceeds without them, etc..
 *
 * XXX:         This function is too complicated in acting on its arguments
 *
 * XXX:         We should enumerate the possible errno values here, and where
 *              in the code they originated.
 */
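
/*
 * Illustrative sketch (assumption, loosely modeled on the open1() caller in
 * vfs_syscalls.c): after vn_open_auth() returns, the updated fmode tells the
 * caller whether an existing file was opened (O_CREAT cleared) or a fresh
 * file was created (O_TRUNC cleared, so no separate truncate is needed).
 */
#if 0   /* example only, not compiled */
    int fmode = FFLAGS(O_CREAT | O_TRUNC | O_RDWR);

    error = vn_open_auth(&nd, &fmode, &va);
    if (error == 0) {
        if ((fmode & O_CREAT) == 0) {
            /* an existing file was opened, not created */
        }
        if ((fmode & O_TRUNC) == 0) {
            /* a new file was created; it is already empty */
        }
    }
#endif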
int
vn_open_auth(struct nameidata *ndp, int *fmodep, struct vnode_attr *vap)
{
    vfs_context_t ctx = ndp->ni_cnd.cn_context;
    uint32_t origcnflags;
    boolean_t did_create;
    boolean_t need_vnop_open;
    boolean_t ref_failed;

    need_vnop_open = TRUE;

    origcnflags = ndp->ni_cnd.cn_flags;
    // If raw encrypted mode is requested, handle that here
    if (VATTR_IS_ACTIVE(vap, va_dataprotect_flags)
        && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) {

    if (fmode & O_CREAT) {
        if ((fmode & O_DIRECTORY)) {

        ndp->ni_cnd.cn_nameiop = CREATE;

        ndp->ni_op = OP_LINK;

        /* Inherit USEDVP, vnode_open() supported flags only */
        ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
        ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF | AUDITVNPATH1;
        ndp->ni_flag = NAMEI_COMPOUNDOPEN;

        /* open calls are allowed for resource forks. */
        ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;

        if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0 && (origcnflags & FOLLOW) != 0) {
            ndp->ni_cnd.cn_flags |= FOLLOW;
        }

continue_create_lookup:
        if ((error = namei(ndp))) {

        batched = vnode_compound_open_available(dvp);
        /* not found, create */

            /* must have attributes for a new file */

            /*
             * Attempt a create.  For a system supporting compound VNOPs, we may
             * find an existing file or create one; in either case, we will already
             * have the file open and no VNOP_OPEN() will be needed.
             */
            error = vn_open_auth_do_create(ndp, vap, fmode, &did_create, &did_open, ctx);

            /*
             * Detected a node that the filesystem couldn't handle.  Don't call
             * nameidone() yet, because we need that path buffer.
             */
            if (error == EKEEPLOOKING) {
                panic("EKEEPLOOKING from a filesystem that doesn't support compound VNOPs?");

                goto continue_create_lookup;

            panic("Shouldn't have a dvp here.");

            /*
             * Check for a create race.
             */
            if ((error == EEXIST) && !(fmode & O_EXCL)) {

            need_vnop_open = !did_open;

        if (fmode & O_EXCL) {

            /*
             * We have a vnode.  Use compound open if available
             * or else fall through to "traditional" path.  Note: can't
             * do a compound open for root, because the parent belongs
             */
            if (error == 0 && batched && (vnode_mount(dvp) == vnode_mount(vp))) {
                error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);

                need_vnop_open = FALSE;
            } else if (error == EKEEPLOOKING) {
                if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
                    panic("EKEEPLOOKING, but continue flag not set?");
                }
                goto continue_create_lookup;
            }

        ndp->ni_dvp = NULLVP;
        ndp->ni_cnd.cn_nameiop = LOOKUP;
        /* Inherit USEDVP, vnode_open() supported flags only */
        ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
        ndp->ni_cnd.cn_flags |= FOLLOW | LOCKLEAF | AUDITVNPATH1 | WANTPARENT;

        /* open calls are allowed for resource forks. */
        ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;

        if (fmode & FENCRYPTED) {
            ndp->ni_cnd.cn_flags |= CN_RAW_ENCRYPTED | CN_SKIPNAMECACHE;
        }
        ndp->ni_flag = NAMEI_COMPOUNDOPEN;

        /* preserve NOFOLLOW from vnode_open() */
        if (fmode & O_NOFOLLOW || fmode & O_SYMLINK || (origcnflags & FOLLOW) == 0) {
            ndp->ni_cnd.cn_flags &= ~FOLLOW;
        }

        /* Do a lookup, possibly going directly to filesystem for compound operation */
        do {
            if ((error = namei(ndp))) {

            /* Check for batched lookup-open */
            batched = vnode_compound_open_available(dvp);
            if (batched && ((vp == NULLVP) || (vnode_mount(dvp) == vnode_mount(vp)))) {
                error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);

                need_vnop_open = FALSE;
            } else if (error == EKEEPLOOKING) {
                if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
                    panic("EKEEPLOOKING, but continue flag not set?");
                }
            }
        } while (error == EKEEPLOOKING);

        ndp->ni_dvp = NULLVP;
    /*
     * By this point, nameidone() is called, dvp iocount is dropped,
     * and dvp pointer is cleared.
     */
    if (ndp->ni_dvp != NULLVP) {
        panic("Haven't cleaned up adequately in vn_open_auth()");
    }

    /*
     * Expect to use this code for filesystems without compound VNOPs, for the root
     * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
     * and for shadow files, which do not live on the same filesystems as their "parents."
     */
    if (need_vnop_open) {
        if (batched && !vnode_isvroot(vp) && !vnode_isnamedstream(vp)) {
            panic("Why am I trying to use VNOP_OPEN() on anything other than the root or a named stream?");
        }

        error = vn_authorize_open_existing(vp, &ndp->ni_cnd, fmode, ctx, NULL);

        if (VATTR_IS_ACTIVE(vap, va_dataprotect_flags)
            && ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) {
            /* Don't allow unencrypted io request from user space unless entitled */
            boolean_t entitled = FALSE;

            entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.security.file-unencrypt-access");

            fmode |= FUNENCRYPTED;
        }

        error = VNOP_OPEN(vp, fmode, ctx);

        need_vnop_open = FALSE;
    }

    // if the vnode is tagged VOPENEVT and the current process
    // has the P_CHECKOPENEVT flag set, then we or in the O_EVTONLY
    // flag to the open mode so that this open won't count against
    // the vnode when carbon delete() does a vnode_isinuse() to see
    // if a file is currently in use.  this allows spotlight
    // importers to not interfere with carbon apps that depend on
    // the no-delete-if-busy semantics of carbon delete().
    if (!did_create && (vp->v_flag & VOPENEVT) && (current_proc()->p_flag & P_CHECKOPENEVT)) {
    /*
     * Grab reference, etc.
     */
    error = vn_open_auth_finish(vp, fmode, ctx);

    /* Compound VNOP open is responsible for doing the truncate */
    if (batched || did_create) {

    /* Opened either explicitly or by a batched create */
    if (!need_vnop_open) {
        VNOP_CLOSE(vp, fmode, ctx);
    }

    /* Aggressively recycle shadow files if we error'd out during open() */
    if ((vnode_isnamedstream(vp)) &&
        (vp->v_parent != NULLVP) &&
        (vnode_isshadow(vp))) {

    /*
     * Check for a race against unlink.  We had a vnode
     * but according to vnode_authorize or VNOP_OPEN it
     *
     * EREDRIVEOPEN: means that we were hit by the tty allocation race.
     */
    if (((error == ENOENT) && (*fmodep & O_CREAT)) || (error == EREDRIVEOPEN) || ref_failed) {
        /*
         * We'll retry here but it may be possible that we get
         * into a retry "spin" inside the kernel and not allow
         * threads, which need to run in order for the retry
         * loop to end, to run. An example is an open of a
         * terminal which is getting revoked and we spin here
         * without yielding because namei and VNOP_OPEN are
         * successful but vnode_ref fails. The revoke needs
         * threads with an iocount to run but if we spin here we
         * may possibly be blocking other threads from running.
         *
         * We start yielding the CPU after some number of
         * retries for increasing durations. Note that this is
         * still a loop without an exit condition.
         */
        if (nretries > RETRY_NO_YIELD_COUNT) {
            /* Every hz/100 ticks is 10 msecs ... */
            tsleep(&nretries, PVFS, "vn_open_auth_retry",
                MIN((nretries * (hz / 100)), hz));
        }
#if vn_access_DEPRECATED
/*
 * Authorize an action against a vnode.  This has been the canonical way to
 * ensure that the credential/process/etc. referenced by a vfs_context
 * is granted the rights called out in 'mode' against the vnode 'vp'.
 *
 * Unfortunately, the use of VREAD/VWRITE/VEXEC makes it very difficult
 * to add support for more rights.  As such, this interface will be deprecated
 * and callers will use vnode_authorize instead.
 */
int
vn_access(vnode_t vp, int mode, vfs_context_t context)
{
    kauth_action_t action;

    action = 0;
    if (mode & VREAD) {
        action |= KAUTH_VNODE_READ_DATA;
    }
    if (mode & VWRITE) {
        action |= KAUTH_VNODE_WRITE_DATA;
    }
    if (mode & VEXEC) {
        action |= KAUTH_VNODE_EXECUTE;
    }

    return vnode_authorize(vp, NULL, action, context);
}
#endif  /* vn_access_DEPRECATED */
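
/*
 * Illustrative sketch (assumption): the vnode_authorize() equivalent of the
 * deprecated vn_access(vp, VREAD | VWRITE, ctx), as suggested by the note
 * above.
 */
#if 0   /* example only, not compiled */
    error = vnode_authorize(vp, NULL,
        KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, ctx);
#endif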
int
vn_close(struct vnode *vp, int flags, vfs_context_t ctx)
{
    /* Sync data from resource fork shadow file if needed. */
    if ((vp->v_flag & VISNAMEDSTREAM) &&
        (vp->v_parent != NULLVP) &&
        vnode_isshadow(vp)) {
        if (flags & FWASWRITTEN) {
            flusherror = vnode_flushnamedstream(vp->v_parent, vp, ctx);
        }
    }

    /*
     * If vnode @vp belongs to a chardev or a blkdev then it is handled
     * specially.  We first drop its user reference count @vp->v_usecount
     * before calling VNOP_CLOSE().  This was done historically to ensure
     * that the last close of a special device vnode performed some
     * conditional cleanups.  Now we still need to drop this reference here
     * to ensure that devfsspec_close() can check if the vnode is still in
     * use.
     */
    if (vnode_isspec(vp)) {
        (void)vnode_rele_ext(vp, flags, 0);
    }

    /*
     * On HFS, we flush when the last writer closes.  We do this
     * because resource fork vnodes hold a reference on data fork
     * vnodes and that will prevent them from getting VNOP_INACTIVE
     * which will delay when we flush cached data.  In future, we
     * might find it beneficial to do this for all file systems.
     * Note that it's OK to access v_writecount without the lock
     * in this case.
     */
    if (vp->v_tag == VT_HFS && (flags & FWRITE) && vp->v_writecount == 1) {
        VNOP_FSYNC(vp, MNT_NOWAIT, ctx);
    }

    error = VNOP_CLOSE(vp, flags, ctx);

    if (flags & FWASWRITTEN) {
        if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
            add_fsevent(FSE_CONTENT_MODIFIED, ctx,

    if (!vnode_isspec(vp)) {
        (void)vnode_rele_ext(vp, flags, 0);
    }
static int
vn_read_swapfile(struct vnode *vp, uio_t uio)
{
    off_t swap_count, this_count;
    off_t file_end, read_end;

    /*
     * Reading from a swap file will get you zeroes.
     */

    swap_count = uio_resid(uio);

    file_end = ubc_getsize(vp);
    read_end = uio->uio_offset + uio_resid(uio);
    if (uio->uio_offset >= file_end) {
        /* uio starts after end of file: nothing to read */
    } else if (read_end > file_end) {
        /* uio extends beyond end of file: stop before that */
        swap_count -= (read_end - file_end);
    }

    while (swap_count > 0) {
        if (my_swap_page == NULL) {
            MALLOC(my_swap_page, char *, PAGE_SIZE,
                M_TEMP, M_WAITOK);
            memset(my_swap_page, '\0', PAGE_SIZE);
            /* add an end-of-line to keep line counters happy */
            my_swap_page[PAGE_SIZE - 1] = '\n';
        }
        this_count = swap_count;
        if (this_count > PAGE_SIZE) {
            this_count = PAGE_SIZE;
        }

        prev_resid = uio_resid(uio);
        error = uiomove((caddr_t) my_swap_page,

        swap_count -= (prev_resid - uio_resid(uio));
    }
    if (my_swap_page != NULL) {
        FREE(my_swap_page, M_TEMP);
    }

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
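
/*
 * Illustrative usage sketch (not part of the original source) for the
 * vn_rdwr() helper defined below: reading a small header from a vnode.  The
 * buffer size, offset, and short-read handling are example choices, not
 * requirements.
 */
#if 0   /* example only, not compiled */
    char hdr[512];
    int resid = 0;
    int error;

    error = vn_rdwr(UIO_READ, vp, (caddr_t)hdr, sizeof(hdr), /* offset */ 0,
        UIO_SYSSPACE, IO_NODELOCKED, vfs_context_ucred(ctx), &resid,
        vfs_context_proc(ctx));
    if (error == 0 && resid != 0) {
        error = EIO;    /* short read */
    }
#endif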
    result = vn_rdwr_64(rw,
        (uint64_t)(uintptr_t)base,

    /* "resid" should be bounded above by "len," which is an int */
    if (aresid != NULL) {

    struct vfs_context context;

    char uio_buf[UIO_SIZEOF(1)];

    context.vc_thread = current_thread();
    context.vc_ucred = cred;

    if (UIO_SEG_IS_USER_SPACE(segflg)) {
        spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
    } else {
        spacetype = UIO_SYSSPACE;
    }

    auio = uio_createwithbuffer(1, offset, spacetype, rw,
        &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, base, len);

    /*
     * IO_NOAUTH should be re-examined.
     * Likely that mediation should be performed in caller.
     */
    if ((ioflg & IO_NOAUTH) == 0) {
        /* passed cred is fp->f_cred */
        if (rw == UIO_READ) {
            error = mac_vnode_check_read(&context, cred, vp);
        } else {
            error = mac_vnode_check_write(&context, cred, vp);
        }
    }

    if (rw == UIO_READ) {
        if (vnode_isswap(vp) && ((ioflg & IO_SWAP_DISPATCH) == 0)) {
            error = vn_read_swapfile(vp, auio);
        } else {
            error = VNOP_READ(vp, auio, ioflg, &context);
        }
    } else {
        error = VNOP_WRITE(vp, auio, ioflg, &context);
    }

        *aresid = uio_resid(auio);
    } else if (uio_resid(auio) && error == 0) {
static void
vn_offset_lock(struct fileglob *fg)
{
    lck_mtx_lock_spin(&fg->fg_lock);
    while (fg->fg_lflags & FG_OFF_LOCKED) {
        fg->fg_lflags |= FG_OFF_LOCKWANT;
        msleep(&fg->fg_lflags, &fg->fg_lock, PVFS | PSPIN,
            "fg_offset_lock_wait", 0);
    }
    fg->fg_lflags |= FG_OFF_LOCKED;
    lck_mtx_unlock(&fg->fg_lock);
}

static void
vn_offset_unlock(struct fileglob *fg)
{
    lck_mtx_lock_spin(&fg->fg_lock);
    if (fg->fg_lflags & FG_OFF_LOCKWANT) {

    fg->fg_lflags &= ~(FG_OFF_LOCKED | FG_OFF_LOCKWANT);
    lck_mtx_unlock(&fg->fg_lock);

    wakeup(&fg->fg_lflags);
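
/*
 * Illustrative sketch (assumption, mirroring vn_read()/vn_write() below):
 * implicit-offset I/O on regular files brackets the fg_offset
 * read-modify-write with the helpers above, so concurrent readers and
 * writers on the same fileglob see a consistent file offset.
 */
#if 0   /* example only, not compiled */
    vn_offset_lock(fp->f_fglob);
    uio->uio_offset = fp->f_fglob->fg_offset;
    error = VNOP_READ(vp, uio, ioflag, ctx);
    fp->f_fglob->fg_offset += count - uio_resid(uio);
    vn_offset_unlock(fp->f_fglob);
#endif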
/*
 * File table vnode read routine.
 */
static int
vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
{
    int offset_locked = 0;

    vp = (struct vnode *)fp->f_fglob->fg_data;
    if ((error = vnode_getwithref(vp))) {

    error = mac_vnode_check_read(ctx, vfs_context_ucred(ctx), vp);

        (void)vnode_put(vp);

    /* This signals to VNOP handlers that this read came from a file table read */
    ioflag = IO_SYSCALL_DISPATCH;

    if (fp->f_fglob->fg_flag & FNONBLOCK) {
        ioflag |= IO_NDELAY;
    }
    if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) {
        ioflag |= IO_NOCACHE;
    }
    if (fp->f_fglob->fg_flag & FENCRYPTED) {
        ioflag |= IO_ENCRYPTED;
    }
    if (fp->f_fglob->fg_flag & FUNENCRYPTED) {
        ioflag |= IO_SKIP_ENCRYPTION;
    }
    if (fp->f_fglob->fg_flag & O_EVTONLY) {
        ioflag |= IO_EVTONLY;
    }
    if (fp->f_fglob->fg_flag & FNORDAHEAD) {

    if ((flags & FOF_OFFSET) == 0) {
        if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
            vn_offset_lock(fp->f_fglob);

        uio->uio_offset = fp->f_fglob->fg_offset;

    count = uio_resid(uio);

    if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) {
        /* special case for swap files */
        error = vn_read_swapfile(vp, uio);
    } else {
        error = VNOP_READ(vp, uio, ioflag, ctx);
    }

    if ((flags & FOF_OFFSET) == 0) {
        fp->f_fglob->fg_offset += count - uio_resid(uio);
        if (offset_locked) {
            vn_offset_unlock(fp->f_fglob);
        }
    }

    (void)vnode_put(vp);
/*
 * File table vnode write routine.
 */
static int
vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
{
    int clippedsize = 0;
    int partialwrite = 0;
    int residcount, oldcount;
    int offset_locked = 0;
    proc_t p = vfs_context_proc(ctx);

    vp = (struct vnode *)fp->f_fglob->fg_data;
    if ((error = vnode_getwithref(vp))) {

    error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp);

        (void)vnode_put(vp);

    /*
     * IO_SYSCALL_DISPATCH signals to VNOP handlers that this write came from
     * a file table write
     */
    ioflag = (IO_UNIT | IO_SYSCALL_DISPATCH);

    if (vp->v_type == VREG && (fp->f_fglob->fg_flag & O_APPEND)) {
        ioflag |= IO_APPEND;
    }
    if (fp->f_fglob->fg_flag & FNONBLOCK) {
        ioflag |= IO_NDELAY;
    }
    if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) {
        ioflag |= IO_NOCACHE;
    }
    if (fp->f_fglob->fg_flag & FNODIRECT) {
        ioflag |= IO_NODIRECT;
    }
    if (fp->f_fglob->fg_flag & FSINGLE_WRITER) {
        ioflag |= IO_SINGLE_WRITER;
    }
    if (fp->f_fglob->fg_flag & O_EVTONLY) {
        ioflag |= IO_EVTONLY;
    }

    /*
     * Treat synchronous mounts and O_FSYNC on the fd as equivalent.
     *
     * XXX We treat O_DSYNC as O_FSYNC for now, since we can not delay
     * XXX the non-essential metadata without some additional VFS work;
     * XXX the intent at this point is to plumb the interface for it.
     */
    if ((fp->f_fglob->fg_flag & (O_FSYNC | O_DSYNC)) ||
        (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) {

    if ((flags & FOF_OFFSET) == 0) {
        if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
            vn_offset_lock(fp->f_fglob);

        uio->uio_offset = fp->f_fglob->fg_offset;
        count = uio_resid(uio);

    if (((flags & FOF_OFFSET) == 0) &&
        vfs_context_proc(ctx) && (vp->v_type == VREG) &&
        (((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) ||
        ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)))) {
        /*
         * If the requested residual would cause us to go past the
         * administrative limit, then we need to adjust the residual
         * down to cause fewer bytes than requested to be written.  If
         * we can't do that (e.g. the residual is already 1 byte),
         * then we fail the write with EFBIG.
         */
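        /*
         * Added example (not in the original file): with rlim_cur == 1000,
         * uio_offset == 900 and a 200-byte request, clippedsize becomes 100,
         * so only 100 bytes are handed to VNOP_WRITE(); the clipped amount is
         * added back to the residual afterwards, so the caller sees that only
         * 100 of the 200 requested bytes were written.
         */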
        residcount = uio_resid(uio);
        if ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
            clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur;
        } else if ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)) {
            clippedsize = (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset);
        }
        if (clippedsize >= residcount) {
            psignal(p, SIGXFSZ);

        uio_setresid(uio, residcount - clippedsize);

    if ((flags & FOF_OFFSET) != 0) {
        /* for pwrite, append should be ignored */
        ioflag &= ~IO_APPEND;
        if (p && (vp->v_type == VREG) &&
            ((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
            psignal(p, SIGXFSZ);

        if (p && (vp->v_type == VREG) &&
            ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
            //Debugger("vn_bwrite:overstepping the bounds");
            residcount = uio_resid(uio);
            clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur;

            uio_setresid(uio, residcount - clippedsize);
        }
    }

    error = VNOP_WRITE(vp, uio, ioflag, ctx);

        oldcount = uio_resid(uio);
        uio_setresid(uio, oldcount + clippedsize);

    if ((flags & FOF_OFFSET) == 0) {
        if (ioflag & IO_APPEND) {
            fp->f_fglob->fg_offset = uio->uio_offset;
        } else {
            fp->f_fglob->fg_offset += count - uio_resid(uio);
        }
        if (offset_locked) {
            vn_offset_unlock(fp->f_fglob);
        }
    }

    /*
     * Set the credentials on successful writes
     */
    if ((error == 0) && (vp->v_tag == VT_NFS) && (UBCINFOEXISTS(vp))) {
        /*
         * When called from aio subsystem, we only have the proc from
         * which to get the credential, at this point, so use that
         * instead.  This means aio functions are incompatible with
         * per-thread credentials (aio operations are proxied).  We
         * can't easily correct the aio vs. settid race in this case
         * anyway, so we disallow it.
         */
        if ((flags & FOF_PCRED) == 0) {
            ubc_setthreadcred(vp, p, current_thread());
        }
    }

    (void)vnode_put(vp);

    if (offset_locked) {
        vn_offset_unlock(fp->f_fglob);
    }
    (void)vnode_put(vp);
/*
 * File table vnode stat routine.
 *
 * Returns:     0                       Success
 */
int
vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64,
    int needsrealdev, vfs_context_t ctx, struct ucred *file_cred)
{
    struct vnode_attr va;
    kauth_filesec_t fsec;
    struct stat *sb = (struct stat *)0;        /* warning avoidance ; protected by isstat64 */
    struct stat64 *sb64 = (struct stat64 *)0;  /* warning avoidance ; protected by isstat64 */

    if (isstat64 != 0) {
        sb64 = (struct stat64 *)sbptr;
    } else {
        sb = (struct stat *)sbptr;
    }

    memset(&va, 0, sizeof(va));

    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_type);
    VATTR_WANTED(&va, va_nlink);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    VATTR_WANTED(&va, va_rdev);
    VATTR_WANTED(&va, va_data_size);
    VATTR_WANTED(&va, va_access_time);
    VATTR_WANTED(&va, va_modify_time);
    VATTR_WANTED(&va, va_change_time);
    VATTR_WANTED(&va, va_create_time);
    VATTR_WANTED(&va, va_flags);
    VATTR_WANTED(&va, va_gen);
    VATTR_WANTED(&va, va_iosize);
    /* lower layers will synthesise va_total_alloc from va_data_size if required */
    VATTR_WANTED(&va, va_total_alloc);

    VATTR_WANTED(&va, va_uuuid);
    VATTR_WANTED(&va, va_guuid);
    VATTR_WANTED(&va, va_acl);

    va.va_vaflags = VA_REALFSID;

    error = vnode_getattr(vp, &va, ctx);
    /*
     * Give MAC policies a chance to reject or filter the attributes
     * returned by the filesystem.  Note that MAC policies are consulted
     * *after* calling the filesystem because filesystems can return more
     * attributes than were requested so policies wouldn't be authoritative
     * if consulted beforehand.  This also gives policies an opportunity
     * to change the values of attributes retrieved.
     */
    error = mac_vnode_check_getattr(ctx, file_cred, vp, &va);

    /*
     * Copy from vattr table
     */
    if (isstat64 != 0) {
        sb64->st_dev = va.va_fsid;
        sb64->st_ino = (ino64_t)va.va_fileid;
    } else {
        sb->st_dev = va.va_fsid;
        sb->st_ino = (ino_t)va.va_fileid;
    }

    switch (vp->v_type) {

    if (isstat64 != 0) {
        sb64->st_mode = mode;
        sb64->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? va.va_nlink > UINT16_MAX ? UINT16_MAX : (u_int16_t)va.va_nlink : 1;
        sb64->st_uid = va.va_uid;
        sb64->st_gid = va.va_gid;
        sb64->st_rdev = va.va_rdev;
        sb64->st_size = va.va_data_size;
        sb64->st_atimespec = va.va_access_time;
        sb64->st_mtimespec = va.va_modify_time;
        sb64->st_ctimespec = va.va_change_time;
        if (VATTR_IS_SUPPORTED(&va, va_create_time)) {
            sb64->st_birthtimespec = va.va_create_time;
        } else {
            sb64->st_birthtimespec.tv_sec = sb64->st_birthtimespec.tv_nsec = 0;
        }
        sb64->st_blksize = va.va_iosize;
        sb64->st_flags = va.va_flags;
        sb64->st_blocks = roundup(va.va_total_alloc, 512) / 512;
        sb->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? va.va_nlink > UINT16_MAX ? UINT16_MAX : (u_int16_t)va.va_nlink : 1;
        sb->st_uid = va.va_uid;
        sb->st_gid = va.va_gid;
        sb->st_rdev = va.va_rdev;
        sb->st_size = va.va_data_size;
        sb->st_atimespec = va.va_access_time;
        sb->st_mtimespec = va.va_modify_time;
        sb->st_ctimespec = va.va_change_time;
        sb->st_blksize = va.va_iosize;
        sb->st_flags = va.va_flags;
        sb->st_blocks = roundup(va.va_total_alloc, 512) / 512;
    }

    /* if we're interested in extended security data and we got an ACL */
    if (!VATTR_IS_SUPPORTED(&va, va_acl) &&
        !VATTR_IS_SUPPORTED(&va, va_uuuid) &&
        !VATTR_IS_SUPPORTED(&va, va_guuid)) {
        *xsec = KAUTH_FILESEC_NONE;
    } else {
        if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
            fsec = kauth_filesec_alloc(va.va_acl->acl_entrycount);
        } else {
            fsec = kauth_filesec_alloc(0);
        }

        fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
        if (VATTR_IS_SUPPORTED(&va, va_uuuid)) {
            fsec->fsec_owner = va.va_uuuid;
        } else {
            fsec->fsec_owner = kauth_null_guid;
        }
        if (VATTR_IS_SUPPORTED(&va, va_guuid)) {
            fsec->fsec_group = va.va_guuid;
        } else {
            fsec->fsec_group = kauth_null_guid;
        }
        if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
            __nochk_bcopy(va.va_acl, &(fsec->fsec_acl), KAUTH_ACL_COPYSIZE(va.va_acl));
        } else {
            fsec->fsec_acl.acl_entrycount = KAUTH_FILESEC_NOACL;
        }
    /* Do not give the generation number out to unprivileged users */
    if (va.va_gen && !vfs_context_issuser(ctx)) {
        if (isstat64 != 0) {

    } else {
        if (isstat64 != 0) {
            sb64->st_gen = va.va_gen;
        } else {
            sb->st_gen = va.va_gen;
        }
    }

    if (VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL) {
        kauth_acl_free(va.va_acl);
    }
int
vn_stat(struct vnode *vp, void *sb, kauth_filesec_t *xsec, int isstat64, int needsrealdev, vfs_context_t ctx)
{
    error = mac_vnode_check_stat(ctx, NOCRED, vp);

    if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, ctx)) != 0) {

    return vn_stat_noauth(vp, sb, xsec, isstat64, needsrealdev, ctx, NOCRED);
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
{
    struct vnode *vp = ((struct vnode *)fp->f_fglob->fg_data);
    struct vnode *ttyvp;
    struct session *sessp;

    if ((error = vnode_getwithref(vp))) {

    error = mac_vnode_check_ioctl(ctx, vp, com);

    switch (vp->v_type) {

        if (com == FIONREAD) {
            if ((error = vnode_size(vp, &file_size, ctx)) != 0) {

            *(int *)data = file_size - fp->f_fglob->fg_offset;
        }
        if (com == FIONBIO || com == FIOASYNC) {        /* XXX */

        if (com == TIOCREVOKE) {

        /* Should not be able to set block size from user space */
        if (com == DKIOCSETBLOCKSIZE) {

        if (com == FIODTYPE) {
            if (vp->v_type == VBLK) {
                if (major(vp->v_rdev) >= nblkdev) {

                *(int *)data = bdevsw[major(vp->v_rdev)].d_type;
            } else if (vp->v_type == VCHR) {
                if (major(vp->v_rdev) >= nchrdev) {

                *(int *)data = cdevsw[major(vp->v_rdev)].d_type;

        error = VNOP_IOCTL(vp, com, data, fp->f_fglob->fg_flag, ctx);

        if (error == 0 && com == TIOCSCTTY) {
            sessp = proc_session(vfs_context_proc(ctx));

            session_lock(sessp);
            ttyvp = sessp->s_ttyvp;
            sessp->s_ttyvp = vp;
            sessp->s_ttyvid = vnode_vid(vp);
            session_unlock(sessp);
            session_rele(sessp);
        }

    (void)vnode_put(vp);
/*
 * File table vnode select routine.
 */
static int
vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx)
{
    struct vnode *vp = (struct vnode *)fp->f_fglob->fg_data;
    struct vfs_context context;

    if ((error = vnode_getwithref(vp)) == 0) {
        context.vc_thread = current_thread();
        context.vc_ucred = fp->f_fglob->fg_cred;

        /*
         * XXX We should use a per thread credential here; minimally,
         * XXX the process credential should have a persistent
         * XXX reference on it before being passed in here.
         */
        error = mac_vnode_check_select(ctx, vp, which);

        error = VNOP_SELECT(vp, which, fp->f_fglob->fg_flag, wql, ctx);

        (void)vnode_put(vp);
/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct fileglob *fg, vfs_context_t ctx)
{
    struct vnode *vp = fg->fg_data;

    if ((error = vnode_getwithref(vp)) == 0) {
        if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE &&
            ((fg->fg_flag & FHASLOCK) != 0 ||
            (fg->fg_lflags & FG_HAS_OFDLOCK) != 0)) {
            struct flock lf = {
                .l_whence = SEEK_SET,

            if ((fg->fg_flag & FHASLOCK) != 0) {
                (void) VNOP_ADVLOCK(vp, (caddr_t)fg,
                    F_UNLCK, &lf, F_FLOCK, ctx, NULL);
            }

            if ((fg->fg_lflags & FG_HAS_OFDLOCK) != 0) {
                (void) VNOP_ADVLOCK(vp, (caddr_t)fg,
                    F_UNLCK, &lf, F_OFD_LOCK, ctx, NULL);
            }
        }
        error = vn_close(vp, fg->fg_flag, ctx);
        (void) vnode_put(vp);
/*
 * Returns:     0                       Success
 */
int
vn_pathconf(vnode_t vp, int name, int32_t *retval, vfs_context_t ctx)
{
    struct vfs_attr vfa;

    case _PC_EXTENDED_SECURITY_NP:
        *retval = vfs_extendedsecurity(vnode_mount(vp)) ? 1 : 0;
    case _PC_AUTH_OPAQUE_NP:
        *retval = vfs_authopaque(vnode_mount(vp));
    case _PC_2_SYMLINKS:
        *retval = 1;    /* XXX NOTSUP on MSDOS, etc. */
    case _PC_ALLOC_SIZE_MIN:
        *retval = 1;    /* XXX lie: 1 byte */
    case _PC_ASYNC_IO:      /* unistd.h: _POSIX_ASYNCHRONOUS_IO */
        *retval = 1;    /* [AIO] option is supported */
    case _PC_PRIO_IO:       /* unistd.h: _POSIX_PRIORITIZED_IO */
        *retval = 0;    /* [PIO] option is not supported */
    case _PC_REC_INCR_XFER_SIZE:
        *retval = 4096; /* XXX go from MIN to MAX 4K at a time */
    case _PC_REC_MIN_XFER_SIZE:
        *retval = 4096; /* XXX recommend 4K minimum reads/writes */
    case _PC_REC_MAX_XFER_SIZE:
        *retval = 65536; /* XXX recommend 64K maximum reads/writes */
    case _PC_REC_XFER_ALIGN:
        *retval = 4096; /* XXX recommend page aligned buffers */
    case _PC_SYMLINK_MAX:
        *retval = 255;  /* Minimum acceptable POSIX value */
    case _PC_SYNC_IO:       /* unistd.h: _POSIX_SYNCHRONIZED_IO */
        *retval = 0;    /* [SIO] option is not supported */
    case _PC_XATTR_SIZE_BITS:
        /* The number of bits used to store maximum extended
         * attribute size in bytes.  For example, if the maximum
         * attribute size supported by a file system is 128K, the
         * value returned will be 18.  However a value 18 can mean
         * that the maximum attribute size can be anywhere from
         * 128KB to (256KB - 1).  As a special case, the resource
         * fork can have much larger size, and some file system
         * specific extended attributes can have smaller and preset
         * size; for example, Finder Info is always 32 bytes.
         */
        memset(&vfa, 0, sizeof(vfa));

        VFSATTR_WANTED(&vfa, f_capabilities);
        if (vfs_getattr(vnode_mount(vp), &vfa, ctx) == 0 &&
            (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) &&
            (vfa.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
            (vfa.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
            /* Supports native extended attributes */
            error = VNOP_PATHCONF(vp, name, retval, ctx);
        } else {
            /* Number of bits used to represent the maximum size of
             * extended attribute stored in an Apple Double file.
             */
            *retval = AD_XATTR_SIZE_BITS;
        }

        error = VNOP_PATHCONF(vp, name, retval, ctx);
static int
vn_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
{
    vfs_context_t ctx = vfs_context_current();

    vp = (struct vnode *)fp->f_fglob->fg_data;

    /*
     * Don't attach a knote to a dead vnode.
     */
    if ((error = vget_internal(vp, 0, VNODE_NODEAD)) == 0) {
        switch (kn->kn_filter) {

            if (vnode_isfifo(vp)) {
                /* We'll only watch FIFOs that use our fifofs */
                if (!(vp->v_fifoinfo && vp->v_fifoinfo->fi_readsock)) {

            } else if (!vnode_isreg(vp)) {
                if (vnode_ischr(vp)) {
                    result = spec_kqfilter(vp, kn, kev);
                    if ((kn->kn_flags & EV_ERROR) == 0) {
                        /* claimed by a special device */

        error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);

        kn->kn_hook = (void*)vp;
        kn->kn_filtid = EVFILTID_VN;

        KNOTE_ATTACH(&vp->v_knotes, kn);
        result = filt_vnode_common(kn, NULL, vp, 0);

        /*
         * Ask the filesystem to provide remove notifications,
         * but ignore failure
         */
        VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);

    knote_set_error(kn, error);
static void
filt_vndetach(struct knote *kn)
{
    vfs_context_t ctx = vfs_context_current();
    struct vnode *vp = (struct vnode *)kn->kn_hook;
    uint32_t vid = vnode_vid(vp);
    if (vnode_getwithvid(vp, vid)) {

    KNOTE_DETACH(&vp->v_knotes, kn);

    /*
     * Tell a (generally networked) filesystem that we're no longer watching
     * If the FS wants to track contexts, it should still be using the one from
     * the VNODE_MONITOR_BEGIN.
     */
    VNOP_MONITOR(vp, 0, VNODE_MONITOR_END, (void*)kn, ctx);
/*
 * Used for EVFILT_READ
 *
 * Takes only VFIFO or VREG. vnode is locked.  We handle the "poll" case
 * differently than the regular case for VREG files.  If not in poll(),
 * then we need to know current fileproc offset for VREG.
 */
static int64_t
vnode_readable_data_count(vnode_t vp, off_t current_offset, int ispoll)
{
    if (vnode_isfifo(vp)) {

        int err = fifo_charcount(vp, &cnt);

        return (int64_t)cnt;

    } else if (vnode_isreg(vp)) {

        amount = vp->v_un.vu_ubcinfo->ui_size - current_offset;
        if (amount > INT64_MAX) {

        } else if (amount < INT64_MIN) {

        }
        return (int64_t)amount;
    }

    panic("Should never have an EVFILT_READ except for reg or fifo.");
/*
 * Used for EVFILT_WRITE.
 *
 * For regular vnodes, we can always write (1).  For named pipes,
 * see how much space there is in the buffer.  Nothing else is covered.
 */
static intptr_t
vnode_writable_space_count(vnode_t vp)
{
    if (vnode_isfifo(vp)) {

        int err = fifo_freespace(vp, &spc);

        return (intptr_t)spc;

    } else if (vnode_isreg(vp)) {
    panic("Should never have an EVFILT_WRITE except for reg or fifo.");
/*
 * Determine whether this knote should be active
 *
 * This is kind of subtle.
 *      --First, notice if the vnode has been revoked: if so, override hint
 *      --EVFILT_READ knotes are checked no matter what the hint is
 *      --Other knotes activate based on hint.
 *      --If hint is revoke, set special flags and activate
 */
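
/*
 * Illustrative sketch (assumption, userspace perspective): the kind of
 * kevent() registration this filter ultimately serves.  An EVFILT_VNODE
 * knote with NOTE_DELETE | NOTE_WRITE on an open fd is activated by the
 * hints delivered to filt_vnode_common() below.
 */
#if 0   /* example only, not compiled (userspace code) */
    #include <sys/event.h>

    int kq = kqueue();
    struct kevent kev;

    EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
        NOTE_DELETE | NOTE_WRITE, 0, NULL);
    kevent(kq, &kev, 1, NULL, 0, NULL);     /* register */
    kevent(kq, NULL, 0, &kev, 1, NULL);     /* block for one event */
#endif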
static int
filt_vnode_common(struct knote *kn, struct kevent_qos_s *kev, vnode_t vp, long hint)
{
    lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

    /* Special handling for vnodes that are in recycle or already gone */
    if (NOTE_REVOKE == hint) {
        kn->kn_flags |= (EV_EOF | EV_ONESHOT);

        if ((kn->kn_filter == EVFILT_VNODE) && (kn->kn_sfflags & NOTE_REVOKE)) {
            kn->kn_fflags |= NOTE_REVOKE;
        }

    switch (kn->kn_filter) {

        data = vnode_readable_data_count(vp, kn->kn_fp->f_fglob->fg_offset, (kn->kn_flags & EV_POLL));
        activate = (data != 0);

        data = vnode_writable_space_count(vp);
        activate = (data != 0);

        /* Check events this note matches against the hint */
        if (kn->kn_sfflags & hint) {
            kn->kn_fflags |= hint;  /* Set which event occurred */
        }
        activate = (kn->kn_fflags != 0);

        panic("Invalid knote filter on a vnode!\n");

    if (kev && activate) {
        knote_fill_kevent(kn, kev, data);
static int
filt_vnode(struct knote *kn, long hint)
{
    vnode_t vp = (struct vnode *)kn->kn_hook;

    return filt_vnode_common(kn, NULL, vp, hint);
}

static int
filt_vntouch(struct knote *kn, struct kevent_qos_s *kev)
{
    vnode_t vp = (struct vnode *)kn->kn_hook;
    uint32_t vid = vnode_vid(vp);

    if (vnode_getiocount(vp, vid, VNODE_NODEAD | VNODE_WITHID) != 0) {

    /* accept new input fflags mask */
    kn->kn_sfflags = kev->fflags;

    activate = filt_vnode_common(kn, NULL, vp, hint);

    vnode_put_locked(vp);

static int
filt_vnprocess(struct knote *kn, struct kevent_qos_s *kev)
{
    vnode_t vp = (struct vnode *)kn->kn_hook;
    uint32_t vid = vnode_vid(vp);

    if (vnode_getiocount(vp, vid, VNODE_NODEAD | VNODE_WITHID) != 0) {

    activate = filt_vnode_common(kn, kev, vp, hint);

    /* Definitely need to unlock, may need to put */

    vnode_put_locked(vp);