/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
/* Temporary workaround for ubc.h until <rdar://4714366> is resolved */
#define ubc_setcred ubc_setcred_deprecated
#include <sys/ubc.h>
#undef ubc_setcred
int ubc_setcred(struct vnode *, struct proc *);
#include <sys/fsevents.h>
#include <sys/kdebug.h>
#include <sys/xattr.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <security/mac_framework.h>

#include <IOKit/IOBSD.h>
static int vn_closefile(struct fileglob *fp, vfs_context_t ctx);
static int vn_ioctl(struct fileproc *fp, u_long com, caddr_t data,
		vfs_context_t ctx);
static int vn_read(struct fileproc *fp, struct uio *uio, int flags,
		vfs_context_t ctx);
static int vn_write(struct fileproc *fp, struct uio *uio, int flags,
		vfs_context_t ctx);
static int vn_select(struct fileproc *fp, int which, void *wql,
		vfs_context_t ctx);
static int vn_kqfilt_add(struct fileproc *fp, struct knote *kn,
		vfs_context_t ctx);
static void filt_vndetach(struct knote *kn);
static int filt_vnode(struct knote *kn, long hint);
static int filt_vnode_common(struct knote *kn, vnode_t vp, long hint);
static int vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx);

static int vn_kqfilt_remove(struct vnode *vp, uintptr_t ident,
		vfs_context_t ctx);
const struct fileops vnops = {
	.fo_type = DTYPE_VNODE,
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_select = vn_select,
	.fo_close = vn_closefile,
	.fo_kqfilter = vn_kqfilt_add,
};
static int filt_vntouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_vnprocess(struct knote *kn, struct filt_process_s *data,
		struct kevent_internal_s *kev);

struct filterops vnode_filtops = {
	.f_detach = filt_vndetach,
	.f_event = filt_vnode,
	.f_touch = filt_vntouch,
	.f_process = filt_vnprocess,
};
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VNOP_OPEN or VNOP_CREATE routine.
 *
 * XXX the profusion of interfaces here is probably a bad thing.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	return(vn_open_modflags(ndp, &fmode, cmode));
}
int
vn_open_modflags(struct nameidata *ndp, int *fmodep, int cmode)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, cmode);

	return(vn_open_auth(ndp, fmodep, &va));
}
static int
vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx)
{
	int error;

	if ((error = vnode_ref_ext(vp, fmode, 0)) != 0) {
		goto bad;
	}

	/* Call out to allow 3rd party notification of open.
	 * Ignore result of kauth_authorize_fileop call.
	 */
	mac_vnode_notify_open(ctx, vp, fmode);
	kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
	    (uintptr_t)vp, 0);

	return 0;

bad:
	return error;
}
/*
 * May do nameidone() to allow safely adding an FSEvent.  Cue off of ni_dvp to
 * determine whether that has happened.
 */
static int
vn_open_auth_do_create(struct nameidata *ndp, struct vnode_attr *vap, int fmode,
		boolean_t *did_create, boolean_t *did_open, vfs_context_t ctx)
{
	uint32_t status = 0;
	vnode_t dvp = ndp->ni_dvp;
	int batched;
	int error;
	vnode_t vp;

	batched = vnode_compound_open_available(ndp->ni_dvp);

	VATTR_SET(vap, va_type, VREG);
	if (fmode & O_EXCL)
		vap->va_vaflags |= VA_EXCLUSIVE;

	if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK) {
		if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0)
			goto out;
		if ((error = vnode_makenamedstream(dvp, &ndp->ni_vp,
		    XATTR_RESOURCEFORK_NAME, 0, ctx)) != 0)
			goto out;
	}

	if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0)
		goto out;

	error = vn_create(dvp, &ndp->ni_vp, ndp, vap, VN_CREATE_DOOPEN,
	    fmode, &status, ctx);

	*did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? TRUE : FALSE;

	if (error == EKEEPLOOKING) {
		if (*did_create)
			panic("EKEEPLOOKING, but we did a create?");
		if (!batched)
			panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?");
		if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
			panic("EKEEPLOOKING, but continue flag not set?");
		}
		/*
		 * Do NOT drop the dvp: we need everything to continue the lookup.
		 */
		return error;
	}

	*did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? 1 : 0;

	if (*did_create) {
		int update_flags = 0;

		/* Make sure the name & parent pointers are hooked up */
		if (vp->v_name == NULL)
			update_flags |= VNODE_UPDATE_NAME;
		if (vp->v_parent == NULLVP)
			update_flags |= VNODE_UPDATE_PARENT;

		if (update_flags)
			vnode_update_identity(vp, dvp, ndp->ni_cnd.cn_nameptr,
			    ndp->ni_cnd.cn_namelen, ndp->ni_cnd.cn_hash, update_flags);

		ndp->ni_dvp = NULLVP;

		if (need_fsevent(FSE_CREATE_FILE, vp)) {
			add_fsevent(FSE_CREATE_FILE, ctx,
			    FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}
out:
	if (ndp->ni_dvp != NULLVP) {
		vnode_put(dvp);
		ndp->ni_dvp = NULLVP;
	}

	return error;
}
/*
 * This is the number of times we'll loop in vn_open_auth without explicitly
 * yielding the CPU when we determine we have to retry.
 */
#define RETRY_NO_YIELD_COUNT	5
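
/*
 * Added note (not in the original source): the retry path at the bottom of
 * vn_open_auth() only starts sleeping once nretries exceeds this count.  It
 * then sleeps for MIN(nretries * (hz / 100), hz) ticks per iteration; with
 * the kernel's hz of 100 that is roughly 10 ms per accumulated retry (for
 * example, the 6th retry sleeps about 60 ms), capped at one second.
 */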
/*
 * Open a file with authorization, updating the contents of the structures
 * pointed to by ndp, fmodep, and vap as necessary to perform the requested
 * operation.  This function is used for both opens of existing files, and
 * creation of new files.
 *
 * Parameters:	ndp			The nameidata pointer describing the
 *					file
 *		fmodep			A pointer to an int containing the mode
 *					information to be used for the open
 *		vap			A pointer to the vnode attribute
 *					descriptor to be used for the open
 *
 * Indirect:	*			Contents of the data structures pointed
 *					to by the parameters are modified as
 *					necessary to the requested operation.
 *
 * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *		The contents of '*ndp' will be modified, based on the other
 *		arguments to this function, and to return file and directory
 *		data necessary to satisfy the requested operation.
 *
 *		If the file does not exist and we are creating it, then the
 *		O_TRUNC flag will be cleared in '*fmodep' to indicate to the
 *		caller that the file was not truncated.
 *
 *		If the file exists and the O_EXCL flag was not specified, then
 *		the O_CREAT flag will be cleared in '*fmodep' to indicate to
 *		the caller that the existing file was merely opened rather
 *		than created.
 *
 *		The contents of '*vap' will be modified as necessary to
 *		complete the operation, including setting of supported
 *		attributes, clearing of fields containing unsupported attributes
 *		in the request, if the request proceeds without them, etc.
 *
 * XXX:		This function is too complicated in acting on its arguments.
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
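/*
 * Illustrative usage sketch (added commentary, not in the original source):
 * the simplest caller is vn_open_modflags() above, which builds a vnode_attr
 * carrying only va_mode and lets vn_open_auth() rewrite *fmodep as described
 * in the notes above:
 *
 *	struct vnode_attr va;
 *	VATTR_INIT(&va);
 *	VATTR_SET(&va, va_mode, cmode);
 *	error = vn_open_auth(ndp, &fmode, &va);
 */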
int
vn_open_auth(struct nameidata *ndp, int *fmodep, struct vnode_attr *vap)
{
	struct vnode *vp;
	struct vnode *dvp;
	vfs_context_t ctx = ndp->ni_cnd.cn_context;
	int error;
	int fmode;
	uint32_t origcnflags;
	boolean_t did_create;
	boolean_t did_open;
	boolean_t need_vnop_open;
	boolean_t batched;
	boolean_t ref_failed;
	int nretries = 0;

again:
	error = 0;
	batched = FALSE;
	did_create = FALSE;
	did_open = FALSE;
	need_vnop_open = TRUE;
	ref_failed = FALSE;
	fmode = *fmodep;
	origcnflags = ndp->ni_cnd.cn_flags;

	// If raw encrypted mode is requested, handle that here
	if (VATTR_IS_ACTIVE(vap, va_dataprotect_flags)
	    && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) {
		fmode |= FENCRYPTED;
	}
	if (fmode & O_CREAT) {
		if ((fmode & O_DIRECTORY)) {
			error = EINVAL;
			goto out;
		}
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_op = OP_LINK;

		/* Inherit USEDVP, vnode_open() supported flags only */
		ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
		ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF | AUDITVNPATH1;
		ndp->ni_flag = NAMEI_COMPOUNDOPEN;

		/* open calls are allowed for resource forks. */
		ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;

		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0 && (origcnflags & FOLLOW) != 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;

continue_create_lookup:
		if ( (error = namei(ndp)) )
			goto out;

		dvp = ndp->ni_dvp;
		vp = ndp->ni_vp;

		batched = vnode_compound_open_available(dvp);

		/* not found, create */

		/* must have attributes for a new file */

		/*
		 * Attempt a create.  For a system supporting compound VNOPs, we may
		 * find an existing file or create one; in either case, we will already
		 * have the file open and no VNOP_OPEN() will be needed.
		 */
		error = vn_open_auth_do_create(ndp, vap, fmode, &did_create, &did_open, ctx);

		/*
		 * Detected a node that the filesystem couldn't handle.  Don't call
		 * nameidone() yet, because we need that path buffer.
		 */
		if (error == EKEEPLOOKING) {
			if (!batched)
				panic("EKEEPLOOKING from a filesystem that doesn't support compound VNOPs?");
			goto continue_create_lookup;
		}

		if (dvp) {
			panic("Shouldn't have a dvp here.");
		}
		/*
		 * Check for a create race.
		 */
		if ((error == EEXIST) && !(fmode & O_EXCL)) {
			error = 0;
			fmode &= ~O_CREAT;
		}

		need_vnop_open = !did_open;

		/*
		 * We have a vnode.  Use compound open if available
		 * or else fall through to "traditional" path.  Note: can't
		 * do a compound open for root, because the parent belongs
		 * to a different FS.
		 */
		if (error == 0 && batched && (vnode_mount(dvp) == vnode_mount(vp))) {
			error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);

			if (error == 0) {
				need_vnop_open = FALSE;
			} else if (error == EKEEPLOOKING) {
				if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
					panic("EKEEPLOOKING, but continue flag not set?");
				}
				goto continue_create_lookup;
			}
		}

		ndp->ni_dvp = NULLVP;
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		/* Inherit USEDVP, vnode_open() supported flags only */
		ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
		ndp->ni_cnd.cn_flags |= FOLLOW | LOCKLEAF | AUDITVNPATH1 | WANTPARENT;

		/* open calls are allowed for resource forks. */
		ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;

		if (fmode & FENCRYPTED)
			ndp->ni_cnd.cn_flags |= CN_RAW_ENCRYPTED | CN_SKIPNAMECACHE;
		ndp->ni_flag = NAMEI_COMPOUNDOPEN;

		/* preserve NOFOLLOW from vnode_open() */
		if (fmode & O_NOFOLLOW || fmode & O_SYMLINK || (origcnflags & FOLLOW) == 0) {
			ndp->ni_cnd.cn_flags &= ~FOLLOW;
		}

		/* Do a lookup, possibly going directly to filesystem for compound operation */
		do {
			if ( (error = namei(ndp)) )
				goto out;

			dvp = ndp->ni_dvp;
			vp = ndp->ni_vp;

			/* Check for batched lookup-open */
			batched = vnode_compound_open_available(dvp);
			if (batched && ((vp == NULLVP) || (vnode_mount(dvp) == vnode_mount(vp)))) {
				error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);

				if (error == 0) {
					need_vnop_open = FALSE;
				} else if (error == EKEEPLOOKING) {
					if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
						panic("EKEEPLOOKING, but continue flag not set?");
					}
				}
			}
		} while (error == EKEEPLOOKING);
		ndp->ni_dvp = NULLVP;
	}

	/*
	 * By this point, nameidone() is called, dvp iocount is dropped,
	 * and dvp pointer is cleared.
	 */
	if (ndp->ni_dvp != NULLVP) {
		panic("Haven't cleaned up adequately in vn_open_auth()");
	}

	/*
	 * Expect to use this code for filesystems without compound VNOPs, for the root
	 * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
	 * and for shadow files, which do not live on the same filesystems as their "parents."
	 */
	if (need_vnop_open) {
		if (batched && !vnode_isvroot(vp) && !vnode_isnamedstream(vp)) {
			panic("Why am I trying to use VNOP_OPEN() on anything other than the root or a named stream?");
		}

		error = vn_authorize_open_existing(vp, &ndp->ni_cnd, fmode, ctx, NULL);
		if (VATTR_IS_ACTIVE(vap, va_dataprotect_flags)
		    && ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) {
			/* Don't allow unencrypted io request from user space unless entitled */
			boolean_t entitled = FALSE;
			entitled = IOTaskHasEntitlement(current_task(),
			    "com.apple.private.security.file-unencrypt-access");
			if (!entitled) {
				error = EPERM;
				goto bad;
			}
			fmode |= FUNENCRYPTED;
		}

		error = VNOP_OPEN(vp, fmode, ctx);
		if (error) {
			goto bad;
		}
		need_vnop_open = FALSE;
	}
	// if the vnode is tagged VOPENEVT and the current process
	// has the P_CHECKOPENEVT flag set, then we or in the O_EVTONLY
	// flag to the open mode so that this open won't count against
	// the vnode when carbon delete() does a vnode_isinuse() to see
	// if a file is currently in use.  this allows spotlight
	// importers to not interfere with carbon apps that depend on
	// the no-delete-if-busy semantics of carbon delete().
	//
	if (!did_create && (vp->v_flag & VOPENEVT) && (current_proc()->p_flag & P_CHECKOPENEVT)) {
		fmode |= O_EVTONLY;
	}

	/*
	 * Grab reference, etc.
	 */
	error = vn_open_auth_finish(vp, fmode, ctx);
	if (error) {
		ref_failed = TRUE;
		goto bad;
	}

	/* Compound VNOP open is responsible for doing the truncate */
	if (batched || did_create)
		fmode &= ~O_TRUNC;

	*fmodep = fmode;
	return (0);

bad:
	/* Opened either explicitly or by a batched create */
	if (!need_vnop_open) {
		VNOP_CLOSE(vp, fmode, ctx);
	}
	/* Aggressively recycle shadow files if we error'd out during open() */
	if ((vnode_isnamedstream(vp)) &&
	    (vp->v_parent != NULLVP) &&
	    (vnode_isshadow(vp))) {
		vnode_recycle(vp);
	}

	vnode_put(vp);

	/*
	 * Check for a race against unlink.  We had a vnode
	 * but according to vnode_authorize or VNOP_OPEN it
	 * no longer exists.
	 *
	 * EREDRIVEOPEN: means that we were hit by the tty allocation race.
	 */
	if (((error == ENOENT) && (*fmodep & O_CREAT)) || (error == EREDRIVEOPEN) || ref_failed) {
		/*
		 * We'll retry here but it may be possible that we get
		 * into a retry "spin" inside the kernel and not allow
		 * threads, which need to run in order for the retry
		 * loop to end, to run.  An example is an open of a
		 * terminal which is getting revoked and we spin here
		 * without yielding because namei and VNOP_OPEN are
		 * successful but vnode_ref fails.  The revoke needs
		 * threads with an iocount to run but if we spin here we
		 * may possibly be blocking other threads from running.
		 *
		 * We start yielding the CPU after some number of
		 * retries for increasing durations.  Note that this is
		 * still a loop without an exit condition.
		 */
		nretries += 1;
		if (nretries > RETRY_NO_YIELD_COUNT) {
			/* Every hz/100 secs is 10 msecs ... */
			tsleep(&nretries, PVFS, "vn_open_auth_retry",
			    MIN((nretries * (hz / 100)), hz));
		}
		goto again;
	}

out:
	return (error);
}
#if vn_access_DEPRECATED
/*
 * Authorize an action against a vnode.  This has been the canonical way to
 * ensure that the credential/process/etc. referenced by a vfs_context
 * is granted the rights called out in 'mode' against the vnode 'vp'.
 *
 * Unfortunately, the use of VREAD/VWRITE/VEXEC makes it very difficult
 * to add support for more rights.  As such, this interface will be deprecated
 * and callers will use vnode_authorize instead.
 */
int
vn_access(vnode_t vp, int mode, vfs_context_t context)
{
	kauth_action_t action;

	action = 0;
	if (mode & VREAD)
		action |= KAUTH_VNODE_READ_DATA;
	if (mode & VWRITE)
		action |= KAUTH_VNODE_WRITE_DATA;
	if (mode & VEXEC)
		action |= KAUTH_VNODE_EXECUTE;

	return(vnode_authorize(vp, NULL, action, context));
}
#endif	/* vn_access_DEPRECATED */
int
vn_close(struct vnode *vp, int flags, vfs_context_t ctx)
{
	int error;
	int flusherror = 0;

	/* Sync data from resource fork shadow file if needed. */
	if ((vp->v_flag & VISNAMEDSTREAM) &&
	    (vp->v_parent != NULLVP) &&
	    vnode_isshadow(vp)) {
		if (flags & FWASWRITTEN) {
			flusherror = vnode_flushnamedstream(vp->v_parent, vp, ctx);
		}
	}

	/* work around for foxhound */
	if (vnode_isspec(vp))
		(void)vnode_rele_ext(vp, flags, 0);

	/*
	 * On HFS, we flush when the last writer closes.  We do this
	 * because resource fork vnodes hold a reference on data fork
	 * vnodes and that will prevent them from getting VNOP_INACTIVE
	 * which will delay when we flush cached data.  In future, we
	 * might find it beneficial to do this for all file systems.
	 * Note that it's OK to access v_writecount without the lock
	 * in this context.
	 */
	if (vp->v_tag == VT_HFS && (flags & FWRITE) && vp->v_writecount == 1)
		VNOP_FSYNC(vp, MNT_NOWAIT, ctx);

	error = VNOP_CLOSE(vp, flags, ctx);

	if (flags & FWASWRITTEN) {
		if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
			add_fsevent(FSE_CONTENT_MODIFIED, ctx,
			    FSE_ARG_VNODE, vp, FSE_ARG_DONE);
		}
	}

	if (!vnode_isspec(vp))
		(void)vnode_rele_ext(vp, flags, 0);

	if (flusherror) {
		error = flusherror;
	}
	return (error);
}
static int
vn_read_swapfile(vnode_t vp, uio_t uio)
{
	int error;
	off_t swap_count, this_count;
	off_t file_end, read_end;
	off_t prev_resid;
	char *my_swap_page;

	/*
	 * Reading from a swap file will get you zeroes.
	 */
	my_swap_page = NULL;
	error = 0;
	swap_count = uio_resid(uio);

	file_end = ubc_getsize(vp);
	read_end = uio->uio_offset + uio_resid(uio);
	if (uio->uio_offset >= file_end) {
		/* uio starts after end of file: nothing to read */
		swap_count = 0;
	} else if (read_end > file_end) {
		/* uio extends beyond end of file: stop before that */
		swap_count -= (read_end - file_end);
	}

	while (swap_count > 0) {
		if (my_swap_page == NULL) {
			MALLOC(my_swap_page, char *, PAGE_SIZE,
			    M_TEMP, M_WAITOK);
			memset(my_swap_page, '\0', PAGE_SIZE);
			/* add an end-of-line to keep line counters happy */
			my_swap_page[PAGE_SIZE-1] = '\n';
		}
		this_count = swap_count;
		if (this_count > PAGE_SIZE) {
			this_count = PAGE_SIZE;
		}

		prev_resid = uio_resid(uio);
		error = uiomove((caddr_t) my_swap_page,
		    this_count, uio);
		if (error) {
			break;
		}
		swap_count -= (prev_resid - uio_resid(uio));
	}
	if (my_swap_page != NULL) {
		FREE(my_swap_page, M_TEMP);
	}

	return error;
}
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset,
	enum uio_seg segflg, int ioflg, kauth_cred_t cred, int *aresid, proc_t p)
{
	int64_t resid;
	int result;

	result = vn_rdwr_64(rw, vp,
	    (uint64_t)(uintptr_t)base,
	    (int64_t)len, offset, segflg, ioflg, cred, &resid, p);

	/* "resid" should be bounded above by "len," which is an int */
	if (aresid != NULL) {
		*aresid = resid;
	}

	return result;
}
int
vn_rdwr_64(enum uio_rw rw, struct vnode *vp, uint64_t base, int64_t len,
	off_t offset, enum uio_seg segflg, int ioflg, kauth_cred_t cred,
	int64_t *aresid, proc_t p)
{
	uio_t auio;
	int spacetype;
	struct vfs_context context;
	int error = 0;
	char uio_buf[ UIO_SIZEOF(1) ];

	context.vc_thread = current_thread();
	context.vc_ucred = cred;

	if (UIO_SEG_IS_USER_SPACE(segflg)) {
		spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
	} else {
		spacetype = UIO_SYSSPACE;
	}
	auio = uio_createwithbuffer(1, offset, spacetype, rw,
	    &uio_buf[0], sizeof(uio_buf));
	uio_addiov(auio, base, len);

	/*
	 * IO_NOAUTH should be re-examined.
	 * Likely that mediation should be performed in caller.
	 */
	if ((ioflg & IO_NOAUTH) == 0) {
		/* passed cred is fp->f_cred */
		if (rw == UIO_READ)
			error = mac_vnode_check_read(&context, cred, vp);
		else
			error = mac_vnode_check_write(&context, cred, vp);
	}

	if (error == 0) {
		if (rw == UIO_READ) {
			if (vnode_isswap(vp) && ((ioflg & IO_SWAP_DISPATCH) == 0)) {
				error = vn_read_swapfile(vp, auio);
			} else {
				error = VNOP_READ(vp, auio, ioflg, &context);
			}
		} else {
			error = VNOP_WRITE(vp, auio, ioflg, &context);
		}
	}

	if (aresid)
		*aresid = uio_resid(auio);
	else if (uio_resid(auio) && error == 0)
		error = EIO;
	return (error);
}
void
vn_offset_lock(struct fileglob *fg)
{
	lck_mtx_lock_spin(&fg->fg_lock);
	while (fg->fg_lflags & FG_OFF_LOCKED) {
		fg->fg_lflags |= FG_OFF_LOCKWANT;
		msleep(&fg->fg_lflags, &fg->fg_lock, PVFS | PSPIN,
		    "fg_offset_lock_wait", 0);
	}
	fg->fg_lflags |= FG_OFF_LOCKED;
	lck_mtx_unlock(&fg->fg_lock);
}

void
vn_offset_unlock(struct fileglob *fg)
{
	int lock_wanted = 0;

	lck_mtx_lock_spin(&fg->fg_lock);
	if (fg->fg_lflags & FG_OFF_LOCKWANT) {
		lock_wanted = 1;
	}
	fg->fg_lflags &= ~(FG_OFF_LOCKED | FG_OFF_LOCKWANT);
	lck_mtx_unlock(&fg->fg_lock);
	if (lock_wanted) {
		wakeup(&fg->fg_lflags);
	}
}
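
/*
 * Added note (not in the original source): vn_offset_lock() and
 * vn_offset_unlock() above implement a simple sleep lock over the shared
 * file offset, using fg_lock as the interlock and the FG_OFF_LOCKED /
 * FG_OFF_LOCKWANT bits in fg_lflags as the lock state.  vn_read() and
 * vn_write() below take this lock around non-FOF_OFFSET I/O on regular,
 * non-swap vnodes so that concurrent users of one fileglob update fg_offset
 * consistently.
 */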
/*
 * File table vnode read routine.
 */
static int
vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
{
	struct vnode *vp;
	int error;
	int ioflag;
	off_t count;
	int offset_locked = 0;

	vp = (struct vnode *)fp->f_fglob->fg_data;
	if ( (error = vnode_getwithref(vp)) ) {
		return(error);
	}

	error = mac_vnode_check_read(ctx, vfs_context_ucred(ctx), vp);
	if (error) {
		(void)vnode_put(vp);
		return (error);
	}

	/* This signals to VNOP handlers that this read came from a file table read */
	ioflag = IO_SYSCALL_DISPATCH;

	if (fp->f_fglob->fg_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp))
		ioflag |= IO_NOCACHE;
	if (fp->f_fglob->fg_flag & FENCRYPTED) {
		ioflag |= IO_ENCRYPTED;
	}
	if (fp->f_fglob->fg_flag & FUNENCRYPTED) {
		ioflag |= IO_SKIP_ENCRYPTION;
	}
	if (fp->f_fglob->fg_flag & O_EVTONLY) {
		ioflag |= IO_EVTONLY;
	}
	if (fp->f_fglob->fg_flag & FNORDAHEAD)
		ioflag |= IO_RAOFF;

	if ((flags & FOF_OFFSET) == 0) {
		if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
			vn_offset_lock(fp->f_fglob);
			offset_locked = 1;
		}
		uio->uio_offset = fp->f_fglob->fg_offset;
	}
	count = uio_resid(uio);

	if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) {
		/* special case for swap files */
		error = vn_read_swapfile(vp, uio);
	} else {
		error = VNOP_READ(vp, uio, ioflag, ctx);
	}
	if ((flags & FOF_OFFSET) == 0) {
		fp->f_fglob->fg_offset += count - uio_resid(uio);
		if (offset_locked) {
			vn_offset_unlock(fp->f_fglob);
			offset_locked = 0;
		}
	}

	(void)vnode_put(vp);
	return (error);
}
/*
 * File table vnode write routine.
 */
static int
vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
{
	struct vnode *vp;
	int error, ioflag;
	off_t count = 0;
	int clippedsize = 0;
	int partialwrite = 0;
	int residcount, oldcount;
	int offset_locked = 0;
	proc_t p = vfs_context_proc(ctx);

	vp = (struct vnode *)fp->f_fglob->fg_data;
	if ( (error = vnode_getwithref(vp)) ) {
		return(error);
	}

	error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp);
	if (error) {
		(void)vnode_put(vp);
		return (error);
	}

	/*
	 * IO_SYSCALL_DISPATCH signals to VNOP handlers that this write came from
	 * a file table write.
	 */
	ioflag = (IO_UNIT | IO_SYSCALL_DISPATCH);

	if (vp->v_type == VREG && (fp->f_fglob->fg_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_fglob->fg_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp))
		ioflag |= IO_NOCACHE;
	if (fp->f_fglob->fg_flag & FNODIRECT)
		ioflag |= IO_NODIRECT;
	if (fp->f_fglob->fg_flag & FSINGLE_WRITER)
		ioflag |= IO_SINGLE_WRITER;
	if (fp->f_fglob->fg_flag & O_EVTONLY)
		ioflag |= IO_EVTONLY;

	/*
	 * Treat synchronous mounts and O_FSYNC on the fd as equivalent.
	 *
	 * XXX We treat O_DSYNC as O_FSYNC for now, since we can not delay
	 * XXX the non-essential metadata without some additional VFS work;
	 * XXX the intent at this point is to plumb the interface for it.
	 */
	if ((fp->f_fglob->fg_flag & (O_FSYNC|O_DSYNC)) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) {
		ioflag |= IO_SYNC;
	}

	if ((flags & FOF_OFFSET) == 0) {
		if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
			vn_offset_lock(fp->f_fglob);
			offset_locked = 1;
		}
		uio->uio_offset = fp->f_fglob->fg_offset;
		count = uio_resid(uio);
	}
	if (((flags & FOF_OFFSET) == 0) &&
	    vfs_context_proc(ctx) && (vp->v_type == VREG) &&
	    (((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) ||
	    ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)))) {
		/*
		 * If the requested residual would cause us to go past the
		 * administrative limit, then we need to adjust the residual
		 * down to cause fewer bytes than requested to be written.  If
		 * we can't do that (e.g. the residual is already 1 byte),
		 * then we fail the write with EFBIG.
		 */
		residcount = uio_resid(uio);
		if ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
			clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur;
		} else if ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)) {
			clippedsize = (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset);
		}
		if (clippedsize >= residcount) {
			psignal(p, SIGXFSZ);
			error = EFBIG;
			goto error_out;
		}
		partialwrite = 1;
		uio_setresid(uio, residcount-clippedsize);
	}
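	/*
	 * Worked example (added commentary, not in the original source): with
	 * rlim_cur = 100, uio_offset = 90 and a residual of 30, the first
	 * branch above computes clippedsize = (90 + 30) - 100 = 20.  Since
	 * 20 < 30 the write is not rejected; the residual is trimmed to 10 so
	 * only bytes below the limit are written, and the clipped 20 bytes are
	 * added back to the residual after VNOP_WRITE() so the caller sees a
	 * short write rather than an error.
	 */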
	if ((flags & FOF_OFFSET) != 0) {
		/* for pwrite, append should be ignored */
		ioflag &= ~IO_APPEND;
		if (p && (vp->v_type == VREG) &&
		    ((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
			psignal(p, SIGXFSZ);
			error = EFBIG;
			goto error_out;
		}
		if (p && (vp->v_type == VREG) &&
		    ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
			//Debugger("vn_bwrite:overstepping the bounds");
			residcount = uio_resid(uio);
			clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur;
			partialwrite = 1;
			uio_setresid(uio, residcount-clippedsize);
		}
	}

	error = VNOP_WRITE(vp, uio, ioflag, ctx);

	if (partialwrite) {
		oldcount = uio_resid(uio);
		uio_setresid(uio, oldcount + clippedsize);
	}

	if ((flags & FOF_OFFSET) == 0) {
		if (ioflag & IO_APPEND)
			fp->f_fglob->fg_offset = uio->uio_offset;
		else
			fp->f_fglob->fg_offset += count - uio_resid(uio);
		if (offset_locked) {
			vn_offset_unlock(fp->f_fglob);
			offset_locked = 0;
		}
	}

	/*
	 * Set the credentials on successful writes
	 */
	if ((error == 0) && (vp->v_tag == VT_NFS) && (UBCINFOEXISTS(vp))) {
		/*
		 * When called from aio subsystem, we only have the proc from
		 * which to get the credential, at this point, so use that
		 * instead.  This means aio functions are incompatible with
		 * per-thread credentials (aio operations are proxied).  We
		 * can't easily correct the aio vs. settid race in this case
		 * anyway, so we disallow it.
		 */
		if ((flags & FOF_PCRED) == 0) {
			ubc_setthreadcred(vp, p, current_thread());
		}
	}
	(void)vnode_put(vp);
	return (error);

error_out:
	if (offset_locked) {
		vn_offset_unlock(fp->f_fglob);
	}
	(void)vnode_put(vp);
	return (error);
}
/*
 * File table vnode stat routine.
 *
 * Returns:	0			Success
 */
int
vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64,
    vfs_context_t ctx)
{
	struct vnode_attr va;
	int error;
	u_short mode;
	kauth_filesec_t fsec;
	struct stat *sb = (struct stat *)0;	/* warning avoidance ; protected by isstat64 */
	struct stat64 * sb64 = (struct stat64 *)0;  /* warning avoidance ; protected by isstat64 */

	if (isstat64 != 0)
		sb64 = (struct stat64 *)sbptr;
	else
		sb = (struct stat *)sbptr;
	memset(&va, 0, sizeof(va));
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	VATTR_WANTED(&va, va_fileid);
	VATTR_WANTED(&va, va_mode);
	VATTR_WANTED(&va, va_type);
	VATTR_WANTED(&va, va_nlink);
	VATTR_WANTED(&va, va_uid);
	VATTR_WANTED(&va, va_gid);
	VATTR_WANTED(&va, va_rdev);
	VATTR_WANTED(&va, va_data_size);
	VATTR_WANTED(&va, va_access_time);
	VATTR_WANTED(&va, va_modify_time);
	VATTR_WANTED(&va, va_change_time);
	VATTR_WANTED(&va, va_create_time);
	VATTR_WANTED(&va, va_flags);
	VATTR_WANTED(&va, va_gen);
	VATTR_WANTED(&va, va_iosize);
	/* lower layers will synthesise va_total_alloc from va_data_size if required */
	VATTR_WANTED(&va, va_total_alloc);
	if (xsec != NULL) {
		VATTR_WANTED(&va, va_uuuid);
		VATTR_WANTED(&va, va_guuid);
		VATTR_WANTED(&va, va_acl);
	}
	error = vnode_getattr(vp, &va, ctx);
	if (error)
		goto out;
	/*
	 * Copy from vattr table
	 */
	if (isstat64 != 0) {
		sb64->st_dev = va.va_fsid;
		sb64->st_ino = (ino64_t)va.va_fileid;
	} else {
		sb->st_dev = va.va_fsid;
		sb->st_ino = (ino_t)va.va_fileid;
	}
	mode = va.va_mode;
	switch (vp->v_type) {
	/* ... set the S_IF* type bits in 'mode' based on vp->v_type ... */
	}
	if (isstat64 != 0) {
		sb64->st_mode = mode;
		sb64->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? (u_int16_t)va.va_nlink : 1;
		sb64->st_uid = va.va_uid;
		sb64->st_gid = va.va_gid;
		sb64->st_rdev = va.va_rdev;
		sb64->st_size = va.va_data_size;
		sb64->st_atimespec = va.va_access_time;
		sb64->st_mtimespec = va.va_modify_time;
		sb64->st_ctimespec = va.va_change_time;
		if (VATTR_IS_SUPPORTED(&va, va_create_time)) {
			sb64->st_birthtimespec = va.va_create_time;
		} else {
			sb64->st_birthtimespec.tv_sec = sb64->st_birthtimespec.tv_nsec = 0;
		}
		sb64->st_blksize = va.va_iosize;
		sb64->st_flags = va.va_flags;
		sb64->st_blocks = roundup(va.va_total_alloc, 512) / 512;
	} else {
		sb->st_mode = mode;
		sb->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? (u_int16_t)va.va_nlink : 1;
		sb->st_uid = va.va_uid;
		sb->st_gid = va.va_gid;
		sb->st_rdev = va.va_rdev;
		sb->st_size = va.va_data_size;
		sb->st_atimespec = va.va_access_time;
		sb->st_mtimespec = va.va_modify_time;
		sb->st_ctimespec = va.va_change_time;
		sb->st_blksize = va.va_iosize;
		sb->st_flags = va.va_flags;
		sb->st_blocks = roundup(va.va_total_alloc, 512) / 512;
	}
	/* if we're interested in extended security data and we got an ACL */
	if (xsec != NULL) {
		if (!VATTR_IS_SUPPORTED(&va, va_acl) &&
		    !VATTR_IS_SUPPORTED(&va, va_uuuid) &&
		    !VATTR_IS_SUPPORTED(&va, va_guuid)) {
			*xsec = KAUTH_FILESEC_NONE;
		} else {
			if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
				fsec = kauth_filesec_alloc(va.va_acl->acl_entrycount);
			} else {
				fsec = kauth_filesec_alloc(0);
			}
			if (fsec == NULL) {
				error = ENOMEM;
				goto out;
			}
			fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
			if (VATTR_IS_SUPPORTED(&va, va_uuuid)) {
				fsec->fsec_owner = va.va_uuuid;
			} else {
				fsec->fsec_owner = kauth_null_guid;
			}
			if (VATTR_IS_SUPPORTED(&va, va_guuid)) {
				fsec->fsec_group = va.va_guuid;
			} else {
				fsec->fsec_group = kauth_null_guid;
			}
			if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
				bcopy(va.va_acl, &(fsec->fsec_acl), KAUTH_ACL_COPYSIZE(va.va_acl));
			} else {
				fsec->fsec_acl.acl_entrycount = KAUTH_FILESEC_NOACL;
			}
			*xsec = fsec;
		}
	}

	/* Do not give the generation number out to unprivileged users */
	if (va.va_gen && !vfs_context_issuser(ctx)) {
		if (isstat64 != 0)
			sb64->st_gen = 0;
		else
			sb->st_gen = 0;
	} else {
		if (isstat64 != 0)
			sb64->st_gen = va.va_gen;
		else
			sb->st_gen = va.va_gen;
	}

	error = 0;
out:
	if (VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL)
		kauth_acl_free(va.va_acl);
	return (error);
}
int
vn_stat(struct vnode *vp, void *sb, kauth_filesec_t *xsec, int isstat64, vfs_context_t ctx)
{
	int error;

	error = mac_vnode_check_stat(ctx, NOCRED, vp);
	if (error)
		return (error);

	/* authorize */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, ctx)) != 0)
		return (error);

	/* actual stat */
	return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx));
}
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
{
	struct vnode *vp = ((struct vnode *)fp->f_fglob->fg_data);
	off_t file_size;
	int error;
	struct vnode *ttyvp;
	struct session * sessp;

	if ( (error = vnode_getwithref(vp)) ) {
		return(error);
	}

	error = mac_vnode_check_ioctl(ctx, vp, com);
	if (error)
		goto out;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = vnode_size(vp, &file_size, ctx)) != 0)
				goto out;
			*(int *)data = file_size - fp->f_fglob->fg_offset;
			goto out;
		}
		if (com == FIONBIO || com == FIOASYNC) {	/* XXX */
			goto out;
		}
		/* fall into ... */

	default:
		error = ENOTTY;
		goto out;

	case VFIFO:
	case VCHR:
	case VBLK:

		/* Should not be able to set block size from user space */
		if (com == DKIOCSETBLOCKSIZE) {
			error = EPERM;
			goto out;
		}

		if (com == FIODTYPE) {
			if (vp->v_type == VBLK) {
				if (major(vp->v_rdev) >= nblkdev) {
					error = ENXIO;
					goto out;
				}
				*(int *)data = bdevsw[major(vp->v_rdev)].d_type;
			} else if (vp->v_type == VCHR) {
				if (major(vp->v_rdev) >= nchrdev) {
					error = ENXIO;
					goto out;
				}
				*(int *)data = cdevsw[major(vp->v_rdev)].d_type;
			} else {
				error = ENOTTY;
				goto out;
			}
			goto out;
		}
		error = VNOP_IOCTL(vp, com, data, fp->f_fglob->fg_flag, ctx);

		if (error == 0 && com == TIOCSCTTY) {
			sessp = proc_session(vfs_context_proc(ctx));

			session_lock(sessp);
			ttyvp = sessp->s_ttyvp;
			sessp->s_ttyvp = vp;
			sessp->s_ttyvid = vnode_vid(vp);
			session_unlock(sessp);
			session_rele(sessp);
		}
	}
out:
	(void)vnode_put(vp);
	return(error);
}
/*
 * File table vnode select routine.
 */
static int
vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx)
{
	int error;
	struct vnode * vp = (struct vnode *)fp->f_fglob->fg_data;
	struct vfs_context context;

	if ( (error = vnode_getwithref(vp)) == 0 ) {
		context.vc_thread = current_thread();
		context.vc_ucred = fp->f_fglob->fg_cred;

		/*
		 * XXX We should use a per thread credential here; minimally,
		 * XXX the process credential should have a persistent
		 * XXX reference on it before being passed in here.
		 */
		error = mac_vnode_check_select(ctx, vp, which);
		if (error == 0)
			error = VNOP_SELECT(vp, which, fp->f_fglob->fg_flag, wql, ctx);

		(void)vnode_put(vp);
	}
	return(error);
}
/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct fileglob *fg, vfs_context_t ctx)
{
	struct vnode *vp = fg->fg_data;
	int error = 0;

	if ( (error = vnode_getwithref(vp)) == 0 ) {
		if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE &&
		    ((fg->fg_flag & FHASLOCK) != 0 ||
		    (fg->fg_lflags & FG_HAS_OFDLOCK) != 0)) {
			struct flock lf = {
				.l_whence = SEEK_SET,
				.l_start = 0,
				.l_len = 0,
				.l_type = F_UNLCK
			};

			if ((fg->fg_flag & FHASLOCK) != 0)
				(void) VNOP_ADVLOCK(vp, (caddr_t)fg,
				    F_UNLCK, &lf, F_FLOCK, ctx, NULL);

			if ((fg->fg_lflags & FG_HAS_OFDLOCK) != 0)
				(void) VNOP_ADVLOCK(vp, (caddr_t)fg,
				    F_UNLCK, &lf, F_OFD_LOCK, ctx, NULL);
		}
		error = vn_close(vp, fg->fg_flag, ctx);
		(void) vnode_put(vp);
	}
	return (error);
}
/*
 * Returns:	0			Success
 */
int
vn_pathconf(vnode_t vp, int name, int32_t *retval, vfs_context_t ctx)
{
	int error = 0;
	struct vfs_attr vfa;

	switch(name) {
	case _PC_EXTENDED_SECURITY_NP:
		*retval = vfs_extendedsecurity(vnode_mount(vp)) ? 1 : 0;
		break;
	case _PC_AUTH_OPAQUE_NP:
		*retval = vfs_authopaque(vnode_mount(vp));
		break;
	case _PC_2_SYMLINKS:
		*retval = 1;	/* XXX NOTSUP on MSDOS, etc. */
		break;
	case _PC_ALLOC_SIZE_MIN:
		*retval = 1;	/* XXX lie: 1 byte */
		break;
	case _PC_ASYNC_IO:	/* unistd.h: _POSIX_ASYNCHRONOUS_IO */
		*retval = 1;	/* [AIO] option is supported */
		break;
	case _PC_PRIO_IO:	/* unistd.h: _POSIX_PRIORITIZED_IO */
		*retval = 0;	/* [PIO] option is not supported */
		break;
	case _PC_REC_INCR_XFER_SIZE:
		*retval = 4096;	/* XXX go from MIN to MAX 4K at a time */
		break;
	case _PC_REC_MIN_XFER_SIZE:
		*retval = 4096;	/* XXX recommend 4K minimum reads/writes */
		break;
	case _PC_REC_MAX_XFER_SIZE:
		*retval = 65536; /* XXX recommend 64K maximum reads/writes */
		break;
	case _PC_REC_XFER_ALIGN:
		*retval = 4096;	/* XXX recommend page aligned buffers */
		break;
	case _PC_SYMLINK_MAX:
		*retval = 255;	/* Minimum acceptable POSIX value */
		break;
	case _PC_SYNC_IO:	/* unistd.h: _POSIX_SYNCHRONIZED_IO */
		*retval = 0;	/* [SIO] option is not supported */
		break;
	case _PC_XATTR_SIZE_BITS:
		/* The number of bits used to store maximum extended
		 * attribute size in bytes.  For example, if the maximum
		 * attribute size supported by a file system is 128K, the
		 * value returned will be 18.  However a value 18 can mean
		 * that the maximum attribute size can be anywhere from
		 * (256KB - 1) to 128KB.  As a special case, the resource
		 * fork can have much larger size, and some file system
		 * specific extended attributes can have smaller and preset
		 * size; for example, Finder Info is always 32 bytes.
		 */
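		/*
		 * Added arithmetic note (not in the original source):
		 * 128KB == 131072 == 2^17, and representing sizes up to and
		 * including 131072 takes 18 bits, since 17 bits only reach
		 * 131071; hence the example value of 18 above.
		 */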
		memset(&vfa, 0, sizeof(vfa));
		VFSATTR_INIT(&vfa);
		VFSATTR_WANTED(&vfa, f_capabilities);
		if (vfs_getattr(vnode_mount(vp), &vfa, ctx) == 0 &&
		    (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) &&
		    (vfa.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
		    (vfa.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
			/* Supports native extended attributes */
			error = VNOP_PATHCONF(vp, name, retval, ctx);
		} else {
			/* Number of bits used to represent the maximum size of
			 * extended attribute stored in an Apple Double file.
			 */
			*retval = AD_XATTR_SIZE_BITS;
		}
		break;

	default:
		error = VNOP_PATHCONF(vp, name, retval, ctx);
		break;
	}

	return (error);
}
static int
vn_kqfilt_add(struct fileproc *fp, struct knote *kn, vfs_context_t ctx)
{
	struct vnode *vp;
	int error = 0;
	int result = 0;

	vp = (struct vnode *)fp->f_fglob->fg_data;

	/*
	 * Don't attach a knote to a dead vnode.
	 */
	if ((error = vget_internal(vp, 0, VNODE_NODEAD)) == 0) {
		switch (kn->kn_filter) {
		case EVFILT_READ:
		case EVFILT_WRITE:
			if (vnode_isfifo(vp)) {
				/* We'll only watch FIFOs that use our fifofs */
				if (!(vp->v_fifoinfo && vp->v_fifoinfo->fi_readsock)) {
					error = ENOTSUP;
				}
			} else if (!vnode_isreg(vp)) {
				if (vnode_ischr(vp)) {
					result = spec_kqfilter(vp, kn);
					if ((kn->kn_flags & EV_ERROR) == 0) {
						/* claimed by a special device */
						vnode_put(vp);
						return result;
					}
				}
				error = EINVAL;
			}
			break;
		case EVFILT_VNODE:
			break;
		default:
			error = EINVAL;
		}

		if (error == 0) {
			error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);
			if (error) {
				vnode_put(vp);
				goto out;
			}

			kn->kn_hook = (void*)vp;
			kn->kn_hookid = vnode_vid(vp);
			kn->kn_filtid = EVFILTID_VN;

			vnode_lock(vp);
			KNOTE_ATTACH(&vp->v_knotes, kn);
			result = filt_vnode_common(kn, vp, 0);
			vnode_unlock(vp);

			/*
			 * Ask the filesystem to provide remove notifications,
			 * but ignore failure
			 */
			VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);
		}

		vnode_put(vp);
	}

out:
	if (error) {
		kn->kn_flags = EV_ERROR;
		kn->kn_data = error;
	}

	return result;
}
static void
filt_vndetach(struct knote *kn)
{
	vfs_context_t ctx = vfs_context_current();
	struct vnode *vp;

	vp = (struct vnode *)kn->kn_hook;
	if (vnode_getwithvid(vp, kn->kn_hookid))
		return;

	vnode_lock(vp);
	KNOTE_DETACH(&vp->v_knotes, kn);
	vnode_unlock(vp);

	/*
	 * Tell a (generally networked) filesystem that we're no longer watching.
	 * If the FS wants to track contexts, it should still be using the one from
	 * the VNODE_MONITOR_BEGIN.
	 */
	VNOP_MONITOR(vp, 0, VNODE_MONITOR_END, (void*)kn, ctx);
	vnode_put(vp);
}
/*
 * Used for EVFILT_READ
 *
 * Takes only VFIFO or VREG.  vnode is locked.  We handle the "poll" case
 * differently than the regular case for VREG files.  If not in poll(),
 * then we need to know current fileproc offset for VREG.
 */
static intptr_t
vnode_readable_data_count(vnode_t vp, off_t current_offset, int ispoll)
{
	if (vnode_isfifo(vp)) {
		int cnt;
		int err = fifo_charcount(vp, &cnt);
		if (err == 0) {
			return (intptr_t)cnt;
		} else {
			return (intptr_t)0;
		}
	} else if (vnode_isreg(vp)) {
		if (ispoll) {
			return (intptr_t)1;
		}

		off_t amount;
		amount = vp->v_un.vu_ubcinfo->ui_size - current_offset;
		if (amount > (off_t)INTPTR_MAX) {
			return INTPTR_MAX;
		} else if (amount < (off_t)INTPTR_MIN) {
			return INTPTR_MIN;
		} else {
			return (intptr_t)amount;
		}
	} else {
		panic("Should never have an EVFILT_READ except for reg or fifo.");
		return 0;
	}
}
/*
 * Used for EVFILT_WRITE.
 *
 * For regular vnodes, we can always write (1).  For named pipes,
 * see how much space there is in the buffer.  Nothing else is covered.
 */
static intptr_t
vnode_writable_space_count(vnode_t vp)
{
	if (vnode_isfifo(vp)) {
		long spc;
		int err = fifo_freespace(vp, &spc);
		if (err == 0) {
			return (intptr_t)spc;
		} else {
			return (intptr_t)0;
		}
	} else if (vnode_isreg(vp)) {
		return (intptr_t)1;
	} else {
		panic("Should never have an EVFILT_READ except for reg or fifo.");
		return 0;
	}
}
/*
 * Determine whether this knote should be active
 *
 * This is kind of subtle.
 *	--First, notice if the vnode has been revoked: if so, override hint
 *	--EVFILT_READ knotes are checked no matter what the hint is
 *	--Other knotes activate based on hint.
 *	--If hint is revoke, set special flags and activate
 */
static int
filt_vnode_common(struct knote *kn, vnode_t vp, long hint)
{
	int activate = 0;

	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);

	/* Special handling for vnodes that are in recycle or already gone */
	if (NOTE_REVOKE == hint) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		activate = 1;

		if ((kn->kn_filter == EVFILT_VNODE) && (kn->kn_sfflags & NOTE_REVOKE)) {
			kn->kn_fflags |= NOTE_REVOKE;
		}
	} else {
		switch(kn->kn_filter) {
		case EVFILT_READ:
			kn->kn_data = vnode_readable_data_count(vp, kn->kn_fp->f_fglob->fg_offset, (kn->kn_flags & EV_POLL));

			if (kn->kn_data != 0) {
				activate = 1;
			}
			break;
		case EVFILT_WRITE:
			kn->kn_data = vnode_writable_space_count(vp);

			if (kn->kn_data != 0) {
				activate = 1;
			}
			break;
		case EVFILT_VNODE:
			/* Check events this note matches against the hint */
			if (kn->kn_sfflags & hint) {
				kn->kn_fflags |= hint;	/* Set which event occurred */
			}
			if (kn->kn_fflags != 0) {
				activate = 1;
			}
			break;
		default:
			panic("Invalid knote filter on a vnode!\n");
		}
	}
	return (activate);
}
static int
filt_vnode(struct knote *kn, long hint)
{
	vnode_t vp = (struct vnode *)kn->kn_hook;

	return filt_vnode_common(kn, vp, hint);
}
static int
filt_vntouch(struct knote *kn, struct kevent_internal_s *kev)
{
	vnode_t vp = (struct vnode *)kn->kn_hook;
	int activate;
	int hint = 0;

	vnode_lock(vp);
	if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
		/* is recycled */
		hint = NOTE_REVOKE;
	}

	/* accept new input fflags mask */
	kn->kn_sfflags = kev->fflags;
	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
		kn->kn_udata = kev->udata;

	activate = filt_vnode_common(kn, vp, hint);

	if (hint == 0)
		vnode_put_locked(vp);
	vnode_unlock(vp);

	return activate;
}
static int
filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
	vnode_t vp = (struct vnode *)kn->kn_hook;
	int activate;
	int hint = 0;

	vnode_lock(vp);
	if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
		/* is recycled */
		hint = NOTE_REVOKE;
	}
	activate = filt_vnode_common(kn, vp, hint);
	if (activate) {
		*kev = kn->kn_kevent;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
	}

	/* Definitely need to unlock, may need to put */
	if (hint == 0)
		vnode_put_locked(vp);
	vnode_unlock(vp);

	return activate;
}