/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/ioctl.h>
/* Temporary workaround for ubc.h until <rdar://4714366> is resolved */
#define ubc_setcred ubc_setcred_deprecated
#include <sys/ubc.h>
#undef ubc_setcred
int ubc_setcred(struct vnode *, struct proc *);
#include <sys/fsevents.h>
#include <sys/kdebug.h>
#include <sys/xattr.h>
#include <sys/ubc_internal.h>
#include <sys/uio_internal.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#if CONFIG_PROTECT
#include <sys/cprotect.h>
#endif

#include <IOKit/IOBSD.h>
static int vn_closefile(struct fileglob *fp, vfs_context_t ctx);
static int vn_ioctl(struct fileproc *fp, u_long com, caddr_t data,
        vfs_context_t ctx);
static int vn_read(struct fileproc *fp, struct uio *uio, int flags,
        vfs_context_t ctx);
static int vn_write(struct fileproc *fp, struct uio *uio, int flags,
        vfs_context_t ctx);
static int vn_select(struct fileproc *fp, int which, void *wql,
        vfs_context_t ctx);
static int vn_kqfilt_add(struct fileproc *fp, struct knote *kn,
        vfs_context_t ctx);
static void filt_vndetach(struct knote *kn);
static int filt_vnode(struct knote *kn, long hint);
static int vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx);
static int vn_kqfilt_remove(struct vnode *vp, uintptr_t ident,
        vfs_context_t ctx);
const struct fileops vnops = {
    /* ... table of the vn_* file operations declared above (elided) ... */
};

struct filterops vnode_filtops = {
    .f_isfd = 1,
    .f_attach = NULL,
    .f_detach = filt_vndetach,
    .f_event = filt_vnode,
};
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VNOP_OPEN or VNOP_CREATE routine.
 *
 * XXX the profusion of interfaces here is probably a bad thing.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
    return(vn_open_modflags(ndp, &fmode, cmode));
}
int
vn_open_modflags(struct nameidata *ndp, int *fmodep, int cmode)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, cmode);

    return(vn_open_auth(ndp, fmodep, &va));
}
static int
vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx)
{
    int error;

    if ((error = vnode_ref_ext(vp, fmode, 0)) != 0) {
        goto bad;
    }

    /* Call out to allow 3rd party notification of open.
     * Ignore result of kauth_authorize_fileop call.
     */
#if CONFIG_MACF
    mac_vnode_notify_open(ctx, vp, fmode);
#endif
    kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
        (uintptr_t)vp, 0);

    return 0;

bad:
    return error;
}
/*
 * May do nameidone() to allow safely adding an FSEvent.  Cue off of ni_dvp to
 * determine whether that has happened.
 */
static int
vn_open_auth_do_create(struct nameidata *ndp, struct vnode_attr *vap, int fmode,
        boolean_t *did_create, boolean_t *did_open, vfs_context_t ctx)
{
    uint32_t status = 0;
    vnode_t dvp = ndp->ni_dvp;
    vnode_t vp;
    int batched;
    int error;

    batched = vnode_compound_open_available(ndp->ni_dvp);
    *did_open = FALSE;

    VATTR_SET(vap, va_type, VREG);
    if (fmode & O_EXCL)
        vap->va_vaflags |= VA_EXCLUSIVE;

#if NAMEDRSRCFORK
    if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK) {
        if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0)
            goto out;
        if ((error = vnode_makenamedstream(dvp, &ndp->ni_vp, XATTR_RESOURCEFORK_NAME, 0, ctx)) != 0)
            goto out;
        *did_create = TRUE;
    } else {
#endif
        if (!batched) {
            if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0)
                goto out;
        }

        error = vn_create(dvp, &ndp->ni_vp, ndp, vap, VN_CREATE_DOOPEN, fmode, &status, ctx);
        if (error != 0) {
            if (batched) {
                *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? TRUE : FALSE;
            } else {
                *did_create = FALSE;
            }

            if (error == EKEEPLOOKING) {
                if (*did_create) {
                    panic("EKEEPLOOKING, but we did a create?");
                }
                if (!batched) {
                    panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?");
                }
                if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
                    panic("EKEEPLOOKING, but continue flag not set?");
                }

                /*
                 * Do NOT drop the dvp: we need everything to continue the lookup.
                 */
                return (error);
            }
        } else {
            if (batched) {
                *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? 1 : 0;
                *did_open = TRUE;
            } else {
                *did_create = TRUE;
                *did_open = FALSE;
            }
        }
#if NAMEDRSRCFORK
    }
#endif

    vp = ndp->ni_vp;

    if (*did_create) {
        int update_flags = 0;

        // Make sure the name & parent pointers are hooked up
        if (vp->v_name == NULL)
            update_flags |= VNODE_UPDATE_NAME;
        if (vp->v_parent == NULLVP)
            update_flags |= VNODE_UPDATE_PARENT;

        if (update_flags)
            vnode_update_identity(vp, dvp, ndp->ni_cnd.cn_nameptr,
                    ndp->ni_cnd.cn_namelen, ndp->ni_cnd.cn_hash, update_flags);

        vnode_put(dvp);
        ndp->ni_dvp = NULLVP;

#if CONFIG_FSE
        if (need_fsevent(FSE_CREATE_FILE, vp)) {
            add_fsevent(FSE_CREATE_FILE, ctx,
                    FSE_ARG_VNODE, vp,
                    FSE_ARG_DONE);
        }
#endif
    }
out:
    if (ndp->ni_dvp != NULLVP) {
        vnode_put(dvp);
        ndp->ni_dvp = NULLVP;
    }

    return (error);
}
/*
 * This is the number of times we'll loop in vn_open_auth without explicitly
 * yielding the CPU when we determine we have to retry.
 */
#define RETRY_NO_YIELD_COUNT	5
/*
 * Open a file with authorization, updating the contents of the structures
 * pointed to by ndp, fmodep, and vap as necessary to perform the requested
 * operation.  This function is used for both opens of existing files, and
 * creation of new files.
 *
 * Parameters:	ndp			The nameidata pointer describing the
 *					file to open
 *		fmodep			A pointer to an int containing the mode
 *					information to be used for the open
 *		vap			A pointer to the vnode attribute
 *					descriptor to be used for the open
 *
 * Indirect:	*			Contents of the data structures pointed
 *					to by the parameters are modified as
 *					necessary to the requested operation.
 *
 * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
 *
 *		The contents of '*ndp' will be modified, based on the other
 *		arguments to this function, and to return file and directory
 *		data necessary to satisfy the requested operation.
 *
 *		If the file does not exist and we are creating it, then the
 *		O_TRUNC flag will be cleared in '*fmodep' to indicate to the
 *		caller that the file was not truncated.
 *
 *		If the file exists and the O_EXCL flag was not specified, then
 *		the O_CREAT flag will be cleared in '*fmodep' to indicate to
 *		the caller that the existing file was merely opened rather
 *		than created.
 *
 *		The contents of '*vap' will be modified as necessary to
 *		complete the operation, including setting of supported
 *		attributes, clearing of fields containing unsupported attributes
 *		in the request, if the request proceeds without them, etc..
 *
 * XXX:		This function is too complicated in acting on its arguments
 *
 * XXX:		We should enumerate the possible errno values here, and where
 *		in the code they originated.
 */
int
vn_open_auth(struct nameidata *ndp, int *fmodep, struct vnode_attr *vap)
{
    struct vnode *vp;
    struct vnode *dvp;
    vfs_context_t ctx = ndp->ni_cnd.cn_context;
    int error;
    int fmode;
    uint32_t origcnflags;
    boolean_t did_create;
    boolean_t did_open;
    boolean_t need_vnop_open;
    boolean_t batched;
    boolean_t ref_failed;
    int nretries = 0;

again:
    vp = NULL;
    dvp = NULL;
    batched = FALSE;
    did_create = FALSE;
    did_open = FALSE;
    need_vnop_open = TRUE;
    ref_failed = FALSE;

    fmode = *fmodep;
    origcnflags = ndp->ni_cnd.cn_flags;
    /*
     * O_CREAT
     */
    if (fmode & O_CREAT) {
        if ( (fmode & O_DIRECTORY) ) {
            error = EINVAL;
            goto out;
        }
        ndp->ni_cnd.cn_nameiop = CREATE;
#if CONFIG_TRIGGERS
        ndp->ni_op = OP_LINK;
#endif
        /* Inherit USEDVP, vnode_open() supported flags only */
        ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
        ndp->ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF | AUDITVNPATH1;
        ndp->ni_flag = NAMEI_COMPOUNDOPEN;
#if NAMEDRSRCFORK
        /* open calls are allowed for resource forks. */
        ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
        if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0 && (origcnflags & FOLLOW) != 0)
            ndp->ni_cnd.cn_flags |= FOLLOW;

continue_create_lookup:
        if ( (error = namei(ndp)) )
            goto out;

        dvp = ndp->ni_dvp;
        vp = ndp->ni_vp;

        batched = vnode_compound_open_available(dvp);

        /* not found, create */
        if (vp == NULLVP) {
            /* must have attributes for a new file */
            if (vap == NULL) {
                error = EINVAL;
                goto out;
            }

            /*
             * Attempt a create.   For a system supporting compound VNOPs, we may
             * find an existing file or create one; in either case, we will already
             * have the file open and no VNOP_OPEN() will be needed.
             */
            error = vn_open_auth_do_create(ndp, vap, fmode, &did_create, &did_open, ctx);

            dvp = ndp->ni_dvp;
            vp = ndp->ni_vp;

            /*
             * Detected a node that the filesystem couldn't handle.  Don't call
             * nameidone() yet, because we need that path buffer.
             */
            if (error == EKEEPLOOKING) {
                if (!batched) {
                    panic("EKEEPLOOKING from a filesystem that doesn't support compound VNOPs?");
                }
                goto continue_create_lookup;
            }

            nameidone(ndp);
            if (dvp) {
                panic("Shouldn't have a dvp here.");
            }

            if (error) {
                /*
                 * Check for a create race.
                 */
                if ((error == EEXIST) && !(fmode & O_EXCL)){
                    error = EREDRIVEOPEN;
                }
                goto bad;
            }

            need_vnop_open = !did_open;
        } else {
            if (fmode & O_EXCL)
                error = EEXIST;
            /*
             * We have a vnode.  Use compound open if available
             * or else fall through to "traditional" path.  Note: can't
             * do a compound open for root, because the parent belongs
             * to a different FS.
             */
            if (error == 0 && batched && (vnode_mount(dvp) == vnode_mount(vp))) {
                error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);

                if (error == 0) {
                    vp = ndp->ni_vp;
                    need_vnop_open = FALSE;
                } else if (error == EKEEPLOOKING) {
                    if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
                        panic("EKEEPLOOKING, but continue flag not set?");
                    }
                    goto continue_create_lookup;
                }
            }
            nameidone(ndp);
            vnode_put(dvp);
            ndp->ni_dvp = NULLVP;

            if (error) {
                goto bad;
            }

            fmode &= ~O_CREAT;

            /* Fall through */
        }
    } else {
        /*
         * Not O_CREAT
         */
        ndp->ni_cnd.cn_nameiop = LOOKUP;
        /* Inherit USEDVP, vnode_open() supported flags only */
        ndp->ni_cnd.cn_flags &= (USEDVP | NOCROSSMOUNT);
        ndp->ni_cnd.cn_flags |= FOLLOW | LOCKLEAF | AUDITVNPATH1 | WANTPARENT;
#if NAMEDRSRCFORK
        /* open calls are allowed for resource forks. */
        ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
        ndp->ni_flag = NAMEI_COMPOUNDOPEN;

        /* preserve NOFOLLOW from vnode_open() */
        if (fmode & O_NOFOLLOW || fmode & O_SYMLINK || (origcnflags & FOLLOW) == 0) {
            ndp->ni_cnd.cn_flags &= ~FOLLOW;
        }

        /* Do a lookup, possibly going directly to filesystem for compound operation */
        do {
            if ( (error = namei(ndp)) )
                goto out;
            vp = ndp->ni_vp;
            dvp = ndp->ni_dvp;

            /* Check for batched lookup-open */
            batched = vnode_compound_open_available(dvp);
            if (batched && ((vp == NULLVP) || (vnode_mount(dvp) == vnode_mount(vp)))) {
                error = VNOP_COMPOUND_OPEN(dvp, &ndp->ni_vp, ndp, 0, fmode, NULL, NULL, ctx);
                vp = ndp->ni_vp;
                if (error == 0) {
                    need_vnop_open = FALSE;
                } else if (error == EKEEPLOOKING) {
                    if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) {
                        panic("EKEEPLOOKING, but continue flag not set?");
                    }
                }
            }
        } while (error == EKEEPLOOKING);

        nameidone(ndp);
        vnode_put(dvp);
        ndp->ni_dvp = NULLVP;

        if (error) {
            goto bad;
        }
    }
    /*
     * By this point, nameidone() is called, dvp iocount is dropped,
     * and dvp pointer is cleared.
     */
    if (ndp->ni_dvp != NULLVP) {
        panic("Haven't cleaned up adequately in vn_open_auth()");
    }
    /*
     * Expect to use this code for filesystems without compound VNOPs, for the root
     * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
     * and for shadow files, which do not live on the same filesystems as their "parents."
     */
    if (need_vnop_open) {
        if (batched && !vnode_isvroot(vp) && !vnode_isnamedstream(vp)) {
            panic("Why am I trying to use VNOP_OPEN() on anything other than the root or a named stream?");
        }

        if (!did_create) {
            error = vn_authorize_open_existing(vp, &ndp->ni_cnd, fmode, ctx, NULL);
            if (error) {
                goto bad;
            }
        }

#if CONFIG_PROTECT
        // If raw encrypted mode is requested, handle that here
        if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
                && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) {
            fmode |= FENCRYPTED;
        }
        if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags)
                && ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) {
            /* Don't allow unencrypted io request from user space unless entitled */
            boolean_t entitled = FALSE;
            entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.security.file-unencrypt-access");
            if (!entitled) {
                error = EPERM;
                goto bad;
            }
            fmode |= FUNENCRYPTED;
        }

        /*
         * Perform any content protection access checks prior to calling
         * into the filesystem.
         */
        error = cp_handle_open (vp, fmode);
        if (error) {
            goto bad;
        }
#endif

        error = VNOP_OPEN(vp, fmode, ctx);
        if (error) {
            goto bad;
        }
        need_vnop_open = FALSE;
    }
    // if the vnode is tagged VOPENEVT and the current process
    // has the P_CHECKOPENEVT flag set, then we or in the O_EVTONLY
    // flag to the open mode so that this open won't count against
    // the vnode when carbon delete() does a vnode_isinuse() to see
    // if a file is currently in use.  this allows spotlight
    // importers to not interfere with carbon apps that depend on
    // the no-delete-if-busy semantics of carbon delete().
    // (see the illustrative userspace sketch after this function)
    //
    if (!did_create && (vp->v_flag & VOPENEVT) && (current_proc()->p_flag & P_CHECKOPENEVT)) {
        fmode |= O_EVTONLY;
    }

    /*
     * Grab reference, etc.
     */
    error = vn_open_auth_finish(vp, fmode, ctx);
    if (error) {
        ref_failed = TRUE;
        goto bad;
    }
    /* Compound VNOP open is responsible for doing the truncate */
    if (batched || did_create)
        fmode &= ~O_TRUNC;

    *fmodep = fmode;
    return (0);

bad:
    /* Opened either explicitly or by a batched create */
    if (!need_vnop_open) {
        VNOP_CLOSE(vp, fmode, ctx);
    }

    /* Aggressively recycle shadow files if we error'd out during open() */
    if ((vnode_isnamedstream(vp)) &&
            (vp->v_parent != NULLVP) &&
            (vnode_isshadow(vp))) {
        vnode_recycle(vp);
    }

    vnode_put(vp);
    ndp->ni_vp = NULLVP;
    /*
     * Check for a race against unlink.  We had a vnode
     * but according to vnode_authorize or VNOP_OPEN it
     * no longer exists.
     *
     * EREDRIVEOPEN: means that we were hit by the tty allocation race.
     */
    if (((error == ENOENT) && (*fmodep & O_CREAT)) || (error == EREDRIVEOPEN) || ref_failed) {
        /*
         * We'll retry here but it may be possible that we get
         * into a retry "spin" inside the kernel and not allow
         * threads, which need to run in order for the retry
         * loop to end, to run. An example is an open of a
         * terminal which is getting revoked and we spin here
         * without yielding because namei and VNOP_OPEN are
         * successful but vnode_ref fails. The revoke needs
         * threads with an iocount to run but if we spin here we
         * may possibly be blocking other threads from running.
         *
         * We start yielding the CPU after some number of
         * retries for increasing durations. Note that this is
         * still a loop without an exit condition.
         */
        nretries += 1;
        if (nretries > RETRY_NO_YIELD_COUNT) {
            /* Every hz/100 secs is 10 msecs ... */
            tsleep(&nretries, PVFS, "vn_open_auth_retry",
                    MIN((nretries * (hz/100)), hz));
        }
        goto again;
    }

out:
    return (error);
}
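/*
 * Illustrative userspace sketch (not part of this file, not compiled).  It
 * shows the caller-visible side of the O_EVTONLY behavior referenced in the
 * VOPENEVT/P_CHECKOPENEVT note above: an observer process opens a file with
 * O_EVTONLY so that descriptor does not count as "in use" for
 * delete-if-busy checks.  The path and error handling are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* O_EVTONLY: descriptor is intended for event notification only. */
	int fd = open("/tmp/example-file", O_EVTONLY);	/* hypothetical path */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ... watch the file (e.g. via kqueue), then ... */
	close(fd);
	return 0;
}
#endif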
#if vn_access_DEPRECATED
/*
 * Authorize an action against a vnode.  This has been the canonical way to
 * ensure that the credential/process/etc. referenced by a vfs_context
 * is granted the rights called out in 'mode' against the vnode 'vp'.
 *
 * Unfortunately, the use of VREAD/VWRITE/VEXEC makes it very difficult
 * to add support for more rights.  As such, this interface will be deprecated
 * and callers will use vnode_authorize instead.
 */
int
vn_access(vnode_t vp, int mode, vfs_context_t context)
{
    kauth_action_t action;

    action = 0;
    if (mode & VREAD)
        action |= KAUTH_VNODE_READ_DATA;
    if (mode & VWRITE)
        action |= KAUTH_VNODE_WRITE_DATA;
    if (mode & VEXEC)
        action |= KAUTH_VNODE_EXECUTE;

    return(vnode_authorize(vp, NULL, action, context));
}
#endif	/* vn_access_DEPRECATED */
/*
 * Close a file.
 */
int
vn_close(struct vnode *vp, int flags, vfs_context_t ctx)
{
    int error;
    int flusherror = 0;

#if NAMEDRSRCFORK
    /* Sync data from resource fork shadow file if needed. */
    if ((vp->v_flag & VISNAMEDSTREAM) &&
            (vp->v_parent != NULLVP) &&
            vnode_isshadow(vp)) {
        if (flags & FWASWRITTEN) {
            flusherror = vnode_flushnamedstream(vp->v_parent, vp, ctx);
        }
    }
#endif

    /* work around for foxhound */
    if (vnode_isspec(vp))
        (void)vnode_rele_ext(vp, flags, 0);

    /*
     * On HFS, we flush when the last writer closes.  We do this
     * because resource fork vnodes hold a reference on data fork
     * vnodes and that will prevent them from getting VNOP_INACTIVE
     * which will delay when we flush cached data.  In future, we
     * might find it beneficial to do this for all file systems.
     * Note that it's OK to access v_writecount without the lock
     * in this context.
     */
    if (vp->v_tag == VT_HFS && (flags & FWRITE) && vp->v_writecount == 1)
        VNOP_FSYNC(vp, MNT_NOWAIT, ctx);

    error = VNOP_CLOSE(vp, flags, ctx);

#if CONFIG_FSE
    if (flags & FWASWRITTEN) {
        if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
            add_fsevent(FSE_CONTENT_MODIFIED, ctx,
                    FSE_ARG_VNODE, vp,
                    FSE_ARG_DONE);
        }
    }
#endif

    if (!vnode_isspec(vp))
        (void)vnode_rele_ext(vp, flags, 0);

    if (flusherror) {
        error = flusherror;
    }
    return (error);
}
static int
vn_read_swapfile(
    vnode_t vp,
    uio_t   uio)
{
    int     error;
    off_t   swap_count, this_count;
    off_t   file_end, read_end;
    off_t   prev_resid;
    char    *my_swap_page;

    /*
     * Reading from a swap file will get you zeroes.
     */
    my_swap_page = NULL;
    error = 0;
    swap_count = uio_resid(uio);

    file_end = ubc_getsize(vp);
    read_end = uio->uio_offset + uio_resid(uio);
    if (uio->uio_offset >= file_end) {
        /* uio starts after end of file: nothing to read */
        swap_count = 0;
    } else if (read_end > file_end) {
        /* uio extends beyond end of file: stop before that */
        swap_count -= (read_end - file_end);
    }

    while (swap_count > 0) {
        if (my_swap_page == NULL) {
            MALLOC(my_swap_page, char *, PAGE_SIZE,
                    M_TEMP, M_WAITOK);
            memset(my_swap_page, '\0', PAGE_SIZE);
            /* add an end-of-line to keep line counters happy */
            my_swap_page[PAGE_SIZE-1] = '\n';
        }
        this_count = swap_count;
        if (this_count > PAGE_SIZE) {
            this_count = PAGE_SIZE;
        }

        prev_resid = uio_resid(uio);
        error = uiomove((caddr_t) my_swap_page,
                this_count,
                uio);
        if (error) {
            break;
        }
        swap_count -= (prev_resid - uio_resid(uio));
    }
    if (my_swap_page != NULL) {
        FREE(my_swap_page, M_TEMP);
        my_swap_page = NULL;
    }

    return error;
}
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(
    enum uio_rw rw,
    struct vnode *vp,
    caddr_t base,
    int len,
    off_t offset,
    enum uio_seg segflg,
    int ioflg,
    kauth_cred_t cred,
    int *aresid,
    proc_t p)
{
    int64_t resid;
    int result;

    result = vn_rdwr_64(rw,
            vp,
            (uint64_t)(uintptr_t)base,
            (int64_t)len,
            offset,
            segflg,
            ioflg,
            cred,
            &resid,
            p);

    /* "resid" should be bounded above by "len," which is an int */
    if (aresid != NULL) {
        *aresid = resid;
    }

    return result;
}

int
vn_rdwr_64(
    enum uio_rw rw,
    struct vnode *vp,
    uint64_t base,
    int64_t len,
    off_t offset,
    enum uio_seg segflg,
    int ioflg,
    kauth_cred_t cred,
    int64_t *aresid,
    proc_t p)
{
    uio_t auio;
    int spacetype;
    struct vfs_context context;
    int error = 0;
    char uio_buf[ UIO_SIZEOF(1) ];

    context.vc_thread = current_thread();
    context.vc_ucred = cred;

    if (UIO_SEG_IS_USER_SPACE(segflg)) {
        spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32;
    } else {
        spacetype = UIO_SYSSPACE;
    }
    auio = uio_createwithbuffer(1, offset, spacetype, rw,
            &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, base, len);

#if CONFIG_MACF
    /* XXXMAC
     *	IO_NOAUTH should be re-examined.
     *	Likely that mediation should be performed in caller.
     */
    if ((ioflg & IO_NOAUTH) == 0) {
        /* passed cred is fp->f_cred */
        if (rw == UIO_READ)
            error = mac_vnode_check_read(&context, cred, vp);
        else
            error = mac_vnode_check_write(&context, cred, vp);
    }
#endif

    if (error == 0) {
        if (rw == UIO_READ) {
            if (vnode_isswap(vp) && ((ioflg & IO_SWAP_DISPATCH) == 0)) {
                error = vn_read_swapfile(vp, auio);
            } else {
                error = VNOP_READ(vp, auio, ioflg, &context);
            }
        } else {
            error = VNOP_WRITE(vp, auio, ioflg, &context);
        }
    }

    if (aresid)
        *aresid = uio_resid(auio);
    else if (uio_resid(auio) && error == 0)
        error = EIO;
    return (error);
}
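/*
 * A minimal, hypothetical sketch (not part of this file, not compiled) of how
 * in-kernel code typically drives vn_rdwr() to read a small header from an
 * already-opened vnode into a stack buffer.  The signature used matches the
 * wrapper above as best understood; 'read_header_sketch', the 512-byte size,
 * the zero offset and the IO_NODELOCKED flag choice are illustrative
 * assumptions, not something this file defines.
 */
#if 0	/* illustrative sketch only */
static int
read_header_sketch(vnode_t vp, vfs_context_t ctx)
{
	char hdr[512];		/* hypothetical header size */
	int resid = 0;
	int error;

	/* Read sizeof(hdr) bytes at offset 0 into a kernel-space buffer. */
	error = vn_rdwr(UIO_READ, vp, (caddr_t)hdr, sizeof(hdr), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, vfs_context_ucred(ctx), &resid,
	    vfs_context_proc(ctx));
	if (error == 0 && resid != 0) {
		/* short read: fewer than sizeof(hdr) bytes were available */
		error = EINVAL;
	}
	return error;
}
#endif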
void
vn_offset_lock(struct fileglob *fg)
{
    lck_mtx_lock_spin(&fg->fg_lock);
    while (fg->fg_lflags & FG_OFF_LOCKED) {
        fg->fg_lflags |= FG_OFF_LOCKWANT;
        msleep(&fg->fg_lflags, &fg->fg_lock, PVFS | PSPIN,
            "fg_offset_lock_wait", 0);
    }
    fg->fg_lflags |= FG_OFF_LOCKED;
    lck_mtx_unlock(&fg->fg_lock);
}
void
vn_offset_unlock(struct fileglob *fg)
{
    int lock_wanted = 0;

    lck_mtx_lock_spin(&fg->fg_lock);
    if (fg->fg_lflags & FG_OFF_LOCKWANT) {
        lock_wanted = 1;
    }
    fg->fg_lflags &= ~(FG_OFF_LOCKED | FG_OFF_LOCKWANT);
    lck_mtx_unlock(&fg->fg_lock);
    if (lock_wanted) {
        wakeup(&fg->fg_lflags);
    }
}
/*
 * File table vnode read routine.
 */
static int
vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
{
    struct vnode *vp;
    int error;
    int ioflag;
    off_t count;
    int offset_locked = 0;

    vp = (struct vnode *)fp->f_fglob->fg_data;
    if ( (error = vnode_getwithref(vp)) ) {
        return(error);
    }

#if CONFIG_MACF
    error = mac_vnode_check_read(ctx, vfs_context_ucred(ctx), vp);
    if (error) {
        (void)vnode_put(vp);
        return (error);
    }
#endif

    /* This signals to VNOP handlers that this read came from a file table read */
    ioflag = IO_SYSCALL_DISPATCH;

    if (fp->f_fglob->fg_flag & FNONBLOCK)
        ioflag |= IO_NDELAY;
    if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp))
        ioflag |= IO_NOCACHE;
    if (fp->f_fglob->fg_flag & FENCRYPTED) {
        ioflag |= IO_ENCRYPTED;
    }
    if (fp->f_fglob->fg_flag & FUNENCRYPTED) {
        ioflag |= IO_SKIP_ENCRYPTION;
    }
    if (fp->f_fglob->fg_flag & O_EVTONLY) {
        ioflag |= IO_EVTONLY;
    }
    if (fp->f_fglob->fg_flag & FNORDAHEAD)
        ioflag |= IO_RAOFF;

    if ((flags & FOF_OFFSET) == 0) {
        if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
            vn_offset_lock(fp->f_fglob);
            offset_locked = 1;
        }
        uio->uio_offset = fp->f_fglob->fg_offset;
    }
    count = uio_resid(uio);

    if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) {
        /* special case for swap files */
        error = vn_read_swapfile(vp, uio);
    } else {
        error = VNOP_READ(vp, uio, ioflag, ctx);
    }
    if ((flags & FOF_OFFSET) == 0) {
        fp->f_fglob->fg_offset += count - uio_resid(uio);
        if (offset_locked) {
            vn_offset_unlock(fp->f_fglob);
            offset_locked = 0;
        }
    }

    (void)vnode_put(vp);
    return (error);
}
/*
 * File table vnode write routine.
 */
static int
vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
{
    struct vnode *vp;
    int error, ioflag;
    off_t count;
    int clippedsize = 0;
    int partialwrite = 0;
    int residcount, oldcount;
    int offset_locked = 0;
    proc_t p = vfs_context_proc(ctx);

    count = 0;
    vp = (struct vnode *)fp->f_fglob->fg_data;
    if ( (error = vnode_getwithref(vp)) ) {
        return(error);
    }

#if CONFIG_MACF
    error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp);
    if (error) {
        (void)vnode_put(vp);
        return (error);
    }
#endif

    /*
     * IO_SYSCALL_DISPATCH signals to VNOP handlers that this write came from
     * a file table write
     */
    ioflag = (IO_UNIT | IO_SYSCALL_DISPATCH);

    if (vp->v_type == VREG && (fp->f_fglob->fg_flag & O_APPEND))
        ioflag |= IO_APPEND;
    if (fp->f_fglob->fg_flag & FNONBLOCK)
        ioflag |= IO_NDELAY;
    if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp))
        ioflag |= IO_NOCACHE;
    if (fp->f_fglob->fg_flag & FNODIRECT)
        ioflag |= IO_NODIRECT;
    if (fp->f_fglob->fg_flag & FSINGLE_WRITER)
        ioflag |= IO_SINGLE_WRITER;
    if (fp->f_fglob->fg_flag & O_EVTONLY)
        ioflag |= IO_EVTONLY;

    /*
     * Treat synchronous mounts and O_FSYNC on the fd as equivalent.
     *
     * XXX We treat O_DSYNC as O_FSYNC for now, since we can not delay
     * XXX the non-essential metadata without some additional VFS work;
     * XXX the intent at this point is to plumb the interface for it.
     */
    if ((fp->f_fglob->fg_flag & (O_FSYNC|O_DSYNC)) ||
            (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) {
        ioflag |= IO_SYNC;
    }
    if ((flags & FOF_OFFSET) == 0) {
        if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) {
            vn_offset_lock(fp->f_fglob);
            offset_locked = 1;
        }
        uio->uio_offset = fp->f_fglob->fg_offset;
        count = uio_resid(uio);
    }
    if (((flags & FOF_OFFSET) == 0) &&
            vfs_context_proc(ctx) && (vp->v_type == VREG) &&
            (((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) ||
            ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)))) {
        /*
         * If the requested residual would cause us to go past the
         * administrative limit, then we need to adjust the residual
         * down to cause fewer bytes than requested to be written.  If
         * we can't do that (e.g. the residual is already 1 byte),
         * then we fail the write with EFBIG.
         * (see the illustrative userspace sketch after this function)
         */
        residcount = uio_resid(uio);
        if ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
            clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur;
        } else if ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)) {
            clippedsize = (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset);
        }
        if (clippedsize >= residcount) {
            psignal(p, SIGXFSZ);
            error = EFBIG;
            goto error_out;
        }
        partialwrite = 1;
        uio_setresid(uio, residcount-clippedsize);
    }
    if ((flags & FOF_OFFSET) != 0) {
        /* for pwrite, append should be ignored */
        ioflag &= ~IO_APPEND;
        if (p && (vp->v_type == VREG) &&
                ((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
            psignal(p, SIGXFSZ);
            error = EFBIG;
            goto error_out;
        }
        if (p && (vp->v_type == VREG) &&
                ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
            //Debugger("vn_bwrite:overstepping the bounds");
            residcount = uio_resid(uio);
            clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur;
            partialwrite = 1;
            uio_setresid(uio, residcount-clippedsize);
        }
    }

    error = VNOP_WRITE(vp, uio, ioflag, ctx);

    if (partialwrite) {
        oldcount = uio_resid(uio);
        uio_setresid(uio, oldcount + clippedsize);
    }

    if ((flags & FOF_OFFSET) == 0) {
        if (ioflag & IO_APPEND)
            fp->f_fglob->fg_offset = uio->uio_offset;
        else
            fp->f_fglob->fg_offset += count - uio_resid(uio);
        if (offset_locked) {
            vn_offset_unlock(fp->f_fglob);
            offset_locked = 0;
        }
    }

    /*
     * Set the credentials on successful writes
     */
    if ((error == 0) && (vp->v_tag == VT_NFS) && (UBCINFOEXISTS(vp))) {
        /*
         * When called from aio subsystem, we only have the proc from
         * which to get the credential, at this point, so use that
         * instead.  This means aio functions are incompatible with
         * per-thread credentials (aio operations are proxied).  We
         * can't easily correct the aio vs. settid race in this case
         * anyway, so we disallow it.
         */
        if ((flags & FOF_PCRED) == 0) {
            ubc_setthreadcred(vp, p, current_thread());
        } else {
            ubc_setcred(vp, p);
        }
    }
    (void)vnode_put(vp);
    return (error);

error_out:
    if (offset_locked) {
        vn_offset_unlock(fp->f_fglob);
    }
    (void)vnode_put(vp);
    return (error);
}
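/*
 * Illustrative userspace sketch (not part of this file, not compiled) of the
 * RLIMIT_FSIZE clipping described in vn_write() above: a write that crosses
 * the limit is shortened, and a write starting at or beyond the limit fails
 * with EFBIG after SIGXFSZ is posted.  The path, buffer sizes and 4096-byte
 * limit are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int
main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = RLIM_INFINITY };
	char buf[8192];

	signal(SIGXFSZ, SIG_IGN);	/* otherwise SIGXFSZ terminates the process */
	setrlimit(RLIMIT_FSIZE, &rl);
	memset(buf, 'x', sizeof(buf));

	int fd = open("/tmp/limit-demo", O_CREAT | O_TRUNC | O_WRONLY, 0600);	/* hypothetical path */
	/* First write is clipped down to the 4096-byte limit ... */
	ssize_t n = write(fd, buf, sizeof(buf));
	printf("wrote %zd of %zu bytes\n", n, sizeof(buf));
	/* ... and a further write entirely past the limit fails with EFBIG. */
	if (write(fd, buf, sizeof(buf)) < 0 && errno == EFBIG)
		printf("second write failed with EFBIG, as expected\n");
	close(fd);
	return 0;
}
#endif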
/*
 * File table vnode stat routine.
 *
 * Returns:	0			Success
 */
int
vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64, vfs_context_t ctx)
{
    struct vnode_attr va;
    int error;
    u_short mode;
    kauth_filesec_t fsec;
    struct stat *sb = (struct stat *)0;	/* warning avoidance ; protected by isstat64 */
    struct stat64 * sb64 = (struct stat64 *)0;	/* warning avoidance ; protected by isstat64 */

    if (isstat64 != 0)
        sb64 = (struct stat64 *)sbptr;
    else
        sb = (struct stat *)sbptr;
    memset(&va, 0, sizeof(va));
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_mode);
    VATTR_WANTED(&va, va_type);
    VATTR_WANTED(&va, va_nlink);
    VATTR_WANTED(&va, va_uid);
    VATTR_WANTED(&va, va_gid);
    VATTR_WANTED(&va, va_rdev);
    VATTR_WANTED(&va, va_data_size);
    VATTR_WANTED(&va, va_access_time);
    VATTR_WANTED(&va, va_modify_time);
    VATTR_WANTED(&va, va_change_time);
    VATTR_WANTED(&va, va_create_time);
    VATTR_WANTED(&va, va_flags);
    VATTR_WANTED(&va, va_gen);
    VATTR_WANTED(&va, va_iosize);
    /* lower layers will synthesise va_total_alloc from va_data_size if required */
    VATTR_WANTED(&va, va_total_alloc);
    if (xsec != NULL) {
        VATTR_WANTED(&va, va_uuuid);
        VATTR_WANTED(&va, va_guuid);
        VATTR_WANTED(&va, va_acl);
    }
    error = vnode_getattr(vp, &va, ctx);
    if (error)
        goto out;
    /*
     * Copy from vattr table
     */
    if (isstat64 != 0) {
        sb64->st_dev = va.va_fsid;
        sb64->st_ino = (ino64_t)va.va_fileid;
    } else {
        sb->st_dev = va.va_fsid;
        sb->st_ino = (ino_t)va.va_fileid;
    }
    mode = va.va_mode;
    switch (vp->v_type) {
    /* ... (map vp->v_type to the S_IF* file type bits in 'mode'; elided) ... */
    }

    if (isstat64 != 0) {
        sb64->st_mode = mode;
        sb64->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? (u_int16_t)va.va_nlink : 1;
        sb64->st_uid = va.va_uid;
        sb64->st_gid = va.va_gid;
        sb64->st_rdev = va.va_rdev;
        sb64->st_size = va.va_data_size;
        sb64->st_atimespec = va.va_access_time;
        sb64->st_mtimespec = va.va_modify_time;
        sb64->st_ctimespec = va.va_change_time;
        if (VATTR_IS_SUPPORTED(&va, va_create_time)) {
            sb64->st_birthtimespec = va.va_create_time;
        } else {
            sb64->st_birthtimespec.tv_sec = sb64->st_birthtimespec.tv_nsec = 0;
        }
        sb64->st_blksize = va.va_iosize;
        sb64->st_flags = va.va_flags;
        sb64->st_blocks = roundup(va.va_total_alloc, 512) / 512;
    } else {
        sb->st_mode = mode;
        sb->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? (u_int16_t)va.va_nlink : 1;
        sb->st_uid = va.va_uid;
        sb->st_gid = va.va_gid;
        sb->st_rdev = va.va_rdev;
        sb->st_size = va.va_data_size;
        sb->st_atimespec = va.va_access_time;
        sb->st_mtimespec = va.va_modify_time;
        sb->st_ctimespec = va.va_change_time;
        sb->st_blksize = va.va_iosize;
        sb->st_flags = va.va_flags;
        sb->st_blocks = roundup(va.va_total_alloc, 512) / 512;
    }
    /* if we're interested in extended security data and we got an ACL */
    if (xsec != NULL) {
        if (!VATTR_IS_SUPPORTED(&va, va_acl) &&
                !VATTR_IS_SUPPORTED(&va, va_uuuid) &&
                !VATTR_IS_SUPPORTED(&va, va_guuid)) {
            *xsec = KAUTH_FILESEC_NONE;
        } else {
            if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
                fsec = kauth_filesec_alloc(va.va_acl->acl_entrycount);
            } else {
                fsec = kauth_filesec_alloc(0);
            }
            if (fsec == NULL) {
                error = ENOMEM;
                goto out;
            }
            fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
            if (VATTR_IS_SUPPORTED(&va, va_uuuid)) {
                fsec->fsec_owner = va.va_uuuid;
            } else {
                fsec->fsec_owner = kauth_null_guid;
            }
            if (VATTR_IS_SUPPORTED(&va, va_guuid)) {
                fsec->fsec_group = va.va_guuid;
            } else {
                fsec->fsec_group = kauth_null_guid;
            }
            if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
                bcopy(va.va_acl, &(fsec->fsec_acl), KAUTH_ACL_COPYSIZE(va.va_acl));
            } else {
                fsec->fsec_acl.acl_entrycount = KAUTH_FILESEC_NOACL;
            }
            *xsec = fsec;
        }
    }
    /* Do not give the generation number out to unprivileged users */
    if (va.va_gen && !vfs_context_issuser(ctx)) {
        if (isstat64 != 0)
            sb64->st_gen = 0;
        else
            sb->st_gen = 0;
    } else {
        if (isstat64 != 0)
            sb64->st_gen = va.va_gen;
        else
            sb->st_gen = va.va_gen;
    }

    error = 0;
out:
    if (VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL)
        kauth_acl_free(va.va_acl);
    return (error);
}
int
vn_stat(struct vnode *vp, void *sb, kauth_filesec_t *xsec, int isstat64, vfs_context_t ctx)
{
    int error;

#if CONFIG_MACF
    error = mac_vnode_check_stat(ctx, NOCRED, vp);
    if (error)
        return (error);
#endif

    /* authorize */
    if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, ctx)) != 0)
        return(error);

    /* actual stat */
    return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx));
}
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
{
    struct vnode *vp = ((struct vnode *)fp->f_fglob->fg_data);
    off_t file_size;
    int error;
    struct vnode *ttyvp;
    struct session * sessp;

    if ( (error = vnode_getwithref(vp)) ) {
        return(error);
    }

#if CONFIG_MACF
    error = mac_vnode_check_ioctl(ctx, vp, com);
    if (error)
        goto out;
#endif

    switch (vp->v_type) {
    case VREG:
    case VDIR:
        if (com == FIONREAD) {
            if ((error = vnode_size(vp, &file_size, ctx)) != 0)
                goto out;
            *(int *)data = file_size - fp->f_fglob->fg_offset;
            goto out;
        }
        if (com == FIONBIO || com == FIOASYNC) {	/* XXX */
            goto out;
        }
        /* fall into ... */

    default:
        error = ENOTTY;
        goto out;

    case VFIFO:
    case VCHR:
    case VBLK:

        /* Should not be able to set block size from user space */
        if (com == DKIOCSETBLOCKSIZE) {
            error = EPERM;
            goto out;
        }

        if (com == FIODTYPE) {
            if (vp->v_type == VBLK) {
                if (major(vp->v_rdev) >= nblkdev) {
                    error = ENXIO;
                    goto out;
                }
                *(int *)data = bdevsw[major(vp->v_rdev)].d_type;

            } else if (vp->v_type == VCHR) {
                if (major(vp->v_rdev) >= nchrdev) {
                    error = ENXIO;
                    goto out;
                }
                *(int *)data = cdevsw[major(vp->v_rdev)].d_type;
            } else {
                error = ENOTTY;
                goto out;
            }
            goto out;
        }
        error = VNOP_IOCTL(vp, com, data, fp->f_fglob->fg_flag, ctx);

        if (error == 0 && com == TIOCSCTTY) {
            sessp = proc_session(vfs_context_proc(ctx));

            session_lock(sessp);
            ttyvp = sessp->s_ttyvp;
            sessp->s_ttyvp = vp;
            sessp->s_ttyvid = vnode_vid(vp);
            session_unlock(sessp);
            session_rele(sessp);
        }
    }
out:
    (void)vnode_put(vp);
    return(error);
}
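/*
 * Illustrative userspace sketch (not part of this file, not compiled) of the
 * FIONREAD case serviced by vn_ioctl() above: for a regular file the ioctl
 * reports file size minus the current descriptor offset.  The path is
 * illustrative; any readable regular file works.
 */
#if 0	/* illustrative sketch only */
#include <fcntl.h>
#include <stdio.h>
#include <sys/filio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/etc/hosts", O_RDONLY);	/* illustrative path */
	int remaining = 0;

	if (fd >= 0 && ioctl(fd, FIONREAD, &remaining) == 0) {
		/* Regular-file FIONREAD: bytes between the offset and EOF. */
		printf("bytes readable before EOF: %d\n", remaining);
	}
	if (fd >= 0)
		close(fd);
	return 0;
}
#endif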
/*
 * File table vnode select routine.
 */
static int
vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx)
{
    int error;
    struct vnode * vp = (struct vnode *)fp->f_fglob->fg_data;
    struct vfs_context context;

    if ( (error = vnode_getwithref(vp)) == 0 ) {
        context.vc_thread = current_thread();
        context.vc_ucred = fp->f_fglob->fg_cred;

#if CONFIG_MACF
        /*
         * XXX We should use a per thread credential here; minimally,
         * XXX the process credential should have a persistent
         * XXX reference on it before being passed in here.
         */
        error = mac_vnode_check_select(ctx, vp, which);
        if (error == 0)
#endif
            error = VNOP_SELECT(vp, which, fp->f_fglob->fg_flag, wql, ctx);

        (void)vnode_put(vp);
    }
    return(error);
}
/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct fileglob *fg, vfs_context_t ctx)
{
    struct vnode *vp = fg->fg_data;
    int error = 0;

    if ( (error = vnode_getwithref(vp)) == 0 ) {
        if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE &&
                ((fg->fg_flag & FHASLOCK) != 0 ||
                (fg->fg_lflags & FG_HAS_OFDLOCK) != 0)) {
            struct flock lf = {
                .l_whence = SEEK_SET,
                .l_start = 0,
                .l_len = 0,
                .l_type = F_UNLCK
            };

            if ((fg->fg_flag & FHASLOCK) != 0)
                (void) VNOP_ADVLOCK(vp, (caddr_t)fg,
                    F_UNLCK, &lf, F_FLOCK, ctx, NULL);

            if ((fg->fg_lflags & FG_HAS_OFDLOCK) != 0)
                (void) VNOP_ADVLOCK(vp, (caddr_t)fg,
                    F_UNLCK, &lf, F_OFD_LOCK, ctx, NULL);
        }
        error = vn_close(vp, fg->fg_flag, ctx);
        (void) vnode_put(vp);
    }
    return (error);
}
/*
 * Returns:	0			Success
 */
int
vn_pathconf(vnode_t vp, int name, int32_t *retval, vfs_context_t ctx)
{
    int error = 0;
    struct vfs_attr vfa;

    switch(name) {
    case _PC_EXTENDED_SECURITY_NP:
        *retval = vfs_extendedsecurity(vnode_mount(vp)) ? 1 : 0;
        break;
    case _PC_AUTH_OPAQUE_NP:
        *retval = vfs_authopaque(vnode_mount(vp));
        break;
    case _PC_2_SYMLINKS:
        *retval = 1;	/* XXX NOTSUP on MSDOS, etc. */
        break;
    case _PC_ALLOC_SIZE_MIN:
        *retval = 1;	/* XXX lie: 1 byte */
        break;
    case _PC_ASYNC_IO:	/* unistd.h: _POSIX_ASYNCHRONOUS_IO */
        *retval = 1;	/* [AIO] option is supported */
        break;
    case _PC_PRIO_IO:	/* unistd.h: _POSIX_PRIORITIZED_IO */
        *retval = 0;	/* [PIO] option is not supported */
        break;
    case _PC_REC_INCR_XFER_SIZE:
        *retval = 4096;	/* XXX go from MIN to MAX 4K at a time */
        break;
    case _PC_REC_MIN_XFER_SIZE:
        *retval = 4096;	/* XXX recommend 4K minimum reads/writes */
        break;
    case _PC_REC_MAX_XFER_SIZE:
        *retval = 65536; /* XXX recommend 64K maximum reads/writes */
        break;
    case _PC_REC_XFER_ALIGN:
        *retval = 4096;	/* XXX recommend page aligned buffers */
        break;
    case _PC_SYMLINK_MAX:
        *retval = 255;	/* Minimum acceptable POSIX value */
        break;
    case _PC_SYNC_IO:	/* unistd.h: _POSIX_SYNCHRONIZED_IO */
        *retval = 0;	/* [SIO] option is not supported */
        break;
    case _PC_XATTR_SIZE_BITS:
        /* The number of bits used to store maximum extended
         * attribute size in bytes.  For example, if the maximum
         * attribute size supported by a file system is 128K, the
         * value returned will be 18.  However a value 18 can mean
         * that the maximum attribute size can be anywhere from
         * (256KB - 1) to 128KB.  As a special case, the resource
         * fork can have much larger size, and some file system
         * specific extended attributes can have smaller and preset
         * size; for example, Finder Info is always 32 bytes.
         */
        memset(&vfa, 0, sizeof(vfa));
        VFSATTR_INIT(&vfa);
        VFSATTR_WANTED(&vfa, f_capabilities);
        if (vfs_getattr(vnode_mount(vp), &vfa, ctx) == 0 &&
                (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) &&
                (vfa.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
                (vfa.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
            /* Supports native extended attributes */
            error = VNOP_PATHCONF(vp, name, retval, ctx);
        } else {
            /* Number of bits used to represent the maximum size of
             * extended attribute stored in an Apple Double file.
             */
            *retval = AD_XATTR_SIZE_BITS;
        }
        break;
    default:
        error = VNOP_PATHCONF(vp, name, retval, ctx);
        break;
    }

    return (error);
}
static int
vn_kqfilt_add(struct fileproc *fp, struct knote *kn, vfs_context_t ctx)
{
    struct vnode *vp;
    int error = 0;

    vp = (struct vnode *)fp->f_fglob->fg_data;

    /*
     * Don't attach a knote to a dead vnode.
     */
    if ((error = vget_internal(vp, 0, VNODE_NODEAD)) == 0) {
        switch (kn->kn_filter) {
        case EVFILT_READ:
        case EVFILT_WRITE:
            if (vnode_isfifo(vp)) {
                /* We'll only watch FIFOs that use our fifofs */
                if (!(vp->v_fifoinfo && vp->v_fifoinfo->fi_readsock)) {
                    error = ENOTSUP;
                }

            } else if (!vnode_isreg(vp)) {
                if (vnode_ischr(vp) &&
                        (error = spec_kqfilter(vp, kn)) == 0) {
                    /* claimed by a special device */
                    vnode_put(vp);
                    return 0;
                }

                error = EINVAL;
            }
            break;
        case EVFILT_VNODE:
            break;
        default:
            error = EINVAL;
        }

        if (error == 0) {

#if CONFIG_MACF
            error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp);
            if (error) {
                vnode_put(vp);
                return error;
            }
#endif

            kn->kn_hook = (void*)vp;
            kn->kn_hookid = vnode_vid(vp);
            kn->kn_fop = &vnode_filtops;

            vnode_lock(vp);
            KNOTE_ATTACH(&vp->v_knotes, kn);
            vnode_unlock(vp);

            /* Ask the filesystem to provide remove notifications, but ignore failure */
            VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx);
        }

        vnode_put(vp);
    }

    return (error);
}
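/*
 * Illustrative userspace sketch (not part of this file, not compiled):
 * registering an EVFILT_VNODE knote via kqueue(2)/kevent(2) is what drives
 * the vn_kqfilt_add()/filt_vnode() paths above.  The watched path is
 * hypothetical; error handling is omitted for brevity.
 */
#if 0	/* illustrative sketch only */
#include <fcntl.h>
#include <stdio.h>
#include <sys/event.h>
#include <unistd.h>

int
main(void)
{
	int kq = kqueue();
	int fd = open("/tmp/watched-file", O_EVTONLY);	/* hypothetical path */
	struct kevent change, event;

	/* Register interest in write/delete/rename events on the vnode. */
	EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_WRITE | NOTE_DELETE | NOTE_RENAME, 0, NULL);

	if (kevent(kq, &change, 1, &event, 1, NULL) > 0) {
		printf("vnode event fflags: 0x%x\n", event.fflags);
	}
	close(fd);
	close(kq);
	return 0;
}
#endif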
static void
filt_vndetach(struct knote *kn)
{
    vfs_context_t ctx = vfs_context_current();
    struct vnode *vp;

    vp = (struct vnode *)kn->kn_hook;
    if (vnode_getwithvid(vp, kn->kn_hookid))
        return;

    vnode_lock(vp);
    KNOTE_DETACH(&vp->v_knotes, kn);
    vnode_unlock(vp);

    /*
     * Tell a (generally networked) filesystem that we're no longer watching
     * If the FS wants to track contexts, it should still be using the one from
     * the VNODE_MONITOR_BEGIN.
     */
    VNOP_MONITOR(vp, 0, VNODE_MONITOR_END, (void*)kn, ctx);
    vnode_put(vp);
}
/*
 * Used for EVFILT_READ
 *
 * Takes only VFIFO or VREG. vnode is locked.  We handle the "poll" case
 * differently than the regular case for VREG files.  If not in poll(),
 * then we need to know current fileproc offset for VREG.
 */
static intptr_t
vnode_readable_data_count(vnode_t vp, off_t current_offset, int ispoll)
{
    if (vnode_isfifo(vp)) {
#if FIFO
        int cnt;
        int err = fifo_charcount(vp, &cnt);
        if (err == 0) {
            return (intptr_t)cnt;
        } else
#endif
        {
            return (intptr_t)0;
        }
    } else if (vnode_isreg(vp)) {
        if (ispoll) {
            return (intptr_t)1;
        }

        off_t amount;
        amount = vp->v_un.vu_ubcinfo->ui_size - current_offset;
        if (amount > (off_t)INTPTR_MAX) {
            return INTPTR_MAX;
        } else if (amount < (off_t)INTPTR_MIN) {
            return INTPTR_MIN;
        } else {
            return (intptr_t)amount;
        }
    } else {
        panic("Should never have an EVFILT_READ except for reg or fifo.");
        return 0;
    }
}
/*
 * Used for EVFILT_WRITE.
 *
 * For regular vnodes, we can always write (1).  For named pipes,
 * see how much space there is in the buffer.  Nothing else is covered.
 */
static intptr_t
vnode_writable_space_count(vnode_t vp)
{
    if (vnode_isfifo(vp)) {
#if FIFO
        long spc;
        int err = fifo_freespace(vp, &spc);
        if (err == 0) {
            return (intptr_t)spc;
        } else
#endif
        {
            return (intptr_t)0;
        }
    } else if (vnode_isreg(vp)) {
        return (intptr_t)1;
    } else {
        panic("Should never have an EVFILT_WRITE except for reg or fifo.");
        return 0;
    }
}
/*
 * Determine whether this knote should be active
 *
 * This is kind of subtle.
 *	--First, notice if the vnode has been revoked: if so, override hint
 *	--EVFILT_READ knotes are checked no matter what the hint is
 *	--Other knotes activate based on hint.
 *	--If hint is revoke, set special flags and activate
 */
static int
filt_vnode(struct knote *kn, long hint)
{
    vnode_t vp = (struct vnode *)kn->kn_hook;
    int activate = 0;
    long orig_hint = hint;

    if (0 == hint) {
        vnode_lock(vp);

        if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) {
            /* Is recycled */
            hint = NOTE_REVOKE;
        }
    } else {
        lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
    }

    /* Special handling for vnodes that are in recycle or already gone */
    if (NOTE_REVOKE == hint) {
        kn->kn_flags |= (EV_EOF | EV_ONESHOT);
        activate = 1;

        if ((kn->kn_filter == EVFILT_VNODE) && (kn->kn_sfflags & NOTE_REVOKE)) {
            kn->kn_fflags |= NOTE_REVOKE;
        }
    } else {
        switch(kn->kn_filter) {
        case EVFILT_READ:
            kn->kn_data = vnode_readable_data_count(vp, kn->kn_fp->f_fglob->fg_offset, (kn->kn_flags & EV_POLL));

            if (kn->kn_data != 0) {
                activate = 1;
            }
            break;
        case EVFILT_WRITE:
            kn->kn_data = vnode_writable_space_count(vp);

            if (kn->kn_data != 0) {
                activate = 1;
            }
            break;
        case EVFILT_VNODE:
            /* Check events this note matches against the hint */
            if (kn->kn_sfflags & hint) {
                kn->kn_fflags |= hint;	/* Set which event occurred */
            }
            if (kn->kn_fflags != 0) {
                activate = 1;
            }
            break;
        default:
            panic("Invalid knote filter on a vnode!\n");
        }
    }

    if (orig_hint == 0) {
        /*
         * Definitely need to unlock, may need to put
         */
        if (vnode_put_locked(vp) == 0) {
            vnode_unlock(vp);
        }
    }

    return (activate);
}