/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
 * FreeBSD-Id: nfs_vnops.c,v 1.72 1997/11/07 09:20:48 phk Exp $
 */
  68 #include <nfs/nfs_conf.h> 
  72  * vnode op calls for Sun NFS version 2 and 3 
  74 #include <sys/param.h> 
  75 #include <sys/kernel.h> 
  76 #include <sys/systm.h> 
  77 #include <sys/resourcevar.h> 
  78 #include <sys/proc_internal.h> 
  79 #include <sys/kauth.h> 
  80 #include <sys/mount_internal.h> 
  81 #include <sys/malloc.h> 
  82 #include <sys/kpi_mbuf.h> 
  84 #include <sys/vnode_internal.h> 
  85 #include <sys/dirent.h> 
  86 #include <sys/fcntl.h> 
  87 #include <sys/lockf.h> 
  88 #include <sys/ubc_internal.h> 
  90 #include <sys/signalvar.h> 
  91 #include <sys/uio_internal.h> 
  92 #include <sys/xattr.h> 
  94 #include <vfs/vfs_support.h> 
  99 #include <kern/clock.h> 
 100 #include <libkern/OSAtomic.h> 
 102 #include <miscfs/fifofs/fifo.h> 
 103 #include <miscfs/specfs/specdev.h> 
 105 #include <nfs/rpcv2.h> 
 106 #include <nfs/nfsproto.h> 
 108 #include <nfs/nfsnode.h> 
 109 #include <nfs/nfs_gss.h> 
 110 #include <nfs/nfsmount.h> 
 111 #include <nfs/nfs_lock.h> 
 112 #include <nfs/xdr_subs.h> 
 113 #include <nfs/nfsm_subs.h> 
 116 #include <netinet/in.h> 
 117 #include <netinet/in_var.h> 
 119 #include <vm/vm_kern.h> 
 120 #include <vm/vm_pageout.h> 
 122 #include <kern/task.h> 
 123 #include <kern/sched_prim.h> 
 125 #define NFS_VNOP_DBG(...) NFS_DBG(NFS_FAC_VNOP, 7, ## __VA_ARGS__) 
 126 #define DEFAULT_READLINK_NOCACHE 0 
 131 int     nfs_vnop_lookup(struct vnop_lookup_args 
*); 
 132 int     nfsspec_vnop_read(struct vnop_read_args 
*); 
 133 int     nfsspec_vnop_write(struct vnop_write_args 
*); 
 134 int     nfsspec_vnop_close(struct vnop_close_args 
*); 
 136 int     nfsfifo_vnop_read(struct vnop_read_args 
*); 
 137 int     nfsfifo_vnop_write(struct vnop_write_args 
*); 
 138 int     nfsfifo_vnop_close(struct vnop_close_args 
*); 
 140 int     nfs_vnop_ioctl(struct vnop_ioctl_args 
*); 
 141 int     nfs_vnop_select(struct vnop_select_args 
*); 
 142 int     nfs_vnop_setattr(struct vnop_setattr_args 
*); 
 143 int     nfs_vnop_fsync(struct vnop_fsync_args 
*); 
 144 int     nfs_vnop_rename(struct vnop_rename_args 
*); 
 145 int     nfs_vnop_readdir(struct vnop_readdir_args 
*); 
 146 int     nfs_vnop_readlink(struct vnop_readlink_args 
*); 
 147 int     nfs_vnop_pathconf(struct vnop_pathconf_args 
*); 
 148 int     nfs_vnop_pagein(struct vnop_pagein_args 
*); 
 149 int     nfs_vnop_pageout(struct vnop_pageout_args 
*); 
 150 int     nfs_vnop_blktooff(struct vnop_blktooff_args 
*); 
 151 int     nfs_vnop_offtoblk(struct vnop_offtoblk_args 
*); 
 152 int     nfs_vnop_blockmap(struct vnop_blockmap_args 
*); 
 153 int     nfs_vnop_monitor(struct vnop_monitor_args 
*); 
 155 int     nfs3_vnop_create(struct vnop_create_args 
*); 
 156 int     nfs3_vnop_mknod(struct vnop_mknod_args 
*); 
 157 int     nfs3_vnop_getattr(struct vnop_getattr_args 
*); 
 158 int     nfs3_vnop_link(struct vnop_link_args 
*); 
 159 int     nfs3_vnop_mkdir(struct vnop_mkdir_args 
*); 
 160 int     nfs3_vnop_rmdir(struct vnop_rmdir_args 
*); 
 161 int     nfs3_vnop_symlink(struct vnop_symlink_args 
*); 
 164 vnop_t 
**nfsv2_vnodeop_p
; 
 165 static const struct vnodeopv_entry_desc nfsv2_vnodeop_entries
[] = { 
 166         { .opve_op 
= &vnop_default_desc
, .opve_impl 
= (vnop_t 
*)vn_default_error 
}, 
 167         { .opve_op 
= &vnop_lookup_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_lookup 
},         /* lookup */ 
 168         { .opve_op 
= &vnop_create_desc
, .opve_impl 
= (vnop_t 
*)nfs3_vnop_create 
},        /* create */ 
 169         { .opve_op 
= &vnop_mknod_desc
, .opve_impl 
= (vnop_t 
*)nfs3_vnop_mknod 
},          /* mknod */ 
 170         { .opve_op 
= &vnop_open_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_open 
},             /* open */ 
 171         { .opve_op 
= &vnop_close_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_close 
},           /* close */ 
 172         { .opve_op 
= &vnop_access_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_access 
},         /* access */ 
 173         { .opve_op 
= &vnop_getattr_desc
, .opve_impl 
= (vnop_t 
*)nfs3_vnop_getattr 
},      /* getattr */ 
 174         { .opve_op 
= &vnop_setattr_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_setattr 
},       /* setattr */ 
 175         { .opve_op 
= &vnop_read_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_read 
},             /* read */ 
 176         { .opve_op 
= &vnop_write_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_write 
},           /* write */ 
 177         { .opve_op 
= &vnop_ioctl_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_ioctl 
},           /* ioctl */ 
 178         { .opve_op 
= &vnop_select_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_select 
},         /* select */ 
 179         { .opve_op 
= &vnop_revoke_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_revoke 
},         /* revoke */ 
 180         { .opve_op 
= &vnop_mmap_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_mmap 
},             /* mmap */ 
 181         { .opve_op 
= &vnop_mmap_check_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_mmap_check 
}, /* mmap_check */ 
 182         { .opve_op 
= &vnop_mnomap_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_mnomap 
},         /* mnomap */ 
 183         { .opve_op 
= &vnop_fsync_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_fsync 
},           /* fsync */ 
 184         { .opve_op 
= &vnop_remove_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_remove 
},         /* remove */ 
 185         { .opve_op 
= &vnop_link_desc
, .opve_impl 
= (vnop_t 
*)nfs3_vnop_link 
},            /* link */ 
 186         { .opve_op 
= &vnop_rename_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_rename 
},         /* rename */ 
 187         { .opve_op 
= &vnop_mkdir_desc
, .opve_impl 
= (vnop_t 
*)nfs3_vnop_mkdir 
},          /* mkdir */ 
 188         { .opve_op 
= &vnop_rmdir_desc
, .opve_impl 
= (vnop_t 
*)nfs3_vnop_rmdir 
},          /* rmdir */ 
 189         { .opve_op 
= &vnop_symlink_desc
, .opve_impl 
= (vnop_t 
*)nfs3_vnop_symlink 
},      /* symlink */ 
 190         { .opve_op 
= &vnop_readdir_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_readdir 
},       /* readdir */ 
 191         { .opve_op 
= &vnop_readlink_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_readlink 
},     /* readlink */ 
 192         { .opve_op 
= &vnop_inactive_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_inactive 
},     /* inactive */ 
 193         { .opve_op 
= &vnop_reclaim_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_reclaim 
},       /* reclaim */ 
 194         { .opve_op 
= &vnop_strategy_desc
, .opve_impl 
= (vnop_t 
*)err_strategy 
},          /* strategy */ 
 195         { .opve_op 
= &vnop_pathconf_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_pathconf 
},     /* pathconf */ 
 196         { .opve_op 
= &vnop_advlock_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_advlock 
},       /* advlock */ 
 197         { .opve_op 
= &vnop_bwrite_desc
, .opve_impl 
= (vnop_t 
*)err_bwrite 
},              /* bwrite */ 
 198         { .opve_op 
= &vnop_pagein_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_pagein 
},         /* Pagein */ 
 199         { .opve_op 
= &vnop_pageout_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_pageout 
},       /* Pageout */ 
 200         { .opve_op 
= &vnop_copyfile_desc
, .opve_impl 
= (vnop_t 
*)err_copyfile 
},          /* Copyfile */ 
 201         { .opve_op 
= &vnop_blktooff_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_blktooff 
},     /* blktooff */ 
 202         { .opve_op 
= &vnop_offtoblk_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_offtoblk 
},     /* offtoblk */ 
 203         { .opve_op 
= &vnop_blockmap_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_blockmap 
},     /* blockmap */ 
 204         { .opve_op 
= &vnop_monitor_desc
, .opve_impl 
= (vnop_t 
*)nfs_vnop_monitor 
},       /* monitor */ 
 205         { .opve_op 
= NULL
, .opve_impl 
= NULL 
} 
 207 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc 
= 
 208 { &nfsv2_vnodeop_p
, nfsv2_vnodeop_entries 
}; 
 212 vnop_t 
**nfsv4_vnodeop_p
; 
 213 static const struct vnodeopv_entry_desc nfsv4_vnodeop_entries
[] = { 
 214         { &vnop_default_desc
, (vnop_t 
*)vn_default_error 
}, 
 215         { &vnop_lookup_desc
, (vnop_t 
*)nfs_vnop_lookup 
},                        /* lookup */ 
 216         { &vnop_create_desc
, (vnop_t 
*)nfs4_vnop_create 
},                       /* create */ 
 217         { &vnop_mknod_desc
, (vnop_t 
*)nfs4_vnop_mknod 
},                         /* mknod */ 
 218         { &vnop_open_desc
, (vnop_t 
*)nfs_vnop_open 
},                            /* open */ 
 219         { &vnop_close_desc
, (vnop_t 
*)nfs_vnop_close 
},                          /* close */ 
 220         { &vnop_access_desc
, (vnop_t 
*)nfs_vnop_access 
},                        /* access */ 
 221         { &vnop_getattr_desc
, (vnop_t 
*)nfs4_vnop_getattr 
},                     /* getattr */ 
 222         { &vnop_setattr_desc
, (vnop_t 
*)nfs_vnop_setattr 
},                      /* setattr */ 
 223         { &vnop_read_desc
, (vnop_t 
*)nfs_vnop_read 
},                            /* read */ 
 224         { &vnop_write_desc
, (vnop_t 
*)nfs_vnop_write 
},                          /* write */ 
 225         { &vnop_ioctl_desc
, (vnop_t 
*)nfs_vnop_ioctl 
},                          /* ioctl */ 
 226         { &vnop_select_desc
, (vnop_t 
*)nfs_vnop_select 
},                        /* select */ 
 227         { &vnop_revoke_desc
, (vnop_t 
*)nfs_vnop_revoke 
},                        /* revoke */ 
 228         { &vnop_mmap_desc
, (vnop_t 
*)nfs_vnop_mmap 
},                            /* mmap */ 
 229         { &vnop_mmap_check_desc
, (vnop_t 
*)nfs_vnop_mmap_check 
},                /* mmap_check */ 
 230         { &vnop_mnomap_desc
, (vnop_t 
*)nfs_vnop_mnomap 
},                        /* mnomap */ 
 231         { &vnop_fsync_desc
, (vnop_t 
*)nfs_vnop_fsync 
},                          /* fsync */ 
 232         { &vnop_remove_desc
, (vnop_t 
*)nfs_vnop_remove 
},                        /* remove */ 
 233         { &vnop_link_desc
, (vnop_t 
*)nfs4_vnop_link 
},                           /* link */ 
 234         { &vnop_rename_desc
, (vnop_t 
*)nfs_vnop_rename 
},                        /* rename */ 
 235         { &vnop_mkdir_desc
, (vnop_t 
*)nfs4_vnop_mkdir 
},                         /* mkdir */ 
 236         { &vnop_rmdir_desc
, (vnop_t 
*)nfs4_vnop_rmdir 
},                         /* rmdir */ 
 237         { &vnop_symlink_desc
, (vnop_t 
*)nfs4_vnop_symlink 
},                     /* symlink */ 
 238         { &vnop_readdir_desc
, (vnop_t 
*)nfs_vnop_readdir 
},                      /* readdir */ 
 239         { &vnop_readlink_desc
, (vnop_t 
*)nfs_vnop_readlink 
},                    /* readlink */ 
 240         { &vnop_inactive_desc
, (vnop_t 
*)nfs_vnop_inactive 
},                    /* inactive */ 
 241         { &vnop_reclaim_desc
, (vnop_t 
*)nfs_vnop_reclaim 
},                      /* reclaim */ 
 242         { &vnop_strategy_desc
, (vnop_t 
*)err_strategy 
},                         /* strategy */ 
 243         { &vnop_pathconf_desc
, (vnop_t 
*)nfs_vnop_pathconf 
},                    /* pathconf */ 
 244         { &vnop_advlock_desc
, (vnop_t 
*)nfs_vnop_advlock 
},                      /* advlock */ 
 245         { &vnop_bwrite_desc
, (vnop_t 
*)err_bwrite 
},                             /* bwrite */ 
 246         { &vnop_pagein_desc
, (vnop_t 
*)nfs_vnop_pagein 
},                        /* Pagein */ 
 247         { &vnop_pageout_desc
, (vnop_t 
*)nfs_vnop_pageout 
},                      /* Pageout */ 
 248         { &vnop_copyfile_desc
, (vnop_t 
*)err_copyfile 
},                         /* Copyfile */ 
 249         { &vnop_blktooff_desc
, (vnop_t 
*)nfs_vnop_blktooff 
},                    /* blktooff */ 
 250         { &vnop_offtoblk_desc
, (vnop_t 
*)nfs_vnop_offtoblk 
},                    /* offtoblk */ 
 251         { &vnop_blockmap_desc
, (vnop_t 
*)nfs_vnop_blockmap 
},                    /* blockmap */ 
 252         { &vnop_getxattr_desc
, (vnop_t 
*)nfs4_vnop_getxattr 
},                   /* getxattr */ 
 253         { &vnop_setxattr_desc
, (vnop_t 
*)nfs4_vnop_setxattr 
},                   /* setxattr */ 
 254         { &vnop_removexattr_desc
, (vnop_t 
*)nfs4_vnop_removexattr 
},             /* removexattr */ 
 255         { &vnop_listxattr_desc
, (vnop_t 
*)nfs4_vnop_listxattr 
},                 /* listxattr */ 
 257         { &vnop_getnamedstream_desc
, (vnop_t 
*)nfs4_vnop_getnamedstream 
},       /* getnamedstream */ 
 258         { &vnop_makenamedstream_desc
, (vnop_t 
*)nfs4_vnop_makenamedstream 
},     /* makenamedstream */ 
 259         { &vnop_removenamedstream_desc
, (vnop_t 
*)nfs4_vnop_removenamedstream 
}, /* removenamedstream */ 
 261         { &vnop_monitor_desc
, (vnop_t 
*)nfs_vnop_monitor 
},                      /* monitor */ 
 264 const struct vnodeopv_desc nfsv4_vnodeop_opv_desc 
= 
 265 { &nfsv4_vnodeop_p
, nfsv4_vnodeop_entries 
}; 
 269  * Special device vnode ops 
 271 vnop_t 
**spec_nfsv2nodeop_p
; 
 272 static const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries
[] = { 
 273         { &vnop_default_desc
, (vnop_t 
*)vn_default_error 
}, 
 274         { &vnop_lookup_desc
, (vnop_t 
*)spec_lookup 
},           /* lookup */ 
 275         { &vnop_create_desc
, (vnop_t 
*)spec_create 
},           /* create */ 
 276         { &vnop_mknod_desc
, (vnop_t 
*)spec_mknod 
},             /* mknod */ 
 277         { &vnop_open_desc
, (vnop_t 
*)spec_open 
},               /* open */ 
 278         { &vnop_close_desc
, (vnop_t 
*)nfsspec_vnop_close 
},     /* close */ 
 279         { &vnop_getattr_desc
, (vnop_t 
*)nfs3_vnop_getattr 
},    /* getattr */ 
 280         { &vnop_setattr_desc
, (vnop_t 
*)nfs_vnop_setattr 
},     /* setattr */ 
 281         { &vnop_read_desc
, (vnop_t 
*)nfsspec_vnop_read 
},       /* read */ 
 282         { &vnop_write_desc
, (vnop_t 
*)nfsspec_vnop_write 
},     /* write */ 
 283         { &vnop_ioctl_desc
, (vnop_t 
*)spec_ioctl 
},             /* ioctl */ 
 284         { &vnop_select_desc
, (vnop_t 
*)spec_select 
},           /* select */ 
 285         { &vnop_revoke_desc
, (vnop_t 
*)spec_revoke 
},           /* revoke */ 
 286         { &vnop_mmap_desc
, (vnop_t 
*)spec_mmap 
},               /* mmap */ 
 287         { &vnop_fsync_desc
, (vnop_t 
*)nfs_vnop_fsync 
},         /* fsync */ 
 288         { &vnop_remove_desc
, (vnop_t 
*)spec_remove 
},           /* remove */ 
 289         { &vnop_link_desc
, (vnop_t 
*)spec_link 
},               /* link */ 
 290         { &vnop_rename_desc
, (vnop_t 
*)spec_rename 
},           /* rename */ 
 291         { &vnop_mkdir_desc
, (vnop_t 
*)spec_mkdir 
},             /* mkdir */ 
 292         { &vnop_rmdir_desc
, (vnop_t 
*)spec_rmdir 
},             /* rmdir */ 
 293         { &vnop_symlink_desc
, (vnop_t 
*)spec_symlink 
},         /* symlink */ 
 294         { &vnop_readdir_desc
, (vnop_t 
*)spec_readdir 
},         /* readdir */ 
 295         { &vnop_readlink_desc
, (vnop_t 
*)spec_readlink 
},       /* readlink */ 
 296         { &vnop_inactive_desc
, (vnop_t 
*)nfs_vnop_inactive 
},   /* inactive */ 
 297         { &vnop_reclaim_desc
, (vnop_t 
*)nfs_vnop_reclaim 
},     /* reclaim */ 
 298         { &vnop_strategy_desc
, (vnop_t 
*)spec_strategy 
},       /* strategy */ 
 299         { &vnop_pathconf_desc
, (vnop_t 
*)spec_pathconf 
},       /* pathconf */ 
 300         { &vnop_advlock_desc
, (vnop_t 
*)spec_advlock 
},         /* advlock */ 
 301         { &vnop_bwrite_desc
, (vnop_t 
*)vn_bwrite 
},             /* bwrite */ 
 302         { &vnop_pagein_desc
, (vnop_t 
*)nfs_vnop_pagein 
},       /* Pagein */ 
 303         { &vnop_pageout_desc
, (vnop_t 
*)nfs_vnop_pageout 
},     /* Pageout */ 
 304         { &vnop_blktooff_desc
, (vnop_t 
*)nfs_vnop_blktooff 
},   /* blktooff */ 
 305         { &vnop_offtoblk_desc
, (vnop_t 
*)nfs_vnop_offtoblk 
},   /* offtoblk */ 
 306         { &vnop_blockmap_desc
, (vnop_t 
*)nfs_vnop_blockmap 
},   /* blockmap */ 
 307         { &vnop_monitor_desc
, (vnop_t 
*)nfs_vnop_monitor 
},     /* monitor */ 
 310 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc 
= 
 311 { &spec_nfsv2nodeop_p
, spec_nfsv2nodeop_entries 
}; 
 313 vnop_t 
**spec_nfsv4nodeop_p
; 
 314 static const struct vnodeopv_entry_desc spec_nfsv4nodeop_entries
[] = { 
 315         { &vnop_default_desc
, (vnop_t 
*)vn_default_error 
}, 
 316         { &vnop_lookup_desc
, (vnop_t 
*)spec_lookup 
},           /* lookup */ 
 317         { &vnop_create_desc
, (vnop_t 
*)spec_create 
},           /* create */ 
 318         { &vnop_mknod_desc
, (vnop_t 
*)spec_mknod 
},             /* mknod */ 
 319         { &vnop_open_desc
, (vnop_t 
*)spec_open 
},               /* open */ 
 320         { &vnop_close_desc
, (vnop_t 
*)nfsspec_vnop_close 
},     /* close */ 
 321         { &vnop_getattr_desc
, (vnop_t 
*)nfs4_vnop_getattr 
},    /* getattr */ 
 322         { &vnop_setattr_desc
, (vnop_t 
*)nfs_vnop_setattr 
},     /* setattr */ 
 323         { &vnop_read_desc
, (vnop_t 
*)nfsspec_vnop_read 
},       /* read */ 
 324         { &vnop_write_desc
, (vnop_t 
*)nfsspec_vnop_write 
},     /* write */ 
 325         { &vnop_ioctl_desc
, (vnop_t 
*)spec_ioctl 
},             /* ioctl */ 
 326         { &vnop_select_desc
, (vnop_t 
*)spec_select 
},           /* select */ 
 327         { &vnop_revoke_desc
, (vnop_t 
*)spec_revoke 
},           /* revoke */ 
 328         { &vnop_mmap_desc
, (vnop_t 
*)spec_mmap 
},               /* mmap */ 
 329         { &vnop_fsync_desc
, (vnop_t 
*)nfs_vnop_fsync 
},         /* fsync */ 
 330         { &vnop_remove_desc
, (vnop_t 
*)spec_remove 
},           /* remove */ 
 331         { &vnop_link_desc
, (vnop_t 
*)spec_link 
},               /* link */ 
 332         { &vnop_rename_desc
, (vnop_t 
*)spec_rename 
},           /* rename */ 
 333         { &vnop_mkdir_desc
, (vnop_t 
*)spec_mkdir 
},             /* mkdir */ 
 334         { &vnop_rmdir_desc
, (vnop_t 
*)spec_rmdir 
},             /* rmdir */ 
 335         { &vnop_symlink_desc
, (vnop_t 
*)spec_symlink 
},         /* symlink */ 
 336         { &vnop_readdir_desc
, (vnop_t 
*)spec_readdir 
},         /* readdir */ 
 337         { &vnop_readlink_desc
, (vnop_t 
*)spec_readlink 
},       /* readlink */ 
 338         { &vnop_inactive_desc
, (vnop_t 
*)nfs_vnop_inactive 
},   /* inactive */ 
 339         { &vnop_reclaim_desc
, (vnop_t 
*)nfs_vnop_reclaim 
},     /* reclaim */ 
 340         { &vnop_strategy_desc
, (vnop_t 
*)spec_strategy 
},       /* strategy */ 
 341         { &vnop_pathconf_desc
, (vnop_t 
*)spec_pathconf 
},       /* pathconf */ 
 342         { &vnop_advlock_desc
, (vnop_t 
*)spec_advlock 
},         /* advlock */ 
 343         { &vnop_bwrite_desc
, (vnop_t 
*)vn_bwrite 
},             /* bwrite */ 
 344         { &vnop_pagein_desc
, (vnop_t 
*)nfs_vnop_pagein 
},       /* Pagein */ 
 345         { &vnop_pageout_desc
, (vnop_t 
*)nfs_vnop_pageout 
},     /* Pageout */ 
 346         { &vnop_blktooff_desc
, (vnop_t 
*)nfs_vnop_blktooff 
},   /* blktooff */ 
 347         { &vnop_offtoblk_desc
, (vnop_t 
*)nfs_vnop_offtoblk 
},   /* offtoblk */ 
 348         { &vnop_blockmap_desc
, (vnop_t 
*)nfs_vnop_blockmap 
},   /* blockmap */ 
 349         { &vnop_getxattr_desc
, (vnop_t 
*)nfs4_vnop_getxattr 
},  /* getxattr */ 
 350         { &vnop_setxattr_desc
, (vnop_t 
*)nfs4_vnop_setxattr 
},  /* setxattr */ 
 351         { &vnop_removexattr_desc
, (vnop_t 
*)nfs4_vnop_removexattr 
},/* removexattr */ 
 352         { &vnop_listxattr_desc
, (vnop_t 
*)nfs4_vnop_listxattr 
},/* listxattr */ 
 354         { &vnop_getnamedstream_desc
, (vnop_t 
*)nfs4_vnop_getnamedstream 
},      /* getnamedstream */ 
 355         { &vnop_makenamedstream_desc
, (vnop_t 
*)nfs4_vnop_makenamedstream 
},    /* makenamedstream */ 
 356         { &vnop_removenamedstream_desc
, (vnop_t 
*)nfs4_vnop_removenamedstream 
},/* removenamedstream */ 
 358         { &vnop_monitor_desc
, (vnop_t 
*)nfs_vnop_monitor 
},     /* monitor */ 
 361 const struct vnodeopv_desc spec_nfsv4nodeop_opv_desc 
= 
 362 { &spec_nfsv4nodeop_p
, spec_nfsv4nodeop_entries 
}; 
 363 #endif /* CONFIG_NFS4 */ 
 366 vnop_t 
**fifo_nfsv2nodeop_p
; 
 367 static const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries
[] = { 
 368         { &vnop_default_desc
, (vnop_t 
*)vn_default_error 
}, 
 369         { &vnop_lookup_desc
, (vnop_t 
*)fifo_lookup 
},           /* lookup */ 
 370         { &vnop_create_desc
, (vnop_t 
*)fifo_create 
},           /* create */ 
 371         { &vnop_mknod_desc
, (vnop_t 
*)fifo_mknod 
},             /* mknod */ 
 372         { &vnop_open_desc
, (vnop_t 
*)fifo_open 
},               /* open */ 
 373         { &vnop_close_desc
, (vnop_t 
*)nfsfifo_vnop_close 
},     /* close */ 
 374         { &vnop_getattr_desc
, (vnop_t 
*)nfs3_vnop_getattr 
},    /* getattr */ 
 375         { &vnop_setattr_desc
, (vnop_t 
*)nfs_vnop_setattr 
},     /* setattr */ 
 376         { &vnop_read_desc
, (vnop_t 
*)nfsfifo_vnop_read 
},       /* read */ 
 377         { &vnop_write_desc
, (vnop_t 
*)nfsfifo_vnop_write 
},     /* write */ 
 378         { &vnop_ioctl_desc
, (vnop_t 
*)fifo_ioctl 
},             /* ioctl */ 
 379         { &vnop_select_desc
, (vnop_t 
*)fifo_select 
},           /* select */ 
 380         { &vnop_revoke_desc
, (vnop_t 
*)fifo_revoke 
},           /* revoke */ 
 381         { &vnop_mmap_desc
, (vnop_t 
*)fifo_mmap 
},               /* mmap */ 
 382         { &vnop_fsync_desc
, (vnop_t 
*)nfs_vnop_fsync 
},         /* fsync */ 
 383         { &vnop_remove_desc
, (vnop_t 
*)fifo_remove 
},           /* remove */ 
 384         { &vnop_link_desc
, (vnop_t 
*)fifo_link 
},               /* link */ 
 385         { &vnop_rename_desc
, (vnop_t 
*)fifo_rename 
},           /* rename */ 
 386         { &vnop_mkdir_desc
, (vnop_t 
*)fifo_mkdir 
},             /* mkdir */ 
 387         { &vnop_rmdir_desc
, (vnop_t 
*)fifo_rmdir 
},             /* rmdir */ 
 388         { &vnop_symlink_desc
, (vnop_t 
*)fifo_symlink 
},         /* symlink */ 
 389         { &vnop_readdir_desc
, (vnop_t 
*)fifo_readdir 
},         /* readdir */ 
 390         { &vnop_readlink_desc
, (vnop_t 
*)fifo_readlink 
},       /* readlink */ 
 391         { &vnop_inactive_desc
, (vnop_t 
*)nfs_vnop_inactive 
},   /* inactive */ 
 392         { &vnop_reclaim_desc
, (vnop_t 
*)nfs_vnop_reclaim 
},     /* reclaim */ 
 393         { &vnop_strategy_desc
, (vnop_t 
*)fifo_strategy 
},       /* strategy */ 
 394         { &vnop_pathconf_desc
, (vnop_t 
*)fifo_pathconf 
},       /* pathconf */ 
 395         { &vnop_advlock_desc
, (vnop_t 
*)fifo_advlock 
},         /* advlock */ 
 396         { &vnop_bwrite_desc
, (vnop_t 
*)vn_bwrite 
},             /* bwrite */ 
 397         { &vnop_pagein_desc
, (vnop_t 
*)nfs_vnop_pagein 
},       /* Pagein */ 
 398         { &vnop_pageout_desc
, (vnop_t 
*)nfs_vnop_pageout 
},     /* Pageout */ 
 399         { &vnop_blktooff_desc
, (vnop_t 
*)nfs_vnop_blktooff 
},   /* blktooff */ 
 400         { &vnop_offtoblk_desc
, (vnop_t 
*)nfs_vnop_offtoblk 
},   /* offtoblk */ 
 401         { &vnop_blockmap_desc
, (vnop_t 
*)nfs_vnop_blockmap 
},   /* blockmap */ 
 402         { &vnop_monitor_desc
, (vnop_t 
*)nfs_vnop_monitor 
},     /* monitor */ 
 405 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc 
= 
 406 { &fifo_nfsv2nodeop_p
, fifo_nfsv2nodeop_entries 
}; 
 411 vnop_t 
**fifo_nfsv4nodeop_p
; 
 412 static const struct vnodeopv_entry_desc fifo_nfsv4nodeop_entries
[] = { 
 413         { &vnop_default_desc
, (vnop_t 
*)vn_default_error 
}, 
 414         { &vnop_lookup_desc
, (vnop_t 
*)fifo_lookup 
},           /* lookup */ 
 415         { &vnop_create_desc
, (vnop_t 
*)fifo_create 
},           /* create */ 
 416         { &vnop_mknod_desc
, (vnop_t 
*)fifo_mknod 
},             /* mknod */ 
 417         { &vnop_open_desc
, (vnop_t 
*)fifo_open 
},               /* open */ 
 418         { &vnop_close_desc
, (vnop_t 
*)nfsfifo_vnop_close 
},     /* close */ 
 419         { &vnop_getattr_desc
, (vnop_t 
*)nfs4_vnop_getattr 
},    /* getattr */ 
 420         { &vnop_setattr_desc
, (vnop_t 
*)nfs_vnop_setattr 
},     /* setattr */ 
 421         { &vnop_read_desc
, (vnop_t 
*)nfsfifo_vnop_read 
},       /* read */ 
 422         { &vnop_write_desc
, (vnop_t 
*)nfsfifo_vnop_write 
},     /* write */ 
 423         { &vnop_ioctl_desc
, (vnop_t 
*)fifo_ioctl 
},             /* ioctl */ 
 424         { &vnop_select_desc
, (vnop_t 
*)fifo_select 
},           /* select */ 
 425         { &vnop_revoke_desc
, (vnop_t 
*)fifo_revoke 
},           /* revoke */ 
 426         { &vnop_mmap_desc
, (vnop_t 
*)fifo_mmap 
},               /* mmap */ 
 427         { &vnop_fsync_desc
, (vnop_t 
*)nfs_vnop_fsync 
},         /* fsync */ 
 428         { &vnop_remove_desc
, (vnop_t 
*)fifo_remove 
},           /* remove */ 
 429         { &vnop_link_desc
, (vnop_t 
*)fifo_link 
},               /* link */ 
 430         { &vnop_rename_desc
, (vnop_t 
*)fifo_rename 
},           /* rename */ 
 431         { &vnop_mkdir_desc
, (vnop_t 
*)fifo_mkdir 
},             /* mkdir */ 
 432         { &vnop_rmdir_desc
, (vnop_t 
*)fifo_rmdir 
},             /* rmdir */ 
 433         { &vnop_symlink_desc
, (vnop_t 
*)fifo_symlink 
},         /* symlink */ 
 434         { &vnop_readdir_desc
, (vnop_t 
*)fifo_readdir 
},         /* readdir */ 
 435         { &vnop_readlink_desc
, (vnop_t 
*)fifo_readlink 
},       /* readlink */ 
 436         { &vnop_inactive_desc
, (vnop_t 
*)nfs_vnop_inactive 
},   /* inactive */ 
 437         { &vnop_reclaim_desc
, (vnop_t 
*)nfs_vnop_reclaim 
},     /* reclaim */ 
 438         { &vnop_strategy_desc
, (vnop_t 
*)fifo_strategy 
},       /* strategy */ 
 439         { &vnop_pathconf_desc
, (vnop_t 
*)fifo_pathconf 
},       /* pathconf */ 
 440         { &vnop_advlock_desc
, (vnop_t 
*)fifo_advlock 
},         /* advlock */ 
 441         { &vnop_bwrite_desc
, (vnop_t 
*)vn_bwrite 
},             /* bwrite */ 
 442         { &vnop_pagein_desc
, (vnop_t 
*)nfs_vnop_pagein 
},       /* Pagein */ 
 443         { &vnop_pageout_desc
, (vnop_t 
*)nfs_vnop_pageout 
},     /* Pageout */ 
 444         { &vnop_blktooff_desc
, (vnop_t 
*)nfs_vnop_blktooff 
},   /* blktooff */ 
 445         { &vnop_offtoblk_desc
, (vnop_t 
*)nfs_vnop_offtoblk 
},   /* offtoblk */ 
 446         { &vnop_blockmap_desc
, (vnop_t 
*)nfs_vnop_blockmap 
},   /* blockmap */ 
 447         { &vnop_getxattr_desc
, (vnop_t 
*)nfs4_vnop_getxattr 
},  /* getxattr */ 
 448         { &vnop_setxattr_desc
, (vnop_t 
*)nfs4_vnop_setxattr 
},  /* setxattr */ 
 449         { &vnop_removexattr_desc
, (vnop_t 
*)nfs4_vnop_removexattr 
},/* removexattr */ 
 450         { &vnop_listxattr_desc
, (vnop_t 
*)nfs4_vnop_listxattr 
},/* listxattr */ 
 452         { &vnop_getnamedstream_desc
, (vnop_t 
*)nfs4_vnop_getnamedstream 
},      /* getnamedstream */ 
 453         { &vnop_makenamedstream_desc
, (vnop_t 
*)nfs4_vnop_makenamedstream 
},    /* makenamedstream */ 
 454         { &vnop_removenamedstream_desc
, (vnop_t 
*)nfs4_vnop_removenamedstream 
},/* removenamedstream */ 
 456         { &vnop_monitor_desc
, (vnop_t 
*)nfs_vnop_monitor 
},     /* monitor */ 
 459 const struct vnodeopv_desc fifo_nfsv4nodeop_opv_desc 
= 
 460 { &fifo_nfsv4nodeop_p
, fifo_nfsv4nodeop_entries 
}; 
 462 #endif /* CONFIG_NFS4 */ 
 464 int     nfs_sillyrename(nfsnode_t
, nfsnode_t
, struct componentname 
*, vfs_context_t
); 
 465 int     nfs_getattr_internal(nfsnode_t
, struct nfs_vattr 
*, vfs_context_t
, int); 
 466 int     nfs_refresh_fh(nfsnode_t
, vfs_context_t
); 
 469 ZONE_VIEW_DEFINE(ZV_NFSDIROFF
, "NFSV3 diroff", 
 470     KHEAP_ID_DATA_BUFFERS
, sizeof(struct nfsdmap
)); 
 473 nfs_dir_buf_cache_lookup_boundaries(struct nfsbuf 
*bp
, int *sof
, int *eof
) 
 476                 struct nfs_dir_buf_header 
*ndbhp 
= (struct nfs_dir_buf_header
*)bp
->nb_data
; 
 477                 if (sof 
&& bp
->nb_lblkno 
== 0) { 
 480                 if (eof 
&& ISSET(ndbhp
->ndbh_flags
, NDB_EOF
)) { 
/*
 * Update nfsnode attributes to avoid extra getattr calls for each direntry.
 * This function should be called only if RDIRPLUS flag is enabled.
 */
static void
nfs_rdirplus_update_node_attrs(nfsnode_t dnp, struct direntry *dp, fhandle_t *fhp, struct nfs_vattr *nvattrp, uint64_t *savedxidp)
{
	nfsnode_t np = NULL;
	uint64_t xid = 0;
	struct componentname cn;
	int isdot = (dp->d_namlen == 1) && (dp->d_name[0] == '.');
	int isdotdot = (dp->d_namlen == 2) && (dp->d_name[0] == '.') && (dp->d_name[1] == '.');
	int should_update_fileid = nvattrp->nva_flags & NFS_FFLAG_FILEID_CONTAINS_XID;

	/* "." and ".." refer to directories already handled elsewhere */
	if (isdot || isdotdot) {
		return;
	}

	/* build a LOOKUP-style componentname for this directory entry */
	bzero(&cn, sizeof(cn));
	cn.cn_nameptr = dp->d_name;
	cn.cn_namelen = dp->d_namlen;
	cn.cn_nameiop = LOOKUP;

	/* xid might be stashed in nva_fileid if rdirplus is enabled */
	if (should_update_fileid) {
		xid = nvattrp->nva_fileid;
		nvattrp->nva_fileid = dp->d_fileno;
	}
	nfs_nget(NFSTOMP(dnp), dnp, &cn, fhp->fh_data, fhp->fh_len, nvattrp, savedxidp, RPCAUTH_UNKNOWN, NG_NOCREATE, &np);
	/* restore the stashed xid so the caller's buffer is left unchanged */
	if (should_update_fileid) {
		nvattrp->nva_fileid = xid;
	}
	/* NOTE(review): elided cleanup reconstructed — nfs_nget is presumed to
	 * return the node locked and referenced; confirm against its contract. */
	if (np) {
		nfs_node_unlock(np);
		vnode_put(NFSTOV(np));
	}
}
/*
 * Find the slot in the access cache for this UID.
 * If adding and no existing slot is found, reuse slots in FIFO order.
 * The index of the next slot to use is kept in the last entry of the n_access array.
 *
 * Returns the slot index, or -1 when no slot matches and add is not set.
 */
int
nfs_node_access_slot(nfsnode_t np, uid_t uid, int add)
{
	int slot;

	/* scan for an existing cache entry belonging to this uid */
	for (slot = 0; slot < NFS_ACCESS_CACHE_SIZE; slot++) {
		if (np->n_accessuid[slot] == uid) {
			break;
		}
	}
	if (slot == NFS_ACCESS_CACHE_SIZE) {
		/* no match: either report failure or recycle the next victim slot */
		if (!add) {
			return -1;
		}
		/* n_access[NFS_ACCESS_CACHE_SIZE] holds the FIFO next-victim index */
		slot = np->n_access[NFS_ACCESS_CACHE_SIZE];
		np->n_access[NFS_ACCESS_CACHE_SIZE] = (slot + 1) % NFS_ACCESS_CACHE_SIZE;
	}
	return slot;
}
 551 nfs3_access_rpc(nfsnode_t np
, u_int32_t 
*access
, int rpcflags
, vfs_context_t ctx
) 
 553         int error 
= 0, lockerror 
= ENOENT
, status 
= 0, slot
; 
 554         uint32_t access_result 
= 0; 
 556         struct nfsm_chain nmreq
, nmrep
; 
 557         struct nfsmount 
*nmp
; 
 561         nfsm_chain_null(&nmreq
); 
 562         nfsm_chain_null(&nmrep
); 
 564         nfsm_chain_build_alloc_init(error
, &nmreq
, NFSX_FH(NFS_VER3
) + NFSX_UNSIGNED
); 
 565         nfsm_chain_add_fh(error
, &nmreq
, NFS_VER3
, np
->n_fhp
, np
->n_fhsize
); 
 566         nfsm_chain_add_32(error
, &nmreq
, *access
); 
 567         nfsm_chain_build_done(error
, &nmreq
); 
 569         error 
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC_ACCESS
, 
 570             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), 
 571             NULL
, rpcflags
, &nmrep
, &xid
, &status
); 
 572         if ((lockerror 
= nfs_node_lock(np
))) { 
 575         nfsm_chain_postop_attr_update(error
, &nmrep
, np
, &xid
); 
 579         nfsm_chain_get_32(error
, &nmrep
, access_result
); 
 582         /* XXXab do we really need mount here, also why are we doing access cache management here? */ 
 584         if (nfs_mount_gone(nmp
)) { 
 590         if (auth_is_kerberized(np
->n_auth
) || auth_is_kerberized(nmp
->nm_auth
)) { 
 591                 uid 
= nfs_cred_getasid2uid(vfs_context_ucred(ctx
)); 
 593                 uid 
= kauth_cred_getuid(vfs_context_ucred(ctx
)); 
 596         uid 
= kauth_cred_getuid(vfs_context_ucred(ctx
)); 
 597 #endif /* CONFIG_NFS_GSS */ 
 598         slot 
= nfs_node_access_slot(np
, uid
, 1); 
 599         np
->n_accessuid
[slot
] = uid
; 
 601         np
->n_accessstamp
[slot
] = now
.tv_sec
; 
 602         np
->n_access
[slot
] = access_result
; 
 605          * If we asked for DELETE but didn't get it, the server 
 606          * may simply not support returning that bit (possible 
 607          * on UNIX systems).  So, we'll assume that it is OK, 
 608          * and just let any subsequent delete action fail if it 
 609          * really isn't deletable. 
 611         if ((*access 
& NFS_ACCESS_DELETE
) && 
 612             !(np
->n_access
[slot
] & NFS_ACCESS_DELETE
)) { 
 613                 np
->n_access
[slot
] |= NFS_ACCESS_DELETE
; 
 615         /* ".zfs" subdirectories may erroneously give a denied answer for add/remove */ 
 616         if (nfs_access_dotzfs 
&& (np
->n_flag 
& NISDOTZFSCHILD
)) { 
 617                 np
->n_access
[slot
] |= (NFS_ACCESS_MODIFY 
| NFS_ACCESS_EXTEND 
| NFS_ACCESS_DELETE
); 
 619         /* pass back the access returned with this request */ 
 620         *access 
= np
->n_access
[slot
]; 
 625         nfsm_chain_cleanup(&nmreq
); 
 626         nfsm_chain_cleanup(&nmrep
); 
 632  * NFS access vnode op. 
 633  * For NFS version 2, just return ok. File accesses may fail later. 
 634  * For NFS version 3+, use the access RPC to check accessibility. If file 
 635  * permissions are changed on the server, accesses might still fail later. 
 639         struct vnop_access_args 
/* { 
 640                                  *  struct vnodeop_desc *a_desc; 
 643                                  *  vfs_context_t a_context; 
 646         vfs_context_t ctx 
= ap
->a_context
; 
 647         vnode_t vp 
= ap
->a_vp
; 
 648         int error 
= 0, slot
, dorpc
, rpcflags 
= 0; 
 649         u_int32_t access
, waccess
; 
 650         nfsnode_t np 
= VTONFS(vp
); 
 651         struct nfsmount 
*nmp
; 
 657         if (nfs_mount_gone(nmp
)) { 
 660         nfsvers 
= nmp
->nm_vers
; 
 663         if (nfsvers 
== NFS_VER2 
|| NMFLAG(nmp
, NOOPAQUE_AUTH
)) { 
 664                 if ((ap
->a_action 
& KAUTH_VNODE_WRITE_RIGHTS
) && 
 665                     vfs_isrdonly(vnode_mount(vp
))) { 
 672          * For NFS v3, do an access rpc, otherwise you are stuck emulating 
 673          * ufs_access() locally using the vattr. This may not be correct, 
 674          * since the server may apply other access criteria such as 
 675          * client uid-->server uid mapping that we do not know about, but 
 676          * this is better than just returning anything that is lying about 
 681          * Convert KAUTH primitives to NFS access rights. 
 684         if (vnode_isdir(vp
)) { 
 687                     (KAUTH_VNODE_LIST_DIRECTORY 
| 
 688                     KAUTH_VNODE_READ_EXTATTRIBUTES
)) { 
 689                         access 
|= NFS_ACCESS_READ
; 
 691                 if (ap
->a_action 
& KAUTH_VNODE_SEARCH
) { 
 692                         access 
|= NFS_ACCESS_LOOKUP
; 
 695                     (KAUTH_VNODE_ADD_FILE 
| 
 696                     KAUTH_VNODE_ADD_SUBDIRECTORY
)) { 
 697                         access 
|= NFS_ACCESS_MODIFY 
| NFS_ACCESS_EXTEND
; 
 699                 if (ap
->a_action 
& KAUTH_VNODE_DELETE_CHILD
) { 
 700                         access 
|= NFS_ACCESS_MODIFY
; 
 705                     (KAUTH_VNODE_READ_DATA 
| 
 706                     KAUTH_VNODE_READ_EXTATTRIBUTES
)) { 
 707                         access 
|= NFS_ACCESS_READ
; 
 709                 if (ap
->a_action 
& KAUTH_VNODE_WRITE_DATA
) { 
 710                         access 
|= NFS_ACCESS_MODIFY 
| NFS_ACCESS_EXTEND
; 
 712                 if (ap
->a_action 
& KAUTH_VNODE_APPEND_DATA
) { 
 713                         access 
|= NFS_ACCESS_EXTEND
; 
 715                 if (ap
->a_action 
& KAUTH_VNODE_EXECUTE
) { 
 716                         access 
|= NFS_ACCESS_EXECUTE
; 
 720         if (ap
->a_action 
& KAUTH_VNODE_DELETE
) { 
 721                 access 
|= NFS_ACCESS_DELETE
; 
 724             (KAUTH_VNODE_WRITE_ATTRIBUTES 
| 
 725             KAUTH_VNODE_WRITE_EXTATTRIBUTES 
| 
 726             KAUTH_VNODE_WRITE_SECURITY
)) { 
 727                 access 
|= NFS_ACCESS_MODIFY
; 
 729         /* XXX this is pretty dubious */ 
 730         if (ap
->a_action 
& KAUTH_VNODE_CHANGE_OWNER
) { 
 731                 access 
|= NFS_ACCESS_MODIFY
; 
 734         /* if caching, always ask for every right */ 
 735         if (nfs_access_cache_timeout 
> 0) { 
 736                 waccess 
= NFS_ACCESS_READ 
| NFS_ACCESS_MODIFY 
| 
 737                     NFS_ACCESS_EXTEND 
| NFS_ACCESS_EXECUTE 
| 
 738                     NFS_ACCESS_DELETE 
| NFS_ACCESS_LOOKUP
; 
 743         if ((error 
= nfs_node_lock(np
))) { 
 748          * Does our cached result allow us to give a definite yes to 
 752         if (auth_is_kerberized(np
->n_auth
) || auth_is_kerberized(nmp
->nm_auth
)) { 
 753                 uid 
= nfs_cred_getasid2uid(vfs_context_ucred(ctx
)); 
 755                 uid 
= kauth_cred_getuid(vfs_context_ucred(ctx
)); 
 758         uid 
= kauth_cred_getuid(vfs_context_ucred(ctx
)); 
 759 #endif /* CONFIG_NFS_GSS */ 
 760         slot 
= nfs_node_access_slot(np
, uid
, 0); 
 763                 /* not asking for any rights understood by NFS, so don't bother doing an RPC */ 
 764                 /* OSAddAtomic(1, &nfsstats.accesscache_hits); */ 
 767         } else if (NACCESSVALID(np
, slot
)) { 
 769                 if (((now
.tv_sec 
< (np
->n_accessstamp
[slot
] + nfs_access_cache_timeout
)) && 
 770                     ((np
->n_access
[slot
] & access
) == access
)) || nfs_use_cache(nmp
)) { 
 771                         /* OSAddAtomic(1, &nfsstats.accesscache_hits); */ 
 773                         waccess 
= np
->n_access
[slot
]; 
 778                 /* Either a no, or a don't know.  Go to the wire. */ 
 779                 /* OSAddAtomic(1, &nfsstats.accesscache_misses); */ 
 782                  * Allow an access call to timeout if we have it cached 
 783                  * so we won't hang if the server isn't responding. 
 785                 if (NACCESSVALID(np
, slot
)) { 
 789                 error 
= nmp
->nm_funcs
->nf_access_rpc(np
, &waccess
, rpcflags
, ctx
); 
 792                  * If the server didn't respond return the cached access. 
 794                 if ((error 
== ETIMEDOUT
) && (rpcflags 
& R_SOFT
)) { 
 796                         waccess 
= np
->n_access
[slot
]; 
 799         if (!error 
&& ((waccess 
& access
) != access
)) { 
 810  * Perform various update/invalidation checks and then add the 
 811  * open to the node.  Regular files will have an open file structure 
 812  * on the node and, for NFSv4, perform an OPEN request on the server. 
 816         struct vnop_open_args 
/* { 
 817                                *  struct vnodeop_desc *a_desc; 
 820                                *  vfs_context_t a_context; 
 823         vfs_context_t ctx 
= ap
->a_context
; 
 824         vnode_t vp 
= ap
->a_vp
; 
 825         nfsnode_t np 
= VTONFS(vp
); 
 826         struct nfsmount 
*nmp
; 
 827         int error
, accessMode
, denyMode
, opened 
= 0; 
 828         struct nfs_open_owner 
*noop 
= NULL
; 
 829         struct nfs_open_file 
*nofp 
= NULL
; 
 832         if (!(ap
->a_mode 
& (FREAD 
| FWRITE
))) { 
 837         if (nfs_mount_gone(nmp
)) { 
 840         if (np
->n_flag 
& NREVOKE
) { 
 844         vtype 
= vnode_vtype(vp
); 
 845         if ((vtype 
!= VREG
) && (vtype 
!= VDIR
) && (vtype 
!= VLNK
)) { 
 849         /* First, check if we need to update/invalidate */ 
 850         if (ISSET(np
->n_flag
, NUPDATESIZE
)) { 
 851                 nfs_data_update_size(np
, 0); 
 853         if ((error 
= nfs_node_lock(np
))) { 
 856         if (np
->n_flag 
& NNEEDINVALIDATE
) { 
 857                 np
->n_flag 
&= ~NNEEDINVALIDATE
; 
 862                 nfs_vinvalbuf(vp
, V_SAVE 
| V_IGNORE_WRITEERR
, ctx
, 1); 
 863                 if ((error 
= nfs_node_lock(np
))) { 
 868                 np
->n_lastrahead 
= -1; 
 870         if (np
->n_flag 
& NMODIFIED
) { 
 875                 if ((error 
= nfs_vinvalbuf(vp
, V_SAVE 
| V_IGNORE_WRITEERR
, ctx
, 1))) { 
 882         /* nfs_getattr() will check changed and purge caches */ 
 883         if ((error 
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
))) { 
 888                 /* Just mark that it was opened */ 
 889                 lck_mtx_lock(&np
->n_openlock
); 
 891                 lck_mtx_unlock(&np
->n_openlock
); 
 895         /* mode contains some combination of: FREAD, FWRITE, O_SHLOCK, O_EXLOCK */ 
 897         if (ap
->a_mode 
& FREAD
) { 
 898                 accessMode 
|= NFS_OPEN_SHARE_ACCESS_READ
; 
 900         if (ap
->a_mode 
& FWRITE
) { 
 901                 accessMode 
|= NFS_OPEN_SHARE_ACCESS_WRITE
; 
 903         if (ap
->a_mode 
& O_EXLOCK
) { 
 904                 denyMode 
= NFS_OPEN_SHARE_DENY_BOTH
; 
 905         } else if (ap
->a_mode 
& O_SHLOCK
) { 
 906                 denyMode 
= NFS_OPEN_SHARE_DENY_WRITE
; 
 908                 denyMode 
= NFS_OPEN_SHARE_DENY_NONE
; 
 910         // XXX don't do deny modes just yet (and never do it for !v4) 
 911         denyMode 
= NFS_OPEN_SHARE_DENY_NONE
; 
 913         noop 
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1); 
 919         error 
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
)); 
 921                 nfs_open_owner_rele(noop
); 
 924         if (np
->n_flag 
& NREVOKE
) { 
 926                 nfs_mount_state_in_use_end(nmp
, 0); 
 927                 nfs_open_owner_rele(noop
); 
 931         error 
= nfs_open_file_find(np
, noop
, &nofp
, accessMode
, denyMode
, 1); 
 932         if (!error 
&& (nofp
->nof_flags 
& NFS_OPEN_FILE_LOST
)) { 
 933                 NP(np
, "nfs_vnop_open: LOST %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
)); 
 937         if (!error 
&& (nofp
->nof_flags 
& NFS_OPEN_FILE_REOPEN
)) { 
 938                 error 
= nfs4_reopen(nofp
, vfs_context_thread(ctx
)); 
 941                         nfs_mount_state_in_use_end(nmp
, 0); 
 947                 error 
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
)); 
 954         if (nmp
->nm_vers 
< NFS_VER4
) { 
 956                  * NFS v2/v3 opens are always allowed - so just add it. 
 958                 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, 0); 
 963          * If we just created the file and the modes match, then we simply use 
 964          * the open performed in the create.  Otherwise, send the request. 
 966         if ((nofp
->nof_flags 
& NFS_OPEN_FILE_CREATE
) && 
 967             (nofp
->nof_creator 
== current_thread()) && 
 968             (accessMode 
== NFS_OPEN_SHARE_ACCESS_BOTH
) && 
 969             (denyMode 
== NFS_OPEN_SHARE_DENY_NONE
)) { 
 970                 nofp
->nof_flags 
&= ~NFS_OPEN_FILE_CREATE
; 
 971                 nofp
->nof_creator 
= NULL
; 
 975                         error 
= nfs4_open(np
, nofp
, accessMode
, denyMode
, ctx
); 
 978                 if ((error 
== EACCES
) && (nofp
->nof_flags 
& NFS_OPEN_FILE_CREATE
) && 
 979                     (nofp
->nof_creator 
== current_thread())) { 
 981                          * Ugh.  This can happen if we just created the file with read-only 
 982                          * perms and we're trying to open it for real with different modes 
 983                          * (e.g. write-only or with a deny mode) and the server decides to 
 984                          * not allow the second open because of the read-only perms. 
 985                          * The best we can do is to just use the create's open. 
 986                          * We may have access we don't need or we may not have a requested 
 987                          * deny mode.  We may log complaints later, but we'll try to avoid it. 
 989                         if (denyMode 
!= NFS_OPEN_SHARE_DENY_NONE
) { 
 990                                 NP(np
, "nfs_vnop_open: deny mode foregone on create, %d", kauth_cred_getuid(nofp
->nof_owner
->noo_cred
)); 
 992                         nofp
->nof_creator 
= NULL
; 
1000                  * If we had just created the file, we already had it open. 
1001                  * If the actual open mode is less than what we grabbed at 
1002                  * create time, then we'll downgrade the open here. 
1004                 if ((nofp
->nof_flags 
& NFS_OPEN_FILE_CREATE
) && 
1005                     (nofp
->nof_creator 
== current_thread())) { 
1006                         error 
= nfs_close(np
, nofp
, NFS_OPEN_SHARE_ACCESS_BOTH
, NFS_OPEN_SHARE_DENY_NONE
, ctx
); 
1008                                 NP(np
, "nfs_vnop_open: create close error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
)); 
1010                         if (!nfs_mount_state_error_should_restart(error
)) { 
1012                                 nofp
->nof_flags 
&= ~NFS_OPEN_FILE_CREATE
; 
1019                 nfs_open_file_clear_busy(nofp
); 
1021         if (nfs_mount_state_in_use_end(nmp
, error
)) { 
1026                 NP(np
, "nfs_vnop_open: error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
)); 
1029                 nfs_open_owner_rele(noop
); 
1031         if (!error 
&& vtype 
== VREG 
&& (ap
->a_mode 
& FWRITE
)) { 
1032                 lck_mtx_lock(&nmp
->nm_lock
); 
1033                 nmp
->nm_state 
&= ~NFSSTA_SQUISHY
; 
1034                 nmp
->nm_curdeadtimeout 
= nmp
->nm_deadtimeout
; 
1035                 if (nmp
->nm_curdeadtimeout 
<= 0) { 
1036                         nmp
->nm_deadto_start 
= 0; 
1039                 lck_mtx_unlock(&nmp
->nm_lock
); 
/*
 * Count the total number of open-for-write references on this node by
 * summing the write and read-write open counters — including the deny-mode
 * (dw/drw) and delegated (d_) variants — across every open-file structure
 * on the node's n_opens list.
 * NOTE(review): assumes the caller serializes access to np->n_opens —
 * confirm locking at call sites.
 */
uint32_t
nfs_no_of_open_file_writers(nfsnode_t np)
{
	uint32_t writers = 0;
	struct nfs_open_file *nofp;

	TAILQ_FOREACH(nofp, &np->n_opens, nof_link) {
		writers += nofp->nof_w + nofp->nof_rw + nofp->nof_w_dw + nofp->nof_rw_dw +
		    nofp->nof_w_drw + nofp->nof_rw_drw + nofp->nof_d_w_dw +
		    nofp->nof_d_rw_dw + nofp->nof_d_w_drw + nofp->nof_d_rw_drw +
		    nofp->nof_d_w + nofp->nof_d_rw;
	}

	return writers;
}
1062  * NFS close vnode op 
1064  * What an NFS client should do upon close after writing is a debatable issue. 
1065  * Most NFS clients push delayed writes to the server upon close, basically for 
1067  * 1 - So that any write errors may be reported back to the client process 
1068  *     doing the close system call. By far the two most likely errors are 
1069  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 
1070  * 2 - To put a worst case upper bound on cache inconsistency between 
1071  *     multiple clients for the file. 
1072  * There is also a consistency problem for Version 2 of the protocol w.r.t. 
1073  * not being able to tell if other clients are writing a file concurrently, 
1074  * since there is no way of knowing if the changed modify time in the reply 
1075  * is only due to the write for this client. 
1076  * (NFS Version 3 provides weak cache consistency data in the reply that 
1077  *  should be sufficient to detect and handle this case.) 
1079  * The current code does the following: 
1080  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 
1081  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate them. 
1082  * for NFS Version 4 - basically the same as NFSv3 
1086         struct vnop_close_args 
/* { 
1087                                 *  struct vnodeop_desc *a_desc; 
1090                                 *  vfs_context_t a_context; 
1093         vfs_context_t ctx 
= ap
->a_context
; 
1094         vnode_t vp 
= ap
->a_vp
; 
1095         nfsnode_t np 
= VTONFS(vp
); 
1096         struct nfsmount 
*nmp
; 
1097         int error 
= 0, error1
, nfsvers
; 
1098         int fflag 
= ap
->a_fflag
; 
1100         int accessMode
, denyMode
; 
1101         struct nfs_open_owner 
*noop 
= NULL
; 
1102         struct nfs_open_file 
*nofp 
= NULL
; 
1108         nfsvers 
= nmp
->nm_vers
; 
1109         vtype 
= vnode_vtype(vp
); 
1111         /* First, check if we need to update/flush/invalidate */ 
1112         if (ISSET(np
->n_flag
, NUPDATESIZE
)) { 
1113                 nfs_data_update_size(np
, 0); 
1115         nfs_node_lock_force(np
); 
1116         if (np
->n_flag 
& NNEEDINVALIDATE
) { 
1117                 np
->n_flag 
&= ~NNEEDINVALIDATE
; 
1118                 nfs_node_unlock(np
); 
1119                 nfs_vinvalbuf(vp
, V_SAVE 
| V_IGNORE_WRITEERR
, ctx
, 1); 
1120                 nfs_node_lock_force(np
); 
1122         if ((vtype 
== VREG
) && (np
->n_flag 
& NMODIFIED
) && (fflag 
& FWRITE
)) { 
1123                 /* we're closing an open for write and the file is modified, so flush it */ 
1124                 nfs_node_unlock(np
); 
1125                 if (nfsvers 
!= NFS_VER2
) { 
1126                         error 
= nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), 0); 
1128                         error 
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1); 
1130                 nfs_node_lock_force(np
); 
1131                 NATTRINVALIDATE(np
); 
1133         if (np
->n_flag 
& NWRITEERR
) { 
1134                 np
->n_flag 
&= ~NWRITEERR
; 
1135                 error 
= np
->n_error
; 
1137         nfs_node_unlock(np
); 
1139         if (vtype 
!= VREG
) { 
1140                 /* Just mark that it was closed */ 
1141                 lck_mtx_lock(&np
->n_openlock
); 
1142                 if (np
->n_openrefcnt 
== 0) { 
1143                         if (fflag 
& (FREAD 
| FWRITE
)) { 
1144                                 NP(np
, "nfs_vnop_close: open reference underrun"); 
1147                 } else if (fflag 
& (FREAD 
| FWRITE
)) { 
1150                         /* No FREAD/FWRITE set - probably the final close */ 
1151                         np
->n_openrefcnt 
= 0; 
1153                 lck_mtx_unlock(&np
->n_openlock
); 
1158         /* fflag should contain some combination of: FREAD, FWRITE */ 
1160         if (fflag 
& FREAD
) { 
1161                 accessMode 
|= NFS_OPEN_SHARE_ACCESS_READ
; 
1163         if (fflag 
& FWRITE
) { 
1164                 accessMode 
|= NFS_OPEN_SHARE_ACCESS_WRITE
; 
1166 // XXX It would be nice if we still had the O_EXLOCK/O_SHLOCK flags that were on the open 
1167 //      if (fflag & O_EXLOCK) 
1168 //              denyMode = NFS_OPEN_SHARE_DENY_BOTH; 
1169 //      else if (fflag & O_SHLOCK) 
1170 //              denyMode = NFS_OPEN_SHARE_DENY_WRITE; 
1172 //              denyMode = NFS_OPEN_SHARE_DENY_NONE; 
1173         // XXX don't do deny modes just yet (and never do it for !v4) 
1174         denyMode 
= NFS_OPEN_SHARE_DENY_NONE
; 
1178                  * No mode given to close? 
1179                  * Guess this is the final close. 
1180                  * We should unlock all locks and close all opens. 
1183                 mount_t mp 
= vnode_mount(vp
); 
1184                 int force 
= (!mp 
|| vfs_isforce(mp
)); 
1186                 writers 
= nfs_no_of_open_file_writers(np
); 
1187                 nfs_release_open_state_for_node(np
, force
); 
1189                         lck_mtx_lock(&nmp
->nm_lock
); 
1190                         if (writers 
> nmp
->nm_writers
) { 
1191                                 NP(np
, "nfs_vnop_close: number of write opens for mount underrun. Node has %d" 
1192                                     " opens for write. Mount has total of %d opens for write\n", 
1193                                     writers
, nmp
->nm_writers
); 
1194                                 nmp
->nm_writers 
= 0; 
1196                                 nmp
->nm_writers 
-= writers
; 
1198                         lck_mtx_unlock(&nmp
->nm_lock
); 
1202         } else if (fflag 
& FWRITE
) { 
1203                 lck_mtx_lock(&nmp
->nm_lock
); 
1204                 if (nmp
->nm_writers 
== 0) { 
1205                         NP(np
, "nfs_vnop_close: removing open writer from mount, but mount has no files open for writing"); 
1209                 lck_mtx_unlock(&nmp
->nm_lock
); 
1213         noop 
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 0); 
1215                 // printf("nfs_vnop_close: can't get open owner!\n"); 
1220         error 
= nfs_mount_state_in_use_start(nmp
, NULL
); 
1222                 nfs_open_owner_rele(noop
); 
1226         error 
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 0); 
1228         if (!error 
&& (nofp
->nof_flags 
& NFS_OPEN_FILE_REOPEN
)) { 
1229                 error 
= nfs4_reopen(nofp
, NULL
); 
1232                         nfs_mount_state_in_use_end(nmp
, 0); 
1238                 NP(np
, "nfs_vnop_close: no open file for owner, error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
)); 
1242         error 
= nfs_open_file_set_busy(nofp
, NULL
); 
1248         error 
= nfs_close(np
, nofp
, accessMode
, denyMode
, ctx
); 
1250                 NP(np
, "nfs_vnop_close: close error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
)); 
1255                 nfs_open_file_clear_busy(nofp
); 
1257         if (nfs_mount_state_in_use_end(nmp
, error
)) { 
1265                 NP(np
, "nfs_vnop_close: error %d, %d", error
, kauth_cred_getuid(noop
->noo_cred
)); 
1268                 nfs_open_owner_rele(noop
); 
1274  * nfs_close(): common function that does all the heavy lifting of file closure 
1276  * Takes an open file structure and a set of access/deny modes and figures out how 
1277  * to update the open file structure (and the state on the server) appropriately. 
1282         struct nfs_open_file 
*nofp
, 
1283         uint32_t accessMode
, 
1285         __unused vfs_context_t ctx
) 
1288         struct nfs_lock_owner 
*nlop
; 
1290         int error 
= 0, changed 
= 0, delegated 
= 0, closed 
= 0, downgrade 
= 0; 
1291         uint8_t newAccessMode
, newDenyMode
; 
1293         /* warn if modes don't match current state */ 
1294         if (((accessMode 
& nofp
->nof_access
) != accessMode
) || ((denyMode 
& nofp
->nof_deny
) != denyMode
)) { 
1295                 NP(np
, "nfs_close: mode mismatch %d %d, current %d %d, %d", 
1296                     accessMode
, denyMode
, nofp
->nof_access
, nofp
->nof_deny
, 
1297                     kauth_cred_getuid(nofp
->nof_owner
->noo_cred
)); 
1301          * If we're closing a write-only open, we may not have a write-only count 
1302          * if we also grabbed read access.  So, check the read-write count. 
1304         if (denyMode 
== NFS_OPEN_SHARE_DENY_NONE
) { 
1305                 if ((accessMode 
== NFS_OPEN_SHARE_ACCESS_WRITE
) && 
1306                     (nofp
->nof_w 
== 0) && (nofp
->nof_d_w 
== 0) && 
1307                     (nofp
->nof_rw 
|| nofp
->nof_d_rw
)) { 
1308                         accessMode 
= NFS_OPEN_SHARE_ACCESS_BOTH
; 
1310         } else if (denyMode 
== NFS_OPEN_SHARE_DENY_WRITE
) { 
1311                 if ((accessMode 
== NFS_OPEN_SHARE_ACCESS_WRITE
) && 
1312                     (nofp
->nof_w_dw 
== 0) && (nofp
->nof_d_w_dw 
== 0) && 
1313                     (nofp
->nof_rw_dw 
|| nofp
->nof_d_rw_dw
)) { 
1314                         accessMode 
= NFS_OPEN_SHARE_ACCESS_BOTH
; 
1316         } else { /* NFS_OPEN_SHARE_DENY_BOTH */ 
1317                 if ((accessMode 
== NFS_OPEN_SHARE_ACCESS_WRITE
) && 
1318                     (nofp
->nof_w_drw 
== 0) && (nofp
->nof_d_w_drw 
== 0) && 
1319                     (nofp
->nof_rw_drw 
|| nofp
->nof_d_rw_drw
)) { 
1320                         accessMode 
= NFS_OPEN_SHARE_ACCESS_BOTH
; 
1324         nfs_open_file_remove_open_find(nofp
, accessMode
, denyMode
, &newAccessMode
, &newDenyMode
, &delegated
); 
1325         if ((newAccessMode 
!= nofp
->nof_access
) || (newDenyMode 
!= nofp
->nof_deny
)) { 
1331         if (NFSTONMP(np
)->nm_vers 
< NFS_VER4
) { 
1332                 /* NFS v2/v3 closes simply need to remove the open. */ 
1336         if ((newAccessMode 
== 0) || (nofp
->nof_opencnt 
== 1)) { 
1338                  * No more access after this close, so clean up and close it. 
1339                  * Don't send a close RPC if we're closing a delegated open. 
1343                 if (!delegated 
&& !(nofp
->nof_flags 
& NFS_OPEN_FILE_LOST
)) { 
1344                         error 
= nfs4_close_rpc(np
, nofp
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), 0); 
1346                 if (error 
== NFSERR_LOCKS_HELD
) { 
1348                          * Hmm... the server says we have locks we need to release first 
1349                          * Find the lock owner and try to unlock everything. 
1351                         nlop 
= nfs_lock_owner_find(np
, vfs_context_proc(ctx
), 0); 
1353                                 nfs4_unlock_rpc(np
, nlop
, F_WRLCK
, 0, UINT64_MAX
, 
1354                                     0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
)); 
1355                                 nfs_lock_owner_rele(nlop
); 
1357                         error 
= nfs4_close_rpc(np
, nofp
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), 0); 
1359         } else if (changed
) { 
1361                  * File is still open but with less access, so downgrade the open. 
1362                  * Don't send a downgrade RPC if we're closing a delegated open. 
1364                 if (!delegated 
&& !(nofp
->nof_flags 
& NFS_OPEN_FILE_LOST
)) { 
1367                          * If we have delegated opens, we should probably claim them before sending 
1368                          * the downgrade because the server may not know the open we are downgrading to. 
1370                         if (nofp
->nof_d_rw_drw 
|| nofp
->nof_d_w_drw 
|| nofp
->nof_d_r_drw 
|| 
1371                             nofp
->nof_d_rw_dw 
|| nofp
->nof_d_w_dw 
|| nofp
->nof_d_r_dw 
|| 
1372                             nofp
->nof_d_rw 
|| nofp
->nof_d_w 
|| nofp
->nof_d_r
) { 
1373                                 nfs4_claim_delegated_state_for_open_file(nofp
, 0); 
1375                         /* need to remove the open before sending the downgrade */ 
1376                         nfs_open_file_remove_open(nofp
, accessMode
, denyMode
); 
1377                         error 
= nfs4_open_downgrade_rpc(np
, nofp
, ctx
); 
1378                         if (error
) { /* Hmm.. that didn't work. Add the open back in. */ 
1379                                 nfs_open_file_add_open(nofp
, accessMode
, denyMode
, delegated
); 
1386                 NP(np
, "nfs_close: error %d, %d", error
, kauth_cred_getuid(nofp
->nof_owner
->noo_cred
)); 
1391                 nfs_open_file_remove_open(nofp
, accessMode
, denyMode
); 
1395                 lck_mtx_lock(&nofp
->nof_lock
); 
1396                 if (nofp
->nof_r 
|| nofp
->nof_d_r 
|| nofp
->nof_w 
|| nofp
->nof_d_w 
|| nofp
->nof_d_rw 
|| 
1397                     (nofp
->nof_rw 
&& !((nofp
->nof_flags 
& NFS_OPEN_FILE_CREATE
) && !nofp
->nof_creator 
&& (nofp
->nof_rw 
== 1))) || 
1398                     nofp
->nof_r_dw 
|| nofp
->nof_d_r_dw 
|| nofp
->nof_w_dw 
|| nofp
->nof_d_w_dw 
|| 
1399                     nofp
->nof_rw_dw 
|| nofp
->nof_d_rw_dw 
|| nofp
->nof_r_drw 
|| nofp
->nof_d_r_drw 
|| 
1400                     nofp
->nof_w_drw 
|| nofp
->nof_d_w_drw 
|| nofp
->nof_rw_drw 
|| nofp
->nof_d_rw_drw
) { 
1401                         NP(np
, "nfs_close: unexpected count: %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u flags 0x%x, %d", 
1402                             nofp
->nof_r
, nofp
->nof_d_r
, nofp
->nof_w
, nofp
->nof_d_w
, 
1403                             nofp
->nof_rw
, nofp
->nof_d_rw
, nofp
->nof_r_dw
, nofp
->nof_d_r_dw
, 
1404                             nofp
->nof_w_dw
, nofp
->nof_d_w_dw
, nofp
->nof_rw_dw
, nofp
->nof_d_rw_dw
, 
1405                             nofp
->nof_r_drw
, nofp
->nof_d_r_drw
, nofp
->nof_w_drw
, nofp
->nof_d_w_drw
, 
1406                             nofp
->nof_rw_drw
, nofp
->nof_d_rw_drw
, nofp
->nof_flags
, 
1407                             kauth_cred_getuid(nofp
->nof_owner
->noo_cred
)); 
1409                 /* clear out all open info, just to be safe */ 
1410                 nofp
->nof_access 
= nofp
->nof_deny 
= 0; 
1411                 nofp
->nof_mmap_access 
= nofp
->nof_mmap_deny 
= 0; 
1412                 nofp
->nof_r 
= nofp
->nof_d_r 
= 0; 
1413                 nofp
->nof_w 
= nofp
->nof_d_w 
= 0; 
1414                 nofp
->nof_rw 
= nofp
->nof_d_rw 
= 0; 
1415                 nofp
->nof_r_dw 
= nofp
->nof_d_r_dw 
= 0; 
1416                 nofp
->nof_w_dw 
= nofp
->nof_d_w_dw 
= 0; 
1417                 nofp
->nof_rw_dw 
= nofp
->nof_d_rw_dw 
= 0; 
1418                 nofp
->nof_r_drw 
= nofp
->nof_d_r_drw 
= 0; 
1419                 nofp
->nof_w_drw 
= nofp
->nof_d_w_drw 
= 0; 
1420                 nofp
->nof_rw_drw 
= nofp
->nof_d_rw_drw 
= 0; 
1421                 nofp
->nof_flags 
&= ~NFS_OPEN_FILE_CREATE
; 
1422                 lck_mtx_unlock(&nofp
->nof_lock
); 
1423                 /* XXX we may potentially want to clean up idle/unused open file structures */ 
1425         if (nofp
->nof_flags 
& NFS_OPEN_FILE_LOST
) { 
1427                 NP(np
, "nfs_close: LOST%s, %d", !nofp
->nof_opencnt 
? " (last)" : "", 
1428                     kauth_cred_getuid(nofp
->nof_owner
->noo_cred
)); 
1443         struct nfs_vattr 
*nvap
, 
1446         struct nfsmount 
*nmp 
= mp 
? VFSTONFS(mp
) : NFSTONMP(np
); 
1447         int error 
= 0, status 
= 0, nfsvers
, rpcflags 
= 0; 
1448         struct nfsm_chain nmreq
, nmrep
; 
1450         if (nfs_mount_gone(nmp
)) { 
1453         nfsvers 
= nmp
->nm_vers
; 
1455         if (flags 
& NGA_MONITOR
) { /* vnode monitor requests should be soft */ 
1456                 rpcflags 
= R_RECOVER
; 
1459         if (flags 
& NGA_SOFT
) { /* Return ETIMEDOUT if server not responding */ 
1463         nfsm_chain_null(&nmreq
); 
1464         nfsm_chain_null(&nmrep
); 
1466         nfsm_chain_build_alloc_init(error
, &nmreq
, NFSX_FH(nfsvers
)); 
1467         if (nfsvers 
!= NFS_VER2
) { 
1468                 nfsm_chain_add_32(error
, &nmreq
, fhsize
); 
1470         nfsm_chain_add_opaque(error
, &nmreq
, fhp
, fhsize
); 
1471         nfsm_chain_build_done(error
, &nmreq
); 
1473         error 
= nfs_request2(np
, mp
, &nmreq
, NFSPROC_GETATTR
, 
1474             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), 
1475             NULL
, rpcflags
, &nmrep
, xidp
, &status
); 
1480         error 
= nfs_parsefattr(nmp
, &nmrep
, nfsvers
, nvap
); 
1482         nfsm_chain_cleanup(&nmreq
); 
1483         nfsm_chain_cleanup(&nmrep
); 
1488  * nfs_refresh_fh will attempt to update the file handle for the node. 
1490  * It only does this for symbolic links and regular files that are not currently opened. 
1492  * On Success returns 0 and the nodes file handle is updated, or ESTALE on failure. 
1495 nfs_refresh_fh(nfsnode_t np
, vfs_context_t ctx
) 
1497         vnode_t dvp
, vp 
= NFSTOV(np
); 
1499         const char *v_name 
= vnode_getname(vp
); 
1501         int namelen
, refreshed
; 
1503         int error
, wanted 
= 0; 
1505         struct timespec ts 
= {.tv_sec 
= 2, .tv_nsec 
= 0}; 
1507         NFS_VNOP_DBG("vnode is %d\n", vnode_vtype(vp
)); 
1509         dvp 
= vnode_parent(vp
); 
1510         if ((vnode_vtype(vp
) != VREG 
&& vnode_vtype(vp
) != VLNK
) || 
1511             v_name 
== NULL 
|| *v_name 
== '\0' || dvp 
== NULL
) { 
1512                 if (v_name 
!= NULL
) { 
1513                         vnode_putname(v_name
); 
1519         namelen 
= NFS_STRLEN_INT(v_name
); 
1520         MALLOC(name
, char *, namelen 
+ 1, M_TEMP
, M_WAITOK
); 
1522                 vnode_putname(v_name
); 
1525         bcopy(v_name
, name
, namelen 
+ 1); 
1526         NFS_VNOP_DBG("Trying to refresh %s : %s\n", v_name
, name
); 
1527         vnode_putname(v_name
); 
1529         /* Allocate the maximum size file handle */ 
1530         MALLOC(fhp
, uint8_t *, NFS4_FHSIZE
, M_FHANDLE
, M_WAITOK
); 
1536         if ((error 
= nfs_node_lock(np
))) { 
1538                 FREE(fhp
, M_FHANDLE
); 
1542         fhsize 
= np
->n_fhsize
; 
1543         bcopy(np
->n_fhp
, fhp
, fhsize
); 
1544         while (ISSET(np
->n_flag
, NREFRESH
)) { 
1545                 SET(np
->n_flag
, NREFRESHWANT
); 
1546                 NFS_VNOP_DBG("Waiting for refresh of %s\n", name
); 
1547                 msleep(np
, &np
->n_lock
, PZERO 
- 1, "nfsrefreshwant", &ts
); 
1548                 if ((error 
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0))) { 
1552         refreshed 
= error 
? 0 : !NFS_CMPFH(np
, fhp
, fhsize
); 
1553         SET(np
->n_flag
, NREFRESH
); 
1554         nfs_node_unlock(np
); 
1556         NFS_VNOP_DBG("error = %d, refreshed = %d\n", error
, refreshed
); 
1557         if (error 
|| refreshed
) { 
1561         /* Check that there are no open references for this file */ 
1562         lck_mtx_lock(&np
->n_openlock
); 
1563         if (np
->n_openrefcnt 
|| !TAILQ_EMPTY(&np
->n_opens
) || !TAILQ_EMPTY(&np
->n_lock_owners
)) { 
1565                 struct nfs_open_file 
*ofp
; 
1567                 TAILQ_FOREACH(ofp
, &np
->n_opens
, nof_link
) { 
1568                         cnt 
+= ofp
->nof_opencnt
; 
1571                         lck_mtx_unlock(&np
->n_openlock
); 
1572                         NFS_VNOP_DBG("Can not refresh file handle for %s with open state\n", name
); 
1573                         NFS_VNOP_DBG("\topenrefcnt = %d, opens = %d lock_owners = %d\n", 
1574                             np
->n_openrefcnt
, cnt
, !TAILQ_EMPTY(&np
->n_lock_owners
)); 
1579         lck_mtx_unlock(&np
->n_openlock
); 
1581          * Since the FH is currently stale we should not be able to 
1582          * establish any open state until the FH is refreshed. 
1585         error 
= nfs_node_lock(np
); 
1588          * Symlinks should never need invalidations and are holding 
1589          * the one and only nfsbuf in an uncached acquired state 
1590          * trying to do a readlink. So we will hang if we invalidate 
1591          * in that case. Only in in the VREG case do we need to 
1594         if (vnode_vtype(vp
) == VREG
) { 
1595                 np
->n_flag 
&= ~NNEEDINVALIDATE
; 
1596                 nfs_node_unlock(np
); 
1597                 error 
= nfs_vinvalbuf(vp
, V_IGNORE_WRITEERR
, ctx
, 1); 
1599                         NFS_VNOP_DBG("nfs_vinvalbuf returned %d\n", error
); 
1603                 nfs_node_unlock(np
); 
1606         NFS_VNOP_DBG("Looking up %s\n", name
); 
1607         error 
= nfs_lookitup(dnp
, name
, namelen
, ctx
, &np
); 
1609                 NFS_VNOP_DBG("nfs_lookitup returned %d\n", error
); 
1613         nfs_node_lock_force(np
); 
1614         wanted 
= ISSET(np
->n_flag
, NREFRESHWANT
); 
1615         CLR(np
->n_flag
, NREFRESH 
| NREFRESHWANT
); 
1616         nfs_node_unlock(np
); 
1622                 NFS_VNOP_DBG("%s refreshed file handle\n", name
); 
1626         FREE(fhp
, M_FHANDLE
); 
1628         return error 
? ESTALE 
: 0; 
1632 nfs_getattr(nfsnode_t np
, struct nfs_vattr 
*nvap
, vfs_context_t ctx
, int flags
) 
1637         error 
= nfs_getattr_internal(np
, nvap
, ctx
, flags
); 
1638         if (error 
== ESTALE
) { 
1639                 error 
= nfs_refresh_fh(np
, ctx
); 
1648 nfs_getattr_internal(nfsnode_t np
, struct nfs_vattr 
*nvap
, vfs_context_t ctx
, int flags
) 
1650         struct nfsmount 
*nmp
; 
1651         int error 
= 0, nfsvers
, inprogset 
= 0, wanted 
= 0, avoidfloods 
= 0; 
1652         struct nfs_vattr 
*nvattr 
= NULL
; 
1653         struct timespec ts 
= { .tv_sec 
= 2, .tv_nsec 
= 0 }; 
1656         FSDBG_TOP(513, np
->n_size
, np
, np
->n_vattr
.nva_size
, np
->n_flag
); 
1660         if (nfs_mount_gone(nmp
)) { 
1663         nfsvers 
= nmp
->nm_vers
; 
1666                 MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
1671         /* Update local times for special files. */ 
1672         if (np
->n_flag 
& (NACC 
| NUPD
)) { 
1673                 nfs_node_lock_force(np
); 
1675                 nfs_node_unlock(np
); 
1677         /* Update size, if necessary */ 
1678         if (ISSET(np
->n_flag
, NUPDATESIZE
)) { 
1679                 nfs_data_update_size(np
, 0); 
1682         error 
= nfs_node_lock(np
); 
1684         if (!(flags 
& (NGA_UNCACHED 
| NGA_MONITOR
)) || ((nfsvers 
>= NFS_VER4
) && (np
->n_openflags 
& N_DELEG_MASK
))) { 
1686                  * Use the cache or wait for any getattr in progress if: 
1687                  * - it's a cached request, or 
1688                  * - we have a delegation, or 
1689                  * - the server isn't responding 
1692                         error 
= nfs_getattrcache(np
, nvap
, flags
); 
1693                         if (!error 
|| (error 
!= ENOENT
)) { 
1694                                 nfs_node_unlock(np
); 
1698                         if (!ISSET(np
->n_flag
, NGETATTRINPROG
)) { 
1701                         if (flags 
& NGA_MONITOR
) { 
1702                                 /* no need to wait if a request is pending */ 
1703                                 error 
= EINPROGRESS
; 
1704                                 nfs_node_unlock(np
); 
1707                         SET(np
->n_flag
, NGETATTRWANT
); 
1708                         msleep(np
, &np
->n_lock
, PZERO 
- 1, "nfsgetattrwant", &ts
); 
1709                         if ((error 
= nfs_sigintr(NFSTONMP(np
), NULL
, vfs_context_thread(ctx
), 0))) { 
1710                                 nfs_node_unlock(np
); 
1714                 SET(np
->n_flag
, NGETATTRINPROG
); 
1716         } else if (!ISSET(np
->n_flag
, NGETATTRINPROG
)) { 
1717                 SET(np
->n_flag
, NGETATTRINPROG
); 
1719         } else if (flags 
& NGA_MONITOR
) { 
1720                 /* no need to make a request if one is pending */ 
1721                 error 
= EINPROGRESS
; 
1723         nfs_node_unlock(np
); 
1726         if (nfs_mount_gone(nmp
)) { 
1734          * Return cached attributes if they are valid, 
1735          * if the server doesn't respond, and this is 
1736          * some softened up style of mount. 
1738         if (NATTRVALID(np
) && nfs_use_cache(nmp
)) { 
1743          * We might want to try to get both the attributes and access info by 
1744          * making an ACCESS call and seeing if it returns updated attributes. 
1745          * But don't bother if we aren't caching access info or if the 
1746          * attributes returned wouldn't be cached. 
1748         if (!(flags 
& NGA_ACL
) && (nfsvers 
!= NFS_VER2
) && nfs_access_for_getattr 
&& (nfs_access_cache_timeout 
> 0)) { 
1749                 if (nfs_attrcachetimeout(np
) > 0) { 
1750                         /*  OSAddAtomic(1, &nfsstats.accesscache_misses); */ 
1751                         u_int32_t access 
= NFS_ACCESS_ALL
; 
1754                         /* Return cached attrs if server doesn't respond */ 
1755                         if (flags 
& NGA_SOFT
) { 
1759                         error 
= nmp
->nm_funcs
->nf_access_rpc(np
, &access
, rpcflags
, ctx
); 
1761                         if (error 
== ETIMEDOUT
) { 
1768                         nfs_node_lock_force(np
); 
1769                         error 
= nfs_getattrcache(np
, nvap
, flags
); 
1770                         nfs_node_unlock(np
); 
1771                         if (!error 
|| (error 
!= ENOENT
)) { 
1774                         /* Well, that didn't work... just do a getattr... */ 
1782         error 
= nmp
->nm_funcs
->nf_getattr_rpc(np
, NULL
, np
->n_fhp
, np
->n_fhsize
, flags
, ctx
, nvap
, &xid
); 
1784                 nfs_node_lock_force(np
); 
1785                 error 
= nfs_loadattrcache(np
, nvap
, &xid
, 0); 
1786                 nfs_node_unlock(np
); 
1790          * If the server didn't respond, return cached attributes. 
1793         if ((flags 
& NGA_SOFT
) && (error 
== ETIMEDOUT
)) { 
1794                 nfs_node_lock_force(np
); 
1795                 error 
= nfs_getattrcache(np
, nvap
, flags
); 
1796                 if (!error 
|| (error 
!= ENOENT
)) { 
1797                         nfs_node_unlock(np
); 
1800                 nfs_node_unlock(np
); 
1804         if (!xid
) { /* out-of-order rpc - attributes were dropped */ 
1805                 FSDBG(513, -1, np
, np
->n_xid 
>> 32, np
->n_xid
); 
1806                 if (avoidfloods
++ < 20) { 
1809                 /* avoidfloods>1 is bizarre.  at 20 pull the plug */ 
1810                 /* just return the last attributes we got */ 
1813         nfs_node_lock_force(np
); 
1815                 wanted 
= ISSET(np
->n_flag
, NGETATTRWANT
); 
1816                 CLR(np
->n_flag
, (NGETATTRINPROG 
| NGETATTRWANT
)); 
1819                 /* check if the node changed on us */ 
1820                 vnode_t vp 
= NFSTOV(np
); 
1821                 enum vtype vtype 
= vnode_vtype(vp
); 
1822                 if ((vtype 
== VDIR
) && NFS_CHANGED_NC(nfsvers
, np
, nvap
)) { 
1823                         FSDBG(513, -1, np
, 0, np
); 
1824                         np
->n_flag 
&= ~NNEGNCENTRIES
; 
1827                         NFS_CHANGED_UPDATE_NC(nfsvers
, np
, nvap
); 
1828                         NFS_VNOP_DBG("Purge directory 0x%llx\n", 
1829                             (uint64_t)VM_KERNEL_ADDRPERM(vp
)); 
1831                 if (NFS_CHANGED(nfsvers
, np
, nvap
)) { 
1832                         FSDBG(513, -1, np
, -1, np
); 
1833                         if (vtype 
== VDIR
) { 
1834                                 NFS_VNOP_DBG("Invalidate directory 0x%llx\n", 
1835                                     (uint64_t)VM_KERNEL_ADDRPERM(vp
)); 
1838                         nfs_node_unlock(np
); 
1842                         error 
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1); 
1843                         FSDBG(513, -1, np
, -2, error
); 
1845                                 nfs_node_lock_force(np
); 
1846                                 NFS_CHANGED_UPDATE(nfsvers
, np
, nvap
); 
1847                                 nfs_node_unlock(np
); 
1850                         nfs_node_unlock(np
); 
1856                 nfs_node_unlock(np
); 
1862         if (nvattr 
!= NULL
) { 
1863                 NVATTR_CLEANUP(nvap
); 
1864                 FREE(nvattr
, M_TEMP
); 
1865         } else if (!(flags 
& NGA_ACL
)) { 
1866                 /* make sure we don't return an ACL if it wasn't asked for */ 
1867                 NFS_BITMAP_CLR(nvap
->nva_bitmap
, NFS_FATTR_ACL
); 
1868                 if (nvap
->nva_acl
) { 
1869                         kauth_acl_free(nvap
->nva_acl
); 
1870                         nvap
->nva_acl 
= NULL
; 
1873         FSDBG_BOT(513, np
->n_size
, error
, np
->n_vattr
.nva_size
, np
->n_flag
); 
1879  * NFS getattr call from vfs. 
1883  * The attributes we support over the wire. 
1884  * We also get fsid but the vfs layer gets it out of the mount 
1885  * structure after this calling us so there's no need to return it, 
1886  * and Finder expects to call getattrlist just looking for the FSID 
1887  * with out hanging on a non responsive server. 
1889 #define NFS3_SUPPORTED_VATTRS \ 
1890         (VNODE_ATTR_va_rdev |           \ 
1891          VNODE_ATTR_va_nlink |          \ 
1892          VNODE_ATTR_va_data_size |      \ 
1893          VNODE_ATTR_va_data_alloc |     \ 
1894          VNODE_ATTR_va_uid |            \ 
1895          VNODE_ATTR_va_gid |            \ 
1896          VNODE_ATTR_va_mode |           \ 
1897          VNODE_ATTR_va_modify_time |    \ 
1898          VNODE_ATTR_va_change_time |    \ 
1899          VNODE_ATTR_va_access_time |    \ 
1900          VNODE_ATTR_va_fileid |         \ 
1906         struct vnop_getattr_args 
/* { 
1907                                   *  struct vnodeop_desc *a_desc; 
1909                                   *  struct vnode_attr *a_vap; 
1910                                   *  vfs_context_t a_context; 
1915         uint64_t supported_attrs
; 
1916         struct nfs_vattr 
*nva
; 
1917         struct vnode_attr 
*vap 
= ap
->a_vap
; 
1918         struct nfsmount 
*nmp
; 
1921         nmp 
= VTONMP(ap
->a_vp
); 
1924          * Lets don't go over the wire if we don't support any of the attributes. 
1925          * Just fall through at the VFS layer and let it cons up what it needs. 
1927         /* Return the io size no matter what, since we don't go over the wire for this */ 
1928         VATTR_RETURN(vap
, va_iosize
, nfs_iosize
); 
1930         supported_attrs 
= NFS3_SUPPORTED_VATTRS
; 
1932         if ((vap
->va_active 
& supported_attrs
) == 0) { 
1936         if (VATTR_IS_ACTIVE(ap
->a_vap
, va_name
)) { 
1937                 NFS_VNOP_DBG("Getting attrs for 0x%llx, vname is %s\n", 
1938                     (uint64_t)VM_KERNEL_ADDRPERM(ap
->a_vp
), 
1939                     ap
->a_vp
->v_name 
? ap
->a_vp
->v_name 
: "empty"); 
1943          * We should not go over the wire if only fileid was requested and has ever been populated. 
1945         if ((vap
->va_active 
& supported_attrs
) == VNODE_ATTR_va_fileid
) { 
1946                 np 
= VTONFS(ap
->a_vp
); 
1947                 if (np
->n_attrstamp
) { 
1948                         VATTR_RETURN(vap
, va_fileid
, np
->n_vattr
.nva_fileid
); 
1953         MALLOC(nva
, struct nfs_vattr 
*, sizeof(*nva
), M_TEMP
, M_WAITOK
); 
1954         error 
= nfs_getattr(VTONFS(ap
->a_vp
), nva
, ap
->a_context
, NGA_CACHED
); 
1959         /* copy nva to *a_vap */ 
1960         VATTR_RETURN(vap
, va_type
, nva
->nva_type
); 
1961         VATTR_RETURN(vap
, va_mode
, nva
->nva_mode
); 
1962         rdev 
= makedev(nva
->nva_rawdev
.specdata1
, nva
->nva_rawdev
.specdata2
); 
1963         VATTR_RETURN(vap
, va_rdev
, rdev
); 
1964         VATTR_RETURN(vap
, va_uid
, nva
->nva_uid
); 
1965         VATTR_RETURN(vap
, va_gid
, nva
->nva_gid
); 
1966         VATTR_RETURN(vap
, va_nlink
, nva
->nva_nlink
); 
1967         VATTR_RETURN(vap
, va_fileid
, nva
->nva_fileid
); 
1968         VATTR_RETURN(vap
, va_data_size
, nva
->nva_size
); 
1969         VATTR_RETURN(vap
, va_data_alloc
, nva
->nva_bytes
); 
1970         vap
->va_access_time
.tv_sec 
= nva
->nva_timesec
[NFSTIME_ACCESS
]; 
1971         vap
->va_access_time
.tv_nsec 
= nva
->nva_timensec
[NFSTIME_ACCESS
]; 
1972         VATTR_SET_SUPPORTED(vap
, va_access_time
); 
1973         vap
->va_modify_time
.tv_sec 
= nva
->nva_timesec
[NFSTIME_MODIFY
]; 
1974         vap
->va_modify_time
.tv_nsec 
= nva
->nva_timensec
[NFSTIME_MODIFY
]; 
1975         VATTR_SET_SUPPORTED(vap
, va_modify_time
); 
1976         vap
->va_change_time
.tv_sec 
= nva
->nva_timesec
[NFSTIME_CHANGE
]; 
1977         vap
->va_change_time
.tv_nsec 
= nva
->nva_timensec
[NFSTIME_CHANGE
]; 
1978         VATTR_SET_SUPPORTED(vap
, va_change_time
); 
1981         // VATTR_RETURN(vap, va_encoding, 0xffff /* kTextEncodingUnknown */); 
1992         struct vnop_setattr_args 
/* { 
1993                                   *  struct vnodeop_desc *a_desc; 
1995                                   *  struct vnode_attr *a_vap; 
1996                                   *  vfs_context_t a_context; 
1999         vfs_context_t ctx 
= ap
->a_context
; 
2000         vnode_t vp 
= ap
->a_vp
; 
2001         nfsnode_t np 
= VTONFS(vp
); 
2002         struct nfsmount 
*nmp
; 
2003         struct vnode_attr 
*vap 
= ap
->a_vap
; 
2005         int biosize
, nfsvers
, namedattrs
; 
2006         u_quad_t origsize
, vapsize
; 
2007         struct nfs_dulookup 
*dul
; 
2008         nfsnode_t dnp 
= NULL
; 
2009         int dul_in_progress 
= 0; 
2011         const char *vname 
= NULL
; 
2013         struct nfs_open_owner 
*noop 
= NULL
; 
2014         struct nfs_open_file 
*nofp 
= NULL
; 
2017         if (nfs_mount_gone(nmp
)) { 
2020         nfsvers 
= nmp
->nm_vers
; 
2021         namedattrs 
= (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_NAMED_ATTR
); 
2022         biosize 
= nmp
->nm_biosize
; 
2024         /* Disallow write attempts if the filesystem is mounted read-only. */ 
2025         if (vnode_vfsisrdonly(vp
)) { 
2029         origsize 
= np
->n_size
; 
2030         if (VATTR_IS_ACTIVE(vap
, va_data_size
)) { 
2031                 switch (vnode_vtype(vp
)) { 
2038                         if (!VATTR_IS_ACTIVE(vap
, va_modify_time
) && 
2039                             !VATTR_IS_ACTIVE(vap
, va_access_time
) && 
2040                             !VATTR_IS_ACTIVE(vap
, va_mode
) && 
2041                             !VATTR_IS_ACTIVE(vap
, va_uid
) && 
2042                             !VATTR_IS_ACTIVE(vap
, va_gid
)) { 
2045                         VATTR_CLEAR_ACTIVE(vap
, va_data_size
); 
2049                          * Disallow write attempts if the filesystem is 
2050                          * mounted read-only. 
2052                         if (vnode_vfsisrdonly(vp
)) { 
2055                         FSDBG_TOP(512, np
->n_size
, vap
->va_data_size
, 
2056                             np
->n_vattr
.nva_size
, np
->n_flag
); 
2057                         /* clear NNEEDINVALIDATE, if set */ 
2058                         if ((error 
= nfs_node_lock(np
))) { 
2061                         if (np
->n_flag 
& NNEEDINVALIDATE
) { 
2062                                 np
->n_flag 
&= ~NNEEDINVALIDATE
; 
2064                         nfs_node_unlock(np
); 
2065                         /* flush everything */ 
2066                         error 
= nfs_vinvalbuf(vp
, (vap
->va_data_size 
? V_SAVE 
: 0), ctx
, 1); 
2068                                 NP(np
, "nfs_setattr: nfs_vinvalbuf %d", error
); 
2069                                 FSDBG_BOT(512, np
->n_size
, vap
->va_data_size
, np
->n_vattr
.nva_size
, -1); 
2073                         if (nfsvers 
>= NFS_VER4
) { 
2074                                 /* setting file size requires having the file open for write access */ 
2075                                 if (np
->n_flag 
& NREVOKE
) { 
2078                                 noop 
= nfs_open_owner_find(nmp
, vfs_context_ucred(ctx
), 1); 
2083                                 error 
= nfs_mount_state_in_use_start(nmp
, vfs_context_thread(ctx
)); 
2087                                 if (np
->n_flag 
& NREVOKE
) { 
2088                                         nfs_mount_state_in_use_end(nmp
, 0); 
2091                                 error 
= nfs_open_file_find(np
, noop
, &nofp
, 0, 0, 1); 
2092                                 if (!error 
&& (nofp
->nof_flags 
& NFS_OPEN_FILE_LOST
)) { 
2095                                 if (!error 
&& (nofp
->nof_flags 
& NFS_OPEN_FILE_REOPEN
)) { 
2096                                         error 
= nfs4_reopen(nofp
, vfs_context_thread(ctx
)); 
2099                                                 nfs_mount_state_in_use_end(nmp
, 0); 
2104                                         error 
= nfs_open_file_set_busy(nofp
, vfs_context_thread(ctx
)); 
2107                                         nfs_mount_state_in_use_end(nmp
, 0); 
2108                                         nfs_open_owner_rele(noop
); 
2111                                 if (!(nofp
->nof_access 
& NFS_OPEN_SHARE_ACCESS_WRITE
)) { 
2112                                         /* we don't have the file open for write access, so open it */ 
2113                                         error 
= nfs4_open(np
, nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
, ctx
); 
2115                                                 nofp
->nof_flags 
|= NFS_OPEN_FILE_SETATTR
; 
2117                                         if (nfs_mount_state_error_should_restart(error
)) { 
2118                                                 nfs_open_file_clear_busy(nofp
); 
2120                                                 nfs_mount_state_in_use_end(nmp
, error
); 
2126                         nfs_data_lock(np
, NFS_DATA_LOCK_EXCLUSIVE
); 
2127                         if (np
->n_size 
> vap
->va_data_size
) { /* shrinking? */ 
2134                                 obn 
= (np
->n_size 
- 1) / biosize
; 
2135                                 bn 
= vap
->va_data_size 
/ biosize
; 
2136                                 for (; obn 
>= bn
; obn
--) { 
2137                                         if (!nfs_buf_is_incore(np
, obn
)) { 
2140                                         error 
= nfs_buf_get(np
, obn
, biosize
, NULL
, NBLK_READ
, &bp
); 
2145                                                 FSDBG(512, bp
, bp
->nb_flags
, 0, obn
); 
2146                                                 SET(bp
->nb_flags
, NB_INVAL
); 
2147                                                 nfs_buf_release(bp
, 1); 
2151                                         neweofoff 
= vap
->va_data_size 
- NBOFF(bp
); 
2152                                         /* check for any dirty data before the new EOF */ 
2153                                         if ((bp
->nb_dirtyend 
> 0) && (bp
->nb_dirtyoff 
< neweofoff
)) { 
2154                                                 /* clip dirty range to EOF */ 
2155                                                 if (bp
->nb_dirtyend 
> neweofoff
) { 
2156                                                         bp
->nb_dirtyend 
= neweofoff
; 
2157                                                         if (bp
->nb_dirtyoff 
>= bp
->nb_dirtyend
) { 
2158                                                                 bp
->nb_dirtyoff 
= bp
->nb_dirtyend 
= 0; 
2161                                                 if ((bp
->nb_dirtyend 
> 0) && (bp
->nb_dirtyoff 
< neweofoff
)) { 
2165                                         nfs_buf_pgs_get_page_mask(&pagemask
, round_page_64(neweofoff
) / PAGE_SIZE
); 
2166                                         nfs_buf_pgs_bit_and(&bp
->nb_dirty
, &pagemask
, &bp
->nb_dirty
); 
2167                                         if (nfs_buf_pgs_is_set(&bp
->nb_dirty
)) { 
2171                                                 FSDBG(512, bp
, bp
->nb_flags
, 0, obn
); 
2172                                                 SET(bp
->nb_flags
, NB_INVAL
); 
2173                                                 nfs_buf_release(bp
, 1); 
2176                                         /* gotta write out dirty data before invalidating */ 
2177                                         /* (NB_STABLE indicates that data writes should be FILESYNC) */ 
2178                                         /* (NB_NOCACHE indicates buffer should be discarded) */ 
2179                                         CLR(bp
->nb_flags
, (NB_DONE 
| NB_ERROR 
| NB_INVAL 
| NB_ASYNC 
| NB_READ
)); 
2180                                         SET(bp
->nb_flags
, NB_STABLE 
| NB_NOCACHE
); 
2181                                         if (!IS_VALID_CRED(bp
->nb_wcred
)) { 
2182                                                 kauth_cred_t cred 
= vfs_context_ucred(ctx
); 
2183                                                 kauth_cred_ref(cred
); 
2184                                                 bp
->nb_wcred 
= cred
; 
2186                                         error 
= nfs_buf_write(bp
); 
2187                                         // Note: bp has been released 
2189                                                 FSDBG(512, bp
, 0xd00dee, 0xbad, error
); 
2190                                                 nfs_node_lock_force(np
); 
2191                                                 np
->n_error 
= error
; 
2192                                                 np
->n_flag 
|= NWRITEERR
; 
2194                                                  * There was a write error and we need to 
2195                                                  * invalidate attrs and flush buffers in 
2196                                                  * order to sync up with the server. 
2197                                                  * (if this write was extending the file, 
2198                                                  * we may no longer know the correct size) 
2200                                                 NATTRINVALIDATE(np
); 
2201                                                 nfs_node_unlock(np
); 
2202                                                 nfs_data_unlock(np
); 
2203                                                 nfs_vinvalbuf(vp
, V_SAVE 
| V_IGNORE_WRITEERR
, ctx
, 1); 
2204                                                 nfs_data_lock(np
, NFS_DATA_LOCK_EXCLUSIVE
); 
2209                         if (vap
->va_data_size 
!= np
->n_size
) { 
2210                                 ubc_setsize(vp
, (off_t
)vap
->va_data_size
); /* XXX error? */ 
2212                         origsize 
= np
->n_size
; 
2213                         np
->n_size 
= np
->n_vattr
.nva_size 
= vap
->va_data_size
; 
2214                         nfs_node_lock_force(np
); 
2215                         CLR(np
->n_flag
, NUPDATESIZE
); 
2216                         nfs_node_unlock(np
); 
2217                         FSDBG(512, np
, np
->n_size
, np
->n_vattr
.nva_size
, 0xf00d0001); 
2219         } else if (VATTR_IS_ACTIVE(vap
, va_modify_time
) || 
2220             VATTR_IS_ACTIVE(vap
, va_access_time
) || 
2221             (vap
->va_vaflags 
& VA_UTIMES_NULL
)) { 
2222                 if ((error 
= nfs_node_lock(np
))) { 
2224                         if (nfsvers 
>= NFS_VER4
) { 
2225                                 nfs_mount_state_in_use_end(nmp
, 0); 
2230                 if ((np
->n_flag 
& NMODIFIED
) && (vnode_vtype(vp
) == VREG
)) { 
2231                         nfs_node_unlock(np
); 
2232                         error 
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1); 
2233                         if (error 
== EINTR
) { 
2235                                 if (nfsvers 
>= NFS_VER4
) { 
2236                                         nfs_mount_state_in_use_end(nmp
, 0); 
2242                         nfs_node_unlock(np
); 
2246         MALLOC(dul
, struct nfs_dulookup 
*, sizeof(*dul
), M_TEMP
, M_WAITOK
); 
2248         if ((VATTR_IS_ACTIVE(vap
, va_mode
) || VATTR_IS_ACTIVE(vap
, va_uid
) || VATTR_IS_ACTIVE(vap
, va_gid
) || 
2249             VATTR_IS_ACTIVE(vap
, va_acl
) || VATTR_IS_ACTIVE(vap
, va_uuuid
) || VATTR_IS_ACTIVE(vap
, va_guuid
)) && 
2250             !(error 
= nfs_node_lock(np
))) { 
2251                 NACCESSINVALIDATE(np
); 
2252                 nfs_node_unlock(np
); 
2254                         dvp 
= vnode_getparent(vp
); 
2255                         vname 
= vnode_getname(vp
); 
2256                         dnp 
= (dvp 
&& vname
) ? VTONFS(dvp
) : NULL
; 
2258                                 if (nfs_node_set_busy(dnp
, vfs_context_thread(ctx
))) { 
2260                                         vnode_putname(vname
); 
2262                                         nfs_dulookup_init(dul
, dnp
, vname
, NFS_STRLEN_INT(vname
), ctx
); 
2263                                         nfs_dulookup_start(dul
, dnp
, ctx
); 
2264                                         dul_in_progress 
= 1; 
2271                                         vnode_putname(vname
); 
2278                 error 
= nmp
->nm_funcs
->nf_setattr_rpc(np
, vap
, ctx
); 
2281         if (dul_in_progress
) { 
2282                 nfs_dulookup_finish(dul
, dnp
, ctx
); 
2283                 nfs_node_clear_busy(dnp
); 
2285                 vnode_putname(vname
); 
2289         FSDBG_BOT(512, np
->n_size
, vap
->va_data_size
, np
->n_vattr
.nva_size
, error
); 
2290         if (VATTR_IS_ACTIVE(vap
, va_data_size
)) { 
2291                 if (error 
&& (origsize 
!= np
->n_size
) && 
2292                     ((nfsvers 
< NFS_VER4
) || !nfs_mount_state_error_should_restart(error
))) { 
2293                         /* make every effort to resync file size w/ server... */ 
2294                         /* (don't bother if we'll be restarting the operation) */ 
2295                         int err
; /* preserve "error" for return */ 
2296                         np
->n_size 
= np
->n_vattr
.nva_size 
= origsize
; 
2297                         nfs_node_lock_force(np
); 
2298                         CLR(np
->n_flag
, NUPDATESIZE
); 
2299                         nfs_node_unlock(np
); 
2300                         FSDBG(512, np
, np
->n_size
, np
->n_vattr
.nva_size
, 0xf00d0002); 
2301                         ubc_setsize(vp
, (off_t
)np
->n_size
); /* XXX check error */ 
2302                         vapsize 
= vap
->va_data_size
; 
2303                         vap
->va_data_size 
= origsize
; 
2304                         err 
= nmp
->nm_funcs
->nf_setattr_rpc(np
, vap
, ctx
); 
2306                                 NP(np
, "nfs_vnop_setattr: nfs%d_setattr_rpc %d %d", nfsvers
, error
, err
); 
2308                         vap
->va_data_size 
= vapsize
; 
2310                 nfs_node_lock_force(np
); 
2312                  * The size was just set.  If the size is already marked for update, don't 
2313                  * trust the newsize (it may have been set while the setattr was in progress). 
2314                  * Clear the update flag and make sure we fetch new attributes so we are sure 
2315                  * we have the latest size. 
2317                 if (ISSET(np
->n_flag
, NUPDATESIZE
)) { 
2318                         CLR(np
->n_flag
, NUPDATESIZE
); 
2319                         NATTRINVALIDATE(np
); 
2320                         nfs_node_unlock(np
); 
2321                         nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
); 
2323                         nfs_node_unlock(np
); 
2325                 nfs_data_unlock(np
); 
2327                 if (nfsvers 
>= NFS_VER4
) { 
2329                                 /* don't close our setattr open if we'll be restarting... */ 
2330                                 if (!nfs_mount_state_error_should_restart(error
) && 
2331                                     (nofp
->nof_flags 
& NFS_OPEN_FILE_SETATTR
)) { 
2332                                         int err 
= nfs_close(np
, nofp
, NFS_OPEN_SHARE_ACCESS_WRITE
, NFS_OPEN_SHARE_DENY_NONE
, ctx
); 
2334                                                 NP(np
, "nfs_vnop_setattr: close error: %d", err
); 
2336                                         nofp
->nof_flags 
&= ~NFS_OPEN_FILE_SETATTR
; 
2338                                 nfs_open_file_clear_busy(nofp
); 
2341                         if (nfs_mount_state_in_use_end(nmp
, error
)) { 
2344                         nfs_open_owner_rele(noop
); 
2352  * Do an NFS setattr RPC. 
2357         struct vnode_attr 
*vap
, 
2360         struct nfsmount 
*nmp 
= NFSTONMP(np
); 
2361         int error 
= 0, lockerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0, nfsvers
; 
2362         u_int64_t xid
, nextxid
; 
2363         struct nfsm_chain nmreq
, nmrep
; 
2365         if (nfs_mount_gone(nmp
)) { 
2368         nfsvers 
= nmp
->nm_vers
; 
2370         VATTR_SET_SUPPORTED(vap
, va_mode
); 
2371         VATTR_SET_SUPPORTED(vap
, va_uid
); 
2372         VATTR_SET_SUPPORTED(vap
, va_gid
); 
2373         VATTR_SET_SUPPORTED(vap
, va_data_size
); 
2374         VATTR_SET_SUPPORTED(vap
, va_access_time
); 
2375         VATTR_SET_SUPPORTED(vap
, va_modify_time
); 
2378         if (VATTR_IS_ACTIVE(vap
, va_flags
) 
2380                 if (vap
->va_flags
) {    /* we don't support setting flags */ 
2381                         if (vap
->va_active 
& ~VNODE_ATTR_va_flags
) { 
2382                                 return EINVAL
;        /* return EINVAL if other attributes also set */ 
2384                                 return ENOTSUP
;       /* return ENOTSUP for chflags(2) */ 
2387                 /* no flags set, so we'll just ignore it */ 
2388                 if (!(vap
->va_active 
& ~VNODE_ATTR_va_flags
)) { 
2389                         return 0; /* no (other) attributes to set, so nothing to do */ 
2393         nfsm_chain_null(&nmreq
); 
2394         nfsm_chain_null(&nmrep
); 
2396         nfsm_chain_build_alloc_init(error
, &nmreq
, 
2397             NFSX_FH(nfsvers
) + NFSX_SATTR(nfsvers
)); 
2398         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
); 
2399         if (nfsvers 
== NFS_VER3
) { 
2400                 if (VATTR_IS_ACTIVE(vap
, va_mode
)) { 
2401                         nfsm_chain_add_32(error
, &nmreq
, TRUE
); 
2402                         nfsm_chain_add_32(error
, &nmreq
, vap
->va_mode
); 
2404                         nfsm_chain_add_32(error
, &nmreq
, FALSE
); 
2406                 if (VATTR_IS_ACTIVE(vap
, va_uid
)) { 
2407                         nfsm_chain_add_32(error
, &nmreq
, TRUE
); 
2408                         nfsm_chain_add_32(error
, &nmreq
, vap
->va_uid
); 
2410                         nfsm_chain_add_32(error
, &nmreq
, FALSE
); 
2412                 if (VATTR_IS_ACTIVE(vap
, va_gid
)) { 
2413                         nfsm_chain_add_32(error
, &nmreq
, TRUE
); 
2414                         nfsm_chain_add_32(error
, &nmreq
, vap
->va_gid
); 
2416                         nfsm_chain_add_32(error
, &nmreq
, FALSE
); 
2418                 if (VATTR_IS_ACTIVE(vap
, va_data_size
)) { 
2419                         nfsm_chain_add_32(error
, &nmreq
, TRUE
); 
2420                         nfsm_chain_add_64(error
, &nmreq
, vap
->va_data_size
); 
2422                         nfsm_chain_add_32(error
, &nmreq
, FALSE
); 
2424                 if (vap
->va_vaflags 
& VA_UTIMES_NULL
) { 
2425                         nfsm_chain_add_32(error
, &nmreq
, NFS_TIME_SET_TO_SERVER
); 
2426                         nfsm_chain_add_32(error
, &nmreq
, NFS_TIME_SET_TO_SERVER
); 
2428                         if (VATTR_IS_ACTIVE(vap
, va_access_time
)) { 
2429                                 nfsm_chain_add_32(error
, &nmreq
, NFS_TIME_SET_TO_CLIENT
); 
2430                                 nfsm_chain_add_32(error
, &nmreq
, vap
->va_access_time
.tv_sec
); 
2431                                 nfsm_chain_add_32(error
, &nmreq
, vap
->va_access_time
.tv_nsec
); 
2433                                 nfsm_chain_add_32(error
, &nmreq
, NFS_TIME_DONT_CHANGE
); 
2435                         if (VATTR_IS_ACTIVE(vap
, va_modify_time
)) { 
2436                                 nfsm_chain_add_32(error
, &nmreq
, NFS_TIME_SET_TO_CLIENT
); 
2437                                 nfsm_chain_add_32(error
, &nmreq
, vap
->va_modify_time
.tv_sec
); 
2438                                 nfsm_chain_add_32(error
, &nmreq
, vap
->va_modify_time
.tv_nsec
); 
2440                                 nfsm_chain_add_32(error
, &nmreq
, NFS_TIME_DONT_CHANGE
); 
2443                 nfsm_chain_add_32(error
, &nmreq
, FALSE
); 
2445                 nfsm_chain_add_32(error
, &nmreq
, VATTR_IS_ACTIVE(vap
, va_mode
) ? 
2446                     vtonfsv2_mode(vnode_vtype(NFSTOV(np
)), vap
->va_mode
) : -1); 
2447                 nfsm_chain_add_32(error
, &nmreq
, VATTR_IS_ACTIVE(vap
, va_uid
) ? 
2448                     vap
->va_uid 
: (uint32_t)-1); 
2449                 nfsm_chain_add_32(error
, &nmreq
, VATTR_IS_ACTIVE(vap
, va_gid
) ? 
2450                     vap
->va_gid 
: (uint32_t)-1); 
2451                 nfsm_chain_add_32(error
, &nmreq
, VATTR_IS_ACTIVE(vap
, va_data_size
) ? 
2452                     vap
->va_data_size 
: (uint32_t)-1); 
2453                 if (VATTR_IS_ACTIVE(vap
, va_access_time
)) { 
2454                         nfsm_chain_add_32(error
, &nmreq
, vap
->va_access_time
.tv_sec
); 
2455                         nfsm_chain_add_32(error
, &nmreq
, (vap
->va_access_time
.tv_nsec 
!= -1) ? 
2456                             ((uint32_t)vap
->va_access_time
.tv_nsec 
/ 1000) : 0xffffffff); 
2458                         nfsm_chain_add_32(error
, &nmreq
, -1); 
2459                         nfsm_chain_add_32(error
, &nmreq
, -1); 
2461                 if (VATTR_IS_ACTIVE(vap
, va_modify_time
)) { 
2462                         nfsm_chain_add_32(error
, &nmreq
, vap
->va_modify_time
.tv_sec
); 
2463                         nfsm_chain_add_32(error
, &nmreq
, (vap
->va_modify_time
.tv_nsec 
!= -1) ? 
2464                             ((uint32_t)vap
->va_modify_time
.tv_nsec 
/ 1000) : 0xffffffff); 
2466                         nfsm_chain_add_32(error
, &nmreq
, -1); 
2467                         nfsm_chain_add_32(error
, &nmreq
, -1); 
2470         nfsm_chain_build_done(error
, &nmreq
); 
2472         error 
= nfs_request(np
, NULL
, &nmreq
, NFSPROC_SETATTR
, ctx
, NULL
, &nmrep
, &xid
, &status
); 
2473         if ((lockerror 
= nfs_node_lock(np
))) { 
2476         if (nfsvers 
== NFS_VER3
) { 
2477                 struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
2478                 nfsm_chain_get_wcc_data(error
, &nmrep
, np
, &premtime
, &wccpostattr
, &xid
); 
2480                 /* if file hadn't changed, update cached mtime */ 
2481                 if (nfstimespeccmp(&np
->n_mtime
, &premtime
, ==)) { 
2482                         NFS_CHANGED_UPDATE(nfsvers
, np
, &np
->n_vattr
); 
2484                 /* if directory hadn't changed, update namecache mtime */ 
2485                 if ((vnode_vtype(NFSTOV(np
)) == VDIR
) && 
2486                     nfstimespeccmp(&np
->n_ncmtime
, &premtime
, ==)) { 
2487                         NFS_CHANGED_UPDATE_NC(nfsvers
, np
, &np
->n_vattr
); 
2490                         NATTRINVALIDATE(np
); 
2497                 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
); 
2500          * We just changed the attributes and we want to make sure that we 
2501          * see the latest attributes.  Get the next XID.  If it's not the 
2502          * next XID after the SETATTR XID, then it's possible that another 
2503          * RPC was in flight at the same time and it might put stale attributes 
2504          * in the cache.  In that case, we invalidate the attributes and set 
2505          * the attribute cache XID to guarantee that newer attributes will 
2509         nfs_get_xid(&nextxid
); 
2510         if (nextxid 
!= (xid 
+ 1)) { 
2511                 np
->n_xid 
= nextxid
; 
2512                 NATTRINVALIDATE(np
); 
2516                 nfs_node_unlock(np
); 
2518         nfsm_chain_cleanup(&nmreq
); 
2519         nfsm_chain_cleanup(&nmrep
); 
2524  * NFS lookup call, one step at a time... 
2525  * First look in cache 
2526  * If not found, unlock the directory nfsnode and do the RPC 
2530         struct vnop_lookup_args 
/* { 
2531                                  *  struct vnodeop_desc *a_desc; 
2534                                  *  struct componentname *a_cnp; 
2535                                  *  vfs_context_t a_context; 
2538         vfs_context_t ctx 
= ap
->a_context
; 
2539         struct componentname 
*cnp 
= ap
->a_cnp
; 
2540         vnode_t dvp 
= ap
->a_dvp
; 
2541         vnode_t 
*vpp 
= ap
->a_vpp
; 
2542         int flags 
= cnp
->cn_flags
; 
2545         struct nfsmount 
*nmp
; 
2547         int nfsvers
, error
, busyerror 
= ENOENT
, isdot
, isdotdot
, negnamecache
; 
2549         struct nfs_vattr 
*nvattr
; 
2550         int ngflags
, skipdu 
= 0; 
2551         struct vnop_access_args naa
; 
2559         fh 
= zalloc(nfs_fhandle_zone
); 
2560         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
2561         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
2562         NVATTR_INIT(nvattr
); 
2564         mp 
= vnode_mount(dvp
); 
2566         if (nfs_mount_gone(nmp
)) { 
2570         nfsvers 
= nmp
->nm_vers
; 
2571         negnamecache 
= !NMFLAG(nmp
, NONEGNAMECACHE
); 
2573         if ((error 
= busyerror 
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
)))) { 
2576         /* nfs_getattr() will check changed and purge caches */ 
2577         if ((error 
= nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
))) { 
2581         error 
= cache_lookup(dvp
, vpp
, cnp
); 
2584                 /* negative cache entry */ 
2588                 if ((nfsvers 
> NFS_VER2
) && NMFLAG(nmp
, RDIRPLUS
)) { 
2589                         /* if rdirplus, try dir buf cache lookup */ 
2590                         error 
= nfs_dir_buf_cache_lookup(dnp
, &np
, cnp
, ctx
, 0, &skipdu
); 
2592                                 /* dir buf cache hit */ 
2595                         } else if (skipdu
) { 
2596                                 /* Skip lookup for du files */ 
2601                 if (error 
!= -1) { /* cache miss */ 
2606                 /* cache hit, not really an error */ 
2607                 OSAddAtomic64(1, &nfsstats
.lookupcache_hits
); 
2609                 nfs_node_clear_busy(dnp
); 
2612                 /* check for directory access */ 
2613                 naa
.a_desc 
= &vnop_access_desc
; 
2615                 naa
.a_action 
= KAUTH_VNODE_SEARCH
; 
2616                 naa
.a_context 
= ctx
; 
2618                 /* compute actual success/failure based on accessibility */ 
2619                 error 
= nfs_vnop_access(&naa
); 
2622                 /* unexpected error from cache_lookup */ 
2626         /* skip lookup, if we know who we are: "." or ".." */ 
2627         isdot 
= isdotdot 
= 0; 
2628         if (cnp
->cn_nameptr
[0] == '.') { 
2629                 if (cnp
->cn_namelen 
== 1) { 
2632                 if ((cnp
->cn_namelen 
== 2) && (cnp
->cn_nameptr
[1] == '.')) { 
2636         if (isdotdot 
|| isdot
) { 
2641         if ((nfsvers 
>= NFS_VER4
) && (dnp
->n_vattr
.nva_flags 
& NFS_FFLAG_TRIGGER
)) { 
2642                 /* we should never be looking things up in a trigger directory, return nothing */ 
2648         /* do we know this name is too long? */ 
2650         if (nfs_mount_gone(nmp
)) { 
2654         if (NFS_BITMAP_ISSET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_MAXNAME
) && 
2655             (cnp
->cn_namelen 
> nmp
->nm_fsattr
.nfsa_maxname
)) { 
2656                 error 
= ENAMETOOLONG
; 
2663         OSAddAtomic64(1, &nfsstats
.lookupcache_misses
); 
2665         error 
= nmp
->nm_funcs
->nf_lookup_rpc_async(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &req
); 
2667         error 
= nmp
->nm_funcs
->nf_lookup_rpc_async_finish(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, req
, &xid
, fh
, nvattr
); 
2670         /* is the file handle the same as this directory's file handle? */ 
2671         isdot 
= NFS_CMPFH(dnp
, fh
->fh_data
, fh
->fh_len
); 
2674         if (flags 
& ISLASTCN
) { 
2675                 switch (cnp
->cn_nameiop
) { 
2677                         cnp
->cn_flags 
&= ~MAKEENTRY
; 
2680                         cnp
->cn_flags 
&= ~MAKEENTRY
; 
2690                 newvp 
= vnode_getparent(dvp
); 
2696                 error 
= vnode_get(dvp
); 
2701                 nfs_node_lock_force(dnp
); 
2702                 if (fh
->fh_len 
&& (dnp
->n_xid 
<= xid
)) { 
2703                         nfs_loadattrcache(dnp
, nvattr
, &xid
, 0); 
2705                 nfs_node_unlock(dnp
); 
2707                 ngflags 
= (cnp
->cn_flags 
& MAKEENTRY
) ? NG_MAKEENTRY 
: 0; 
2708                 error 
= nfs_nget(mp
, dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, ngflags
, &np
); 
2713                 nfs_node_unlock(np
); 
2719                 if (((cnp
->cn_nameiop 
== CREATE
) || (cnp
->cn_nameiop 
== RENAME
)) && 
2720                     (flags 
& ISLASTCN
) && (error 
== ENOENT
)) { 
2721                         if (vnode_mount(dvp
) && vnode_vfsisrdonly(dvp
)) { 
2724                                 error 
= EJUSTRETURN
; 
2728         if ((error 
== ENOENT
) && (cnp
->cn_flags 
& MAKEENTRY
) && 
2729             (cnp
->cn_nameiop 
!= CREATE
) && negnamecache
) { 
2730                 /* add a negative entry in the name cache */ 
2731                 nfs_node_lock_force(dnp
); 
2732                 cache_enter(dvp
, NULL
, cnp
); 
2733                 dnp
->n_flag 
|= NNEGNCENTRIES
; 
2734                 nfs_node_unlock(dnp
); 
2737         NVATTR_CLEANUP(nvattr
); 
2738         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
2739         NFS_ZFREE(nfs_req_zone
, req
); 
2740         FREE(nvattr
, M_TEMP
); 
2742                 nfs_node_clear_busy(dnp
); 
2744         if (error 
&& *vpp
) { 
2751 int nfs_readlink_nocache 
= DEFAULT_READLINK_NOCACHE
; 
2758         struct vnop_readlink_args 
/* { 
2759                                    *  struct vnodeop_desc *a_desc; 
2761                                    *  struct uio *a_uio; 
2762                                    *  vfs_context_t a_context; 
2765         vfs_context_t ctx 
= ap
->a_context
; 
2766         nfsnode_t np 
= VTONFS(ap
->a_vp
); 
2767         struct nfsmount 
*nmp
; 
2768         int error 
= 0, nfsvers
; 
2770         uio_t uio 
= ap
->a_uio
; 
2771         struct nfsbuf 
*bp 
= NULL
; 
2772         struct timespec ts 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
2775         if (vnode_vtype(ap
->a_vp
) != VLNK
) { 
2779         if (uio_resid(uio
) == 0) { 
2782         if (uio_offset(uio
) < 0) { 
2786         nmp 
= VTONMP(ap
->a_vp
); 
2787         if (nfs_mount_gone(nmp
)) { 
2790         nfsvers 
= nmp
->nm_vers
; 
2793         /* nfs_getattr() will check changed and purge caches */ 
2794         if ((error 
= nfs_getattr(np
, NULL
, ctx
, nfs_readlink_nocache 
? NGA_UNCACHED 
: NGA_CACHED
))) { 
2795                 FSDBG(531, np
, 0xd1e0001, 0, error
); 
2799         if (nfs_readlink_nocache
) { 
2800                 timeo 
= nfs_attrcachetimeout(np
); 
2805         OSAddAtomic64(1, &nfsstats
.biocache_readlinks
); 
2806         error 
= nfs_buf_get(np
, 0, NFS_MAXPATHLEN
, vfs_context_thread(ctx
), NBLK_META
, &bp
); 
2808                 FSDBG(531, np
, 0xd1e0002, 0, error
); 
2812         if (nfs_readlink_nocache
) { 
2813                 NFS_VNOP_DBG("timeo = %ld ts.tv_sec = %ld need refresh = %d cached = %d\n", timeo
, ts
.tv_sec
, 
2814                     (np
->n_rltim
.tv_sec 
+ timeo
) < ts
.tv_sec 
|| nfs_readlink_nocache 
> 1, 
2815                     ISSET(bp
->nb_flags
, NB_CACHE
) == NB_CACHE
); 
2816                 /* n_rltim is synchronized by the associated nfs buf */ 
2817                 if (ISSET(bp
->nb_flags
, NB_CACHE
) && ((nfs_readlink_nocache 
> 1) || ((np
->n_rltim
.tv_sec 
+ timeo
) < ts
.tv_sec
))) { 
2818                         SET(bp
->nb_flags
, NB_INVAL
); 
2819                         nfs_buf_release(bp
, 0); 
2823         if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { 
2825                 OSAddAtomic64(1, &nfsstats
.readlink_bios
); 
2826                 buflen 
= bp
->nb_bufsize
; 
2827                 error 
= nmp
->nm_funcs
->nf_readlink_rpc(np
, bp
->nb_data
, &buflen
, ctx
); 
2829                         if (error 
== ESTALE
) { 
2830                                 NFS_VNOP_DBG("Stale FH from readlink rpc\n"); 
2831                                 error 
= nfs_refresh_fh(np
, ctx
); 
2836                         SET(bp
->nb_flags
, NB_ERROR
); 
2837                         bp
->nb_error 
= error
; 
2838                         NFS_VNOP_DBG("readlink failed %d\n", error
); 
2840                         bp
->nb_validoff 
= 0; 
2841                         bp
->nb_validend 
= buflen
; 
2843                         NFS_VNOP_DBG("readlink of %.*s\n", (int32_t)bp
->nb_validend
, (char *)bp
->nb_data
); 
2846                 NFS_VNOP_DBG("got cached link of %.*s\n", (int32_t)bp
->nb_validend
, (char *)bp
->nb_data
); 
2849         if (!error 
&& (bp
->nb_validend 
> 0)) { 
2850                 int validend32 
= bp
->nb_validend 
> INT_MAX 
? INT_MAX 
: (int)bp
->nb_validend
; 
2851                 error 
= uiomove(bp
->nb_data
, validend32
, uio
); 
2852                 if (!error 
&& bp
->nb_validend 
> validend32
) { 
2853                         error 
= uiomove(bp
->nb_data 
+ validend32
, (int)(bp
->nb_validend 
- validend32
), uio
); 
2856         FSDBG(531, np
, bp
->nb_validend
, 0, error
); 
2857         nfs_buf_release(bp
, 1); 
2862  * Do a readlink RPC. 
2865 nfs3_readlink_rpc(nfsnode_t np
, char *buf
, size_t *buflenp
, vfs_context_t ctx
) 
2867         struct nfsmount 
*nmp
; 
2868         int error 
= 0, lockerror 
= ENOENT
, nfsvers
, status
; 
2871         struct nfsm_chain nmreq
, nmrep
; 
2874         if (nfs_mount_gone(nmp
)) { 
2877         nfsvers 
= nmp
->nm_vers
; 
2878         nfsm_chain_null(&nmreq
); 
2879         nfsm_chain_null(&nmrep
); 
2881         nfsm_chain_build_alloc_init(error
, &nmreq
, NFSX_FH(nfsvers
)); 
2882         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
); 
2883         nfsm_chain_build_done(error
, &nmreq
); 
2885         error 
= nfs_request(np
, NULL
, &nmreq
, NFSPROC_READLINK
, ctx
, NULL
, &nmrep
, &xid
, &status
); 
2886         if ((lockerror 
= nfs_node_lock(np
))) { 
2889         if (nfsvers 
== NFS_VER3
) { 
2890                 nfsm_chain_postop_attr_update(error
, &nmrep
, np
, &xid
); 
2895         nfsm_chain_get_32(error
, &nmrep
, len
); 
2897         if ((nfsvers 
== NFS_VER2
) && (len 
> *buflenp
)) { 
2901         if (len 
>= *buflenp
) { 
2902                 if (np
->n_size 
&& (np
->n_size 
< *buflenp
)) { 
2903                         len 
= (size_t)np
->n_size
; 
2908         nfsm_chain_get_opaque(error
, &nmrep
, len
, buf
); 
2914                 nfs_node_unlock(np
); 
2916         nfsm_chain_cleanup(&nmreq
); 
2917         nfsm_chain_cleanup(&nmrep
); 
2926 nfs_read_rpc(nfsnode_t np
, uio_t uio
, vfs_context_t ctx
) 
2928         struct nfsmount 
*nmp
; 
2929         int error 
= 0, nfsvers
, eof 
= 0; 
2930         size_t nmrsize
, len
, retlen
; 
2935         uint32_t stategenid 
= 0, restart 
= 0; 
2937         FSDBG_TOP(536, np
, uio_offset(uio
), uio_resid(uio
), 0); 
2939         if (nfs_mount_gone(nmp
)) { 
2942         nfsvers 
= nmp
->nm_vers
; 
2943         nmrsize 
= nmp
->nm_rsize
; 
2945         txoffset 
= uio_offset(uio
); 
2946         tsiz 
= uio_resid(uio
); 
2947         if ((nfsvers 
== NFS_VER2
) && ((uint64_t)(txoffset 
+ tsiz
) > 0xffffffffULL
)) { 
2948                 FSDBG_BOT(536, np
, uio_offset(uio
), uio_resid(uio
), EFBIG
); 
2952         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
2954                 len 
= retlen 
= (tsiz 
> (user_ssize_t
)nmrsize
) ? nmrsize 
: (size_t)tsiz
; 
2955                 FSDBG(536, np
, txoffset
, len
, 0); 
2956                 if (np
->n_flag 
& NREVOKE
) { 
2961                 if (nmp
->nm_vers 
>= NFS_VER4
) { 
2962                         stategenid 
= nmp
->nm_stategenid
; 
2965                 error 
= nmp
->nm_funcs
->nf_read_rpc_async(np
, txoffset
, len
, 
2966                     vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, &req
); 
2968                         error 
= nmp
->nm_funcs
->nf_read_rpc_async_finish(np
, req
, uio
, &retlen
, &eof
); 
2971                 if ((nmp
->nm_vers 
>= NFS_VER4
) && nfs_mount_state_error_should_restart(error
) && 
2972                     (++restart 
<= nfs_mount_state_max_restarts(nmp
))) { /* guard against no progress */ 
2973                         lck_mtx_lock(&nmp
->nm_lock
); 
2974                         if ((error 
!= NFSERR_GRACE
) && (stategenid 
== nmp
->nm_stategenid
)) { 
2975                                 NP(np
, "nfs_read_rpc: error %d, initiating recovery", error
); 
2976                                 nfs_need_recover(nmp
, error
); 
2978                         lck_mtx_unlock(&nmp
->nm_lock
); 
2979                         if (np
->n_flag 
& NREVOKE
) { 
2982                                 if (error 
== NFSERR_GRACE
) { 
2983                                         tsleep(&nmp
->nm_state
, (PZERO 
- 1), "nfsgrace", 2 * hz
); 
2985                                 if (!(error 
= nfs_mount_state_wait_for_recovery(nmp
))) { 
2996                 if (nfsvers 
!= NFS_VER2
) { 
2997                         if (eof 
|| (retlen 
== 0)) { 
3000                 } else if (retlen 
< len
) { 
3005         NFS_ZFREE(nfs_req_zone
, req
); 
3006         FSDBG_BOT(536, np
, eof
, uio_resid(uio
), error
); 
3011 nfs3_read_rpc_async( 
3017         struct nfsreq_cbinfo 
*cb
, 
3018         struct nfsreq 
**reqp
) 
3020         struct nfsmount 
*nmp
; 
3021         int error 
= 0, nfsvers
; 
3022         struct nfsm_chain nmreq
; 
3025         if (nfs_mount_gone(nmp
)) { 
3028         nfsvers 
= nmp
->nm_vers
; 
3030         nfsm_chain_null(&nmreq
); 
3031         nfsm_chain_build_alloc_init(error
, &nmreq
, NFSX_FH(nfsvers
) + 3 * NFSX_UNSIGNED
); 
3032         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
); 
3033         if (nfsvers 
== NFS_VER3
) { 
3034                 nfsm_chain_add_64(error
, &nmreq
, offset
); 
3035                 nfsm_chain_add_32(error
, &nmreq
, len
); 
3037                 nfsm_chain_add_32(error
, &nmreq
, offset
); 
3038                 nfsm_chain_add_32(error
, &nmreq
, len
); 
3039                 nfsm_chain_add_32(error
, &nmreq
, 0); 
3041         nfsm_chain_build_done(error
, &nmreq
); 
3043         error 
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC_READ
, thd
, cred
, NULL
, 0, cb
, reqp
); 
3045         nfsm_chain_cleanup(&nmreq
); 
3050 nfs3_read_rpc_async_finish( 
3057         int error 
= 0, lockerror
, nfsvers
, status 
= 0, eof 
= 0; 
3058         uint32_t retlen 
= 0; 
3060         struct nfsmount 
*nmp
; 
3061         struct nfsm_chain nmrep
; 
3064         if (nfs_mount_gone(nmp
)) { 
3065                 nfs_request_async_cancel(req
); 
3068         nfsvers 
= nmp
->nm_vers
; 
3070         nfsm_chain_null(&nmrep
); 
3072         error 
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
); 
3073         if (error 
== EINPROGRESS
) { /* async request restarted */ 
3077         if ((lockerror 
= nfs_node_lock(np
))) { 
3080         if (nfsvers 
== NFS_VER3
) { 
3081                 nfsm_chain_postop_attr_update(error
, &nmrep
, np
, &xid
); 
3086         if (nfsvers 
== NFS_VER3
) { 
3087                 nfsm_chain_adv(error
, &nmrep
, NFSX_UNSIGNED
); 
3088                 nfsm_chain_get_32(error
, &nmrep
, eof
); 
3090                 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
); 
3093                 nfs_node_unlock(np
); 
3095         nfsm_chain_get_32(error
, &nmrep
, retlen
); 
3096         if ((nfsvers 
== NFS_VER2
) && (retlen 
> *lenp
)) { 
3100         error 
= nfsm_chain_get_uio(&nmrep
, MIN(retlen
, *lenp
), uio
); 
3102                 if (nfsvers 
== NFS_VER3
) { 
3103                         if (!eof 
&& !retlen
) { 
3106                 } else if (retlen 
< *lenp
) { 
3111         *lenp 
= MIN(retlen
, *lenp
); 
3113         nfsm_chain_cleanup(&nmrep
); 
3122         struct vnop_write_args 
/* { 
3123                                 *  struct vnodeop_desc *a_desc; 
3125                                 *  struct uio *a_uio; 
3127                                 *  vfs_context_t a_context; 
3130         vfs_context_t ctx 
= ap
->a_context
; 
3131         uio_t uio 
= ap
->a_uio
; 
3132         vnode_t vp 
= ap
->a_vp
; 
3133         nfsnode_t np 
= VTONFS(vp
); 
3134         int ioflag 
= ap
->a_ioflag
; 
3136         struct nfsmount 
*nmp 
= VTONMP(vp
); 
3142         off_t boff
, start
, end
; 
3144         char auio_buf
[UIO_SIZEOF(1)]; 
3148         FSDBG_TOP(515, np
, uio_offset(uio
), uio_resid(uio
), ioflag
); 
3150         if (vnode_vtype(vp
) != VREG
) { 
3151                 FSDBG_BOT(515, np
, uio_offset(uio
), uio_resid(uio
), EIO
); 
3155         thd 
= vfs_context_thread(ctx
); 
3156         cred 
= vfs_context_ucred(ctx
); 
3158         nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
); 
3160         if ((error 
= nfs_node_lock(np
))) { 
3161                 nfs_data_unlock(np
); 
3162                 FSDBG_BOT(515, np
, uio_offset(uio
), uio_resid(uio
), error
); 
3167         if (np
->n_flag 
& NWRITEERR
) { 
3168                 error 
= np
->n_error
; 
3169                 np
->n_flag 
&= ~NWRITEERR
; 
3171         if (np
->n_flag 
& NNEEDINVALIDATE
) { 
3172                 np
->n_flag 
&= ~NNEEDINVALIDATE
; 
3173                 nfs_node_unlock(np
); 
3174                 nfs_data_unlock(np
); 
3175                 nfs_vinvalbuf(vp
, V_SAVE 
| V_IGNORE_WRITEERR
, ctx
, 1); 
3176                 nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
); 
3178                 nfs_node_unlock(np
); 
3184         biosize 
= nmp
->nm_biosize
; 
3186         if (ioflag 
& (IO_APPEND 
| IO_SYNC
)) { 
3187                 nfs_node_lock_force(np
); 
3188                 if (np
->n_flag 
& NMODIFIED
) { 
3189                         NATTRINVALIDATE(np
); 
3190                         nfs_node_unlock(np
); 
3191                         nfs_data_unlock(np
); 
3192                         error 
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1); 
3193                         nfs_data_lock(np
, NFS_DATA_LOCK_SHARED
); 
3195                                 FSDBG(515, np
, uio_offset(uio
), 0x10bad01, error
); 
3199                         nfs_node_unlock(np
); 
3201                 if (ioflag 
& IO_APPEND
) { 
3202                         nfs_data_unlock(np
); 
3203                         /* nfs_getattr() will check changed and purge caches */ 
3204                         error 
= nfs_getattr(np
, NULL
, ctx
, NGA_UNCACHED
); 
3205                         /* we'll be extending the file, so take the data lock exclusive */ 
3206                         nfs_data_lock(np
, NFS_DATA_LOCK_EXCLUSIVE
); 
3208                                 FSDBG(515, np
, uio_offset(uio
), 0x10bad02, error
); 
3211                         uio_setoffset(uio
, np
->n_size
); 
3214         if (uio_offset(uio
) < 0) { 
3216                 FSDBG_BOT(515, np
, uio_offset(uio
), 0xbad0ff, error
); 
3219         if (uio_resid(uio
) == 0) { 
3223         if (((uio_offset(uio
) + uio_resid(uio
)) > (off_t
)np
->n_size
) && !(ioflag 
& IO_APPEND
)) { 
3225                  * It looks like we'll be extending the file, so take the data lock exclusive. 
3227                 nfs_data_unlock(np
); 
3228                 nfs_data_lock(np
, NFS_DATA_LOCK_EXCLUSIVE
); 
3231                  * Also, if the write begins after the previous EOF buffer, make sure to zero 
3232                  * and validate the new bytes in that buffer. 
3234                 struct nfsbuf 
*eofbp 
= NULL
; 
3235                 daddr64_t eofbn 
= np
->n_size 
/ biosize
; 
3236                 uint32_t eofoff 
= np
->n_size 
% biosize
; 
3237                 lbn 
= uio_offset(uio
) / biosize
; 
3239                 if (eofoff 
&& (eofbn 
< lbn
)) { 
3240                         if ((error 
= nfs_buf_get(np
, eofbn
, biosize
, thd
, NBLK_WRITE 
| NBLK_ONLYVALID
, &eofbp
))) { 
3243                         np
->n_size 
+= (biosize 
- eofoff
); 
3244                         nfs_node_lock_force(np
); 
3245                         CLR(np
->n_flag
, NUPDATESIZE
); 
3246                         np
->n_flag 
|= NMODIFIED
; 
3247                         nfs_node_unlock(np
); 
3248                         FSDBG(516, np
, np
->n_size
, np
->n_vattr
.nva_size
, 0xf00d0001); 
3249                         ubc_setsize(vp
, (off_t
)np
->n_size
); /* XXX errors */ 
3252                                  * For the old last page, don't zero bytes if there 
3253                                  * are invalid bytes in that page (i.e. the page isn't 
3255                                  * For pages after the old last page, zero them and 
3256                                  * mark them as valid. 
3260                                 if (ioflag 
& IO_NOCACHE
) { 
3261                                         SET(eofbp
->nb_flags
, NB_NOCACHE
); 
3264                                 FSDBG(516, eofbp
, eofoff
, biosize 
- eofoff
, 0xe0fff01e); 
3266                                 i 
= eofoff 
/ PAGE_SIZE
; 
3267                                 while (eofoff 
< biosize
) { 
3268                                         int poff 
= eofoff 
& PAGE_MASK
; 
3269                                         if (!poff 
|| NBPGVALID(eofbp
, i
)) { 
3270                                                 bzero(d 
+ eofoff
, PAGE_SIZE 
- poff
); 
3271                                                 NBPGVALID_SET(eofbp
, i
); 
3273                                         eofoff 
+= PAGE_SIZE 
- poff
; 
3276                                 nfs_buf_release(eofbp
, 1); 
3282                 OSAddAtomic64(1, &nfsstats
.biocache_writes
); 
3283                 lbn 
= uio_offset(uio
) / biosize
; 
3284                 on 
= uio_offset(uio
) % biosize
; 
3286                 if (uio_resid(uio
) < n
) { 
3291                  * Get a cache block for writing.  The range to be written is 
3292                  * (off..off+n) within the block.  We ensure that the block 
3293                  * either has no dirty region or that the given range is 
3294                  * contiguous with the existing dirty region. 
3296                 error 
= nfs_buf_get(np
, lbn
, biosize
, thd
, NBLK_WRITE
, &bp
); 
3300                 /* map the block because we know we're going to write to it */ 
3303                 if (ioflag 
& IO_NOCACHE
) { 
3304                         SET(bp
->nb_flags
, NB_NOCACHE
); 
3307                 if (!IS_VALID_CRED(bp
->nb_wcred
)) { 
3308                         kauth_cred_ref(cred
); 
3309                         bp
->nb_wcred 
= cred
; 
3313                  * If there's already a dirty range AND dirty pages in this block we 
3314                  * need to send a commit AND write the dirty pages before continuing. 
3316                  * If there's already a dirty range OR dirty pages in this block 
3317                  * and the new write range is not contiguous with the existing range, 
3318                  * then force the buffer to be written out now. 
3319                  * (We used to just extend the dirty range to cover the valid, 
3320                  * but unwritten, data in between also.  But writing ranges 
3321                  * of data that weren't actually written by an application 
3322                  * risks overwriting some other client's data with stale data 
3323                  * that's just masquerading as new written data.) 
3325                 if (bp
->nb_dirtyend 
> 0) { 
3326                         if (on 
> bp
->nb_dirtyend 
|| (on 
+ n
) < bp
->nb_dirtyoff 
|| nfs_buf_pgs_is_set(&bp
->nb_dirty
)) { 
3327                                 FSDBG(515, np
, uio_offset(uio
), bp
, 0xd15c001); 
3328                                 /* write/commit buffer "synchronously" */ 
3329                                 /* (NB_STABLE indicates that data writes should be FILESYNC) */ 
3330                                 CLR(bp
->nb_flags
, (NB_DONE 
| NB_ERROR 
| NB_INVAL
)); 
3331                                 SET(bp
->nb_flags
, (NB_ASYNC 
| NB_STABLE
)); 
3332                                 error 
= nfs_buf_write(bp
); 
3338                 } else if (nfs_buf_pgs_is_set(&bp
->nb_dirty
)) { 
3339                         off_t firstpg 
= 0, lastpg 
= 0; 
3340                         nfsbufpgs pagemask
, pagemaskand
; 
3341                         /* calculate write range pagemask */ 
3343                                 firstpg 
= on 
/ PAGE_SIZE
; 
3344                                 lastpg 
= (on 
+ n 
- 1) / PAGE_SIZE
; 
3345                                 nfs_buf_pgs_set_pages_between(&pagemask
, firstpg
, lastpg 
+ 1); 
3347                                 NBPGS_ERASE(&pagemask
); 
3349                         /* check if there are dirty pages outside the write range */ 
3350                         nfs_buf_pgs_bit_not(&pagemask
); 
3351                         nfs_buf_pgs_bit_and(&bp
->nb_dirty
, &pagemask
, &pagemaskand
); 
3352                         if (nfs_buf_pgs_is_set(&pagemaskand
)) { 
3353                                 FSDBG(515, np
, uio_offset(uio
), bp
, 0xd15c002); 
3354                                 /* write/commit buffer "synchronously" */ 
3355                                 /* (NB_STABLE indicates that data writes should be FILESYNC) */ 
3356                                 CLR(bp
->nb_flags
, (NB_DONE 
| NB_ERROR 
| NB_INVAL
)); 
3357                                 SET(bp
->nb_flags
, (NB_ASYNC 
| NB_STABLE
)); 
3358                                 error 
= nfs_buf_write(bp
); 
3364                         /* if the first or last pages are already dirty */ 
3365                         /* make sure that the dirty range encompasses those pages */ 
3366                         if (NBPGDIRTY(bp
, firstpg
) || NBPGDIRTY(bp
, lastpg
)) { 
3367                                 FSDBG(515, np
, uio_offset(uio
), bp
, 0xd15c003); 
3368                                 bp
->nb_dirtyoff 
= MIN(on
, firstpg 
* PAGE_SIZE
); 
3369                                 if (NBPGDIRTY(bp
, lastpg
)) { 
3370                                         bp
->nb_dirtyend 
= (lastpg 
+ 1) * PAGE_SIZE
; 
3372                                         if (NBOFF(bp
) + bp
->nb_dirtyend 
> (off_t
)np
->n_size
) { 
3373                                                 bp
->nb_dirtyend 
= np
->n_size 
- NBOFF(bp
); 
3374                                                 if (bp
->nb_dirtyoff 
>= bp
->nb_dirtyend
) { 
3375                                                         bp
->nb_dirtyoff 
= bp
->nb_dirtyend 
= 0; 
3379                                         bp
->nb_dirtyend 
= on 
+ n
; 
3385                  * Are we extending the size of the file with this write? 
3386                  * If so, update file size now that we have the block. 
3387                  * If there was a partial buf at the old eof, validate 
3388                  * and zero the new bytes. 
3390                 if ((uio_offset(uio
) + n
) > (off_t
)np
->n_size
) { 
3391                         daddr64_t eofbn 
= np
->n_size 
/ biosize
; 
3392                         int neweofoff 
= (uio_offset(uio
) + n
) % biosize
; 
3394                         FSDBG(515, 0xb1ffa000, uio_offset(uio
) + n
, eofoff
, neweofoff
); 
3396                         /* if we're extending within the same last block */ 
3397                         /* and the block is flagged as being cached... */ 
3398                         if ((lbn 
== eofbn
) && ISSET(bp
->nb_flags
, NB_CACHE
)) { 
3399                                 /* ...check that all pages in buffer are valid */ 
3400                                 int endpg 
= ((neweofoff 
? neweofoff 
: biosize
) - 1) / PAGE_SIZE
; 
3401                                 nfsbufpgs pagemask
, pagemaskand
; 
3402                                 /* pagemask only has to extend to last page being written to */ 
3403                                 nfs_buf_pgs_get_page_mask(&pagemask
, endpg 
+ 1); 
3404                                 FSDBG(515, 0xb1ffa001, bp
->nb_valid
, pagemask
, 0); 
3405                                 nfs_buf_pgs_bit_and(&bp
->nb_valid
, &pagemask
, &pagemaskand
); 
3406                                 if (!NBPGS_IS_EQUAL(&pagemaskand
, &pagemask
)) { 
3407                                         /* zerofill any hole */ 
3408                                         if (on 
> bp
->nb_validend
) { 
3409                                                 for (off_t i 
= bp
->nb_validend 
/ PAGE_SIZE
; i 
<= (on 
- 1) / PAGE_SIZE
; i
++) { 
3410                                                         NBPGVALID_SET(bp
, i
); 
3413                                                 FSDBG(516, bp
, bp
->nb_validend
, on 
- bp
->nb_validend
, 0xf01e); 
3414                                                 NFS_BZERO((char *)bp
->nb_data 
+ bp
->nb_validend
, on 
- bp
->nb_validend
); 
3416                                         /* zerofill any trailing data in the last page */ 
3419                                                 FSDBG(516, bp
, neweofoff
, PAGE_SIZE 
- (neweofoff 
& PAGE_MASK
), 0xe0f); 
3420                                                 bzero((char *)bp
->nb_data 
+ neweofoff
, 
3421                                                     PAGE_SIZE 
- (neweofoff 
& PAGE_MASK
)); 
3425                         np
->n_size 
= uio_offset(uio
) + n
; 
3426                         nfs_node_lock_force(np
); 
3427                         CLR(np
->n_flag
, NUPDATESIZE
); 
3428                         np
->n_flag 
|= NMODIFIED
; 
3429                         nfs_node_unlock(np
); 
3430                         FSDBG(516, np
, np
->n_size
, np
->n_vattr
.nva_size
, 0xf00d0001); 
3431                         ubc_setsize(vp
, (off_t
)np
->n_size
); /* XXX errors */ 
3434                  * If dirtyend exceeds file size, chop it down.  This should 
3435                  * not occur unless there is a race. 
3437                 if (NBOFF(bp
) + bp
->nb_dirtyend 
> (off_t
)np
->n_size
) { 
3438                         bp
->nb_dirtyend 
= np
->n_size 
- NBOFF(bp
); 
3439                         if (bp
->nb_dirtyoff 
>= bp
->nb_dirtyend
) { 
3440                                 bp
->nb_dirtyoff 
= bp
->nb_dirtyend 
= 0; 
3444                  * UBC doesn't handle partial pages, so we need to make sure 
3445                  * that any pages left in the page cache are completely valid. 
3447                  * Writes that are smaller than a block are delayed if they 
3448                  * don't extend to the end of the block. 
3450                  * If the block isn't (completely) cached, we may need to read 
3451                  * in some parts of pages that aren't covered by the write. 
3452                  * If the write offset (on) isn't page aligned, we'll need to 
3453                  * read the start of the first page being written to.  Likewise, 
3454                  * if the offset of the end of the write (on+n) isn't page aligned, 
3455                  * we'll need to read the end of the last page being written to. 
3458                  * We don't want to read anything we're just going to write over. 
3459                  * We don't want to read anything we're just going to drop when the 
3460                  *   I/O is complete (i.e. don't do reads for NOCACHE requests). 
3461                  * We don't want to issue multiple I/Os if we don't have to 
3462                  *   (because they're synchronous rpcs). 
3463                  * We don't want to read anything we already have modified in the 
3466                 if (!ISSET(bp
->nb_flags
, NB_CACHE
) && (n 
< biosize
)) { 
3467                         off_t firstpgoff
, lastpgoff
, firstpg
, lastpg
, dirtypg
; 
3469                         firstpg 
= on 
/ PAGE_SIZE
; 
3470                         firstpgoff 
= on 
& PAGE_MASK
; 
3471                         lastpg 
= (on 
+ n 
- 1) / PAGE_SIZE
; 
3472                         lastpgoff 
= (on 
+ n
) & PAGE_MASK
; 
3473                         if (firstpgoff 
&& !NBPGVALID(bp
, firstpg
)) { 
3474                                 /* need to read start of first page */ 
3475                                 start 
= firstpg 
* PAGE_SIZE
; 
3476                                 end 
= start 
+ firstpgoff
; 
3478                         if (lastpgoff 
&& !NBPGVALID(bp
, lastpg
)) { 
3479                                 /* need to read end of last page */ 
3481                                         start 
= (lastpg 
* PAGE_SIZE
) + lastpgoff
; 
3483                                 end 
= (lastpg 
+ 1) * PAGE_SIZE
; 
3485                         if (ISSET(bp
->nb_flags
, NB_NOCACHE
)) { 
3487                                  * For nocache writes, if there is any partial page at the 
3488                                  * start or end of the write range, then we do the write 
3489                                  * synchronously to make sure that we can drop the data 
3490                                  * from the cache as soon as the WRITE finishes.  Normally, 
3491                                  * we would do an unstable write and not drop the data until 
3492                                  * it was committed.  But doing that here would risk allowing 
3493                                  * invalid data to be read from the cache between the WRITE 
3495                                  * (NB_STABLE indicates that data writes should be FILESYNC) 
3498                                         SET(bp
->nb_flags
, NB_STABLE
); 
3503                                 /* need to read the data in range: start...end-1 */ 
3505                                 /* first, check for dirty pages in between */ 
3506                                 /* if there are, we'll have to do two reads because */ 
3507                                 /* we don't want to overwrite the dirty pages. */ 
3508                                 for (dirtypg 
= start 
/ PAGE_SIZE
; dirtypg 
<= (end 
- 1) / PAGE_SIZE
; dirtypg
++) { 
3509                                         if (NBPGDIRTY(bp
, dirtypg
)) { 
3514                                 /* if start is at beginning of page, try */ 
3515                                 /* to get any preceding pages as well. */ 
3516                                 if (!(start 
& PAGE_MASK
)) { 
3517                                         /* stop at next dirty/valid page or start of block */ 
3518                                         for (; start 
> 0; start 
-= PAGE_SIZE
) { 
3519                                                 if (NBPGVALID(bp
, ((start 
- 1) / PAGE_SIZE
))) { 
3526                                 /* setup uio for read(s) */ 
3528                                 auio 
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_READ
, 
3529                                     &auio_buf
, sizeof(auio_buf
)); 
3531                                 if (dirtypg 
<= (end 
- 1) / PAGE_SIZE
) { 
3532                                         /* there's a dirty page in the way, so just do two reads */ 
3533                                         /* we'll read the preceding data here */ 
3534                                         uio_reset(auio
, boff 
+ start
, UIO_SYSSPACE
, UIO_READ
); 
3535                                         NFS_UIO_ADDIOV(auio
, CAST_USER_ADDR_T(bp
->nb_data 
+ start
), on 
- start
); 
3536                                         error 
= nfs_read_rpc(np
, auio
, ctx
); 
3538                                                 /* couldn't read the data, so treat buffer as synchronous NOCACHE */ 
3539                                                 SET(bp
->nb_flags
, (NB_NOCACHE 
| NB_STABLE
)); 
3542                                         if (uio_resid(auio
) > 0) { 
3543                                                 FSDBG(516, bp
, (caddr_t
)uio_curriovbase(auio
) - bp
->nb_data
, uio_resid(auio
), 0xd00dee01); 
3544                                                 bzero(CAST_DOWN(caddr_t
, uio_curriovbase(auio
)), uio_resid(auio
)); 
3547                                                 /* update validoff/validend if necessary */ 
3548                                                 if ((bp
->nb_validoff 
< 0) || (bp
->nb_validoff 
> start
)) { 
3549                                                         bp
->nb_validoff 
= start
; 
3551                                                 if ((bp
->nb_validend 
< 0) || (bp
->nb_validend 
< on
)) { 
3552                                                         bp
->nb_validend 
= on
; 
3554                                                 if ((off_t
)np
->n_size 
> boff 
+ bp
->nb_validend
) { 
3555                                                         bp
->nb_validend 
= MIN(np
->n_size 
- (boff 
+ start
), biosize
); 
3557                                                 /* validate any pages before the write offset */ 
3558                                                 for (; start 
< on 
/ PAGE_SIZE
; start 
+= PAGE_SIZE
) { 
3559                                                         NBPGVALID_SET(bp
, start 
/ PAGE_SIZE
); 
3562                                         /* adjust start to read any trailing data */ 
3566                                 /* if end is at end of page, try to */ 
3567                                 /* get any following pages as well. */ 
3568                                 if (!(end 
& PAGE_MASK
)) { 
3569                                         /* stop at next valid page or end of block */ 
3570                                         for (; end 
< biosize
; end 
+= PAGE_SIZE
) { 
3571                                                 if (NBPGVALID(bp
, end 
/ PAGE_SIZE
)) { 
3577                                 if (((boff 
+ start
) >= (off_t
)np
->n_size
) || 
3578                                     ((start 
>= on
) && ((boff 
+ on 
+ n
) >= (off_t
)np
->n_size
))) { 
3580                                          * Either this entire read is beyond the current EOF 
3581                                          * or the range that we won't be modifying (on+n...end) 
3582                                          * is all beyond the current EOF. 
3583                                          * No need to make a trip across the network to 
3584                                          * read nothing.  So, just zero the buffer instead. 
3586                                         FSDBG(516, bp
, start
, end 
- start
, 0xd00dee00); 
3587                                         NFS_BZERO(bp
->nb_data 
+ start
, end 
- start
); 
3590                                         /* now we'll read the (rest of the) data */ 
3591                                         uio_reset(auio
, boff 
+ start
, UIO_SYSSPACE
, UIO_READ
); 
3592                                         NFS_UIO_ADDIOV(auio
, CAST_USER_ADDR_T(bp
->nb_data 
+ start
), end 
- start
); 
3593                                         error 
= nfs_read_rpc(np
, auio
, ctx
); 
3595                                                 /* couldn't read the data, so treat buffer as synchronous NOCACHE */ 
3596                                                 SET(bp
->nb_flags
, (NB_NOCACHE 
| NB_STABLE
)); 
3599                                         if (uio_resid(auio
) > 0) { 
3600                                                 FSDBG(516, bp
, (caddr_t
)uio_curriovbase(auio
) - bp
->nb_data
, uio_resid(auio
), 0xd00dee02); 
3601                                                 bzero(CAST_DOWN(caddr_t
, uio_curriovbase(auio
)), uio_resid(auio
)); 
3605                                         /* update validoff/validend if necessary */ 
3606                                         if ((bp
->nb_validoff 
< 0) || (bp
->nb_validoff 
> start
)) { 
3607                                                 bp
->nb_validoff 
= start
; 
3609                                         if ((bp
->nb_validend 
< 0) || (bp
->nb_validend 
< end
)) { 
3610                                                 bp
->nb_validend 
= end
; 
3612                                         if ((off_t
)np
->n_size 
> boff 
+ bp
->nb_validend
) { 
3613                                                 bp
->nb_validend 
= MIN(np
->n_size 
- (boff 
+ start
), biosize
); 
3615                                         /* validate any pages before the write offset's page */ 
3616                                         for (; start 
< (off_t
)trunc_page_64(on
); start 
+= PAGE_SIZE
) { 
3617                                                 NBPGVALID_SET(bp
, start 
/ PAGE_SIZE
); 
3619                                         /* validate any pages after the range of pages being written to */ 
3620                                         for (; (end 
- 1) > (off_t
)round_page_64(on 
+ n 
- 1); end 
-= PAGE_SIZE
) { 
3621                                                 NBPGVALID_SET(bp
, (end 
- 1) / PAGE_SIZE
); 
3624                                 /* Note: pages being written to will be validated when written */ 
3629                 if (ISSET(bp
->nb_flags
, NB_ERROR
)) { 
3630                         error 
= bp
->nb_error
; 
3631                         nfs_buf_release(bp
, 1); 
3635                 nfs_node_lock_force(np
); 
3636                 np
->n_flag 
|= NMODIFIED
; 
3637                 nfs_node_unlock(np
); 
3643                         n32 
= n 
> INT_MAX 
? INT_MAX 
: (int)n
; 
3644                         error 
= uiomove(bp
->nb_data 
+ on
, n32
, uio
); 
3645                         if (!error 
&& n 
> n32
) { 
3646                                 error 
= uiomove(bp
->nb_data 
+ on 
+ n32
, (int)(n 
- n32
), uio
); 
3650                         SET(bp
->nb_flags
, NB_ERROR
); 
3651                         nfs_buf_release(bp
, 1); 
3655                 /* validate any pages written to */ 
3656                 start 
= on 
& ~PAGE_MASK
; 
3657                 for (; start 
< on 
+ n
; start 
+= PAGE_SIZE
) { 
3658                         NBPGVALID_SET(bp
, start 
/ PAGE_SIZE
); 
3660                          * This may seem a little weird, but we don't actually set the 
3661                          * dirty bits for writes.  This is because we keep the dirty range 
3662                          * in the nb_dirtyoff/nb_dirtyend fields.  Also, particularly for 
3663                          * delayed writes, when we give the pages back to the VM we don't 
3664                          * want to keep them marked dirty, because when we later write the 
3665                          * buffer we won't be able to tell which pages were written dirty 
3666                          * and which pages were mmapped and dirtied. 
3669                 if (bp
->nb_dirtyend 
> 0) { 
3670                         bp
->nb_dirtyoff 
= MIN(on
, bp
->nb_dirtyoff
); 
3671                         bp
->nb_dirtyend 
= MAX((on 
+ n
), bp
->nb_dirtyend
); 
3673                         bp
->nb_dirtyoff 
= on
; 
3674                         bp
->nb_dirtyend 
= on 
+ n
; 
3676                 if (bp
->nb_validend 
<= 0 || bp
->nb_validend 
< bp
->nb_dirtyoff 
|| 
3677                     bp
->nb_validoff 
> bp
->nb_dirtyend
) { 
3678                         bp
->nb_validoff 
= bp
->nb_dirtyoff
; 
3679                         bp
->nb_validend 
= bp
->nb_dirtyend
; 
3681                         bp
->nb_validoff 
= MIN(bp
->nb_validoff
, bp
->nb_dirtyoff
); 
3682                         bp
->nb_validend 
= MAX(bp
->nb_validend
, bp
->nb_dirtyend
); 
3684                 if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { 
3685                         nfs_buf_normalize_valid_range(np
, bp
); 
3689                  * Since this block is being modified, it must be written 
3690                  * again and not just committed. 
3692                 if (ISSET(bp
->nb_flags
, NB_NEEDCOMMIT
)) { 
3693                         nfs_node_lock_force(np
); 
3694                         if (ISSET(bp
->nb_flags
, NB_NEEDCOMMIT
)) { 
3695                                 np
->n_needcommitcnt
--; 
3696                                 CHECK_NEEDCOMMITCNT(np
); 
3698                         CLR(bp
->nb_flags
, NB_NEEDCOMMIT
); 
3699                         nfs_node_unlock(np
); 
3702                 if (ioflag 
& IO_SYNC
) { 
3703                         error 
= nfs_buf_write(bp
); 
3707                         if (np
->n_needcommitcnt 
>= NFS_A_LOT_OF_NEEDCOMMITS
) { 
3708                                 nfs_flushcommits(np
, 1); 
3710                 } else if (((n 
+ on
) == biosize
) || (ioflag 
& IO_APPEND
) || 
3711                     (ioflag 
& IO_NOCACHE
) || ISSET(bp
->nb_flags
, NB_NOCACHE
)) { 
3712                         SET(bp
->nb_flags
, NB_ASYNC
); 
3713                         error 
= nfs_buf_write(bp
); 
3718                         /* If the block wasn't already delayed: charge for the write */ 
3719                         if (!ISSET(bp
->nb_flags
, NB_DELWRI
)) { 
3720                                 proc_t p 
= vfs_context_proc(ctx
); 
3721                                 if (p 
&& p
->p_stats
) { 
3722                                         OSIncrementAtomicLong(&p
->p_stats
->p_ru
.ru_oublock
); 
3725                         nfs_buf_write_delayed(bp
); 
3728         } while (uio_resid(uio
) > 0 && n 
> 0); 
3731         nfs_node_lock_force(np
); 
3733         if ((ioflag 
& IO_SYNC
) && !np
->n_wrbusy 
&& !np
->n_numoutput
) { 
3734                 np
->n_flag 
&= ~NMODIFIED
; 
3736         nfs_node_unlock(np
); 
3737         nfs_data_unlock(np
); 
3738         FSDBG_BOT(515, np
, uio_offset(uio
), uio_resid(uio
), error
); 
3754         return nfs_write_rpc2(np
, uio
, vfs_context_thread(ctx
), vfs_context_ucred(ctx
), iomodep
, wverfp
); 
3766         struct nfsmount 
*nmp
; 
3767         int error 
= 0, nfsvers
; 
3768         int wverfset
, commit 
= 0, committed
; 
3769         uint64_t wverf 
= 0, wverf2 
= 0; 
3770         size_t nmwsize
, totalsize
, tsiz
, len
, rlen 
= 0; 
3773         uint32_t stategenid 
= 0, restart 
= 0; 
3775         uint32_t vrestart 
= 0; 
3776         uio_t uio_save 
= NULL
; 
3779         /* XXX limitation based on need to back up uio on short write */ 
3780         if (uio_iovcnt(uio
) != 1) { 
3781                 panic("nfs3_write_rpc: iovcnt > 1"); 
3784         FSDBG_TOP(537, np
, uio_offset(uio
), uio_resid(uio
), *iomodep
); 
3786         if (nfs_mount_gone(nmp
)) { 
3789         nfsvers 
= nmp
->nm_vers
; 
3790         nmwsize 
= nmp
->nm_wsize
; 
3793         committed 
= NFS_WRITE_FILESYNC
; 
3795         totalsize 
= tsiz 
= uio_resid(uio
); 
3796         if ((nfsvers 
== NFS_VER2
) && ((uint64_t)(uio_offset(uio
) + tsiz
) > 0xffffffffULL
)) { 
3797                 FSDBG_BOT(537, np
, uio_offset(uio
), uio_resid(uio
), EFBIG
); 
3801         uio_save 
= uio_duplicate(uio
); 
3802         if (uio_save 
== NULL
) { 
3806         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
3808                 len 
= (tsiz 
> nmwsize
) ? nmwsize 
: tsiz
; 
3809                 FSDBG(537, np
, uio_offset(uio
), len
, 0); 
3810                 if (np
->n_flag 
& NREVOKE
) { 
3815                 if (nmp
->nm_vers 
>= NFS_VER4
) { 
3816                         stategenid 
= nmp
->nm_stategenid
; 
3819                 error 
= nmp
->nm_funcs
->nf_write_rpc_async(np
, uio
, len
, thd
, cred
, *iomodep
, NULL
, &req
); 
3821                         error 
= nmp
->nm_funcs
->nf_write_rpc_async_finish(np
, req
, &commit
, &rlen
, &wverf2
); 
3824                 if (nfs_mount_gone(nmp
)) { 
3828                 if ((nmp
->nm_vers 
>= NFS_VER4
) && nfs_mount_state_error_should_restart(error
) && 
3829                     (++restart 
<= nfs_mount_state_max_restarts(nmp
))) { /* guard against no progress */ 
3830                         lck_mtx_lock(&nmp
->nm_lock
); 
3831                         if ((error 
!= NFSERR_GRACE
) && (stategenid 
== nmp
->nm_stategenid
)) { 
3832                                 NP(np
, "nfs_write_rpc: error %d, initiating recovery", error
); 
3833                                 nfs_need_recover(nmp
, error
); 
3835                         lck_mtx_unlock(&nmp
->nm_lock
); 
3836                         if (np
->n_flag 
& NREVOKE
) { 
3839                                 if (error 
== NFSERR_GRACE
) { 
3840                                         tsleep(&nmp
->nm_state
, (PZERO 
- 1), "nfsgrace", 2 * hz
); 
3842                                 if (!(error 
= nfs_mount_state_wait_for_recovery(nmp
))) { 
3851                 if (nfsvers 
== NFS_VER2
) { 
3856                 /* check for a short write */ 
3858                         /* Reset the uio to reflect the actual transfer */ 
3860                         uio_update(uio
, totalsize 
- (tsiz 
- rlen
)); 
3864                 /* return lowest commit level returned */ 
3865                 if (commit 
< committed
) { 
3871                 /* check write verifier */ 
3875                 } else if (wverf 
!= wverf2
) { 
3876                         /* verifier changed, so we need to restart all the writes */ 
3877                         if (++vrestart 
> 100) { 
3878                                 /* give up after too many restarts */ 
3882                         *uio 
= *uio_save
;       // Reset the uio back to the start 
3883                         committed 
= NFS_WRITE_FILESYNC
; 
3891         if (wverfset 
&& wverfp
) { 
3894         *iomodep 
= committed
; 
3896                 uio_setresid(uio
, tsiz
); 
3898         NFS_ZFREE(nfs_req_zone
, req
); 
3899         FSDBG_BOT(537, np
, committed
, uio_resid(uio
), error
); 
3904 nfs3_write_rpc_async( 
3911         struct nfsreq_cbinfo 
*cb
, 
3912         struct nfsreq 
**reqp
) 
3914         struct nfsmount 
*nmp
; 
3916         int error 
= 0, nfsvers
; 
3917         struct nfsm_chain nmreq
; 
3920         if (nfs_mount_gone(nmp
)) { 
3923         nfsvers 
= nmp
->nm_vers
; 
3925         /* for async mounts, don't bother sending sync write requests */ 
3926         if ((iomode 
!= NFS_WRITE_UNSTABLE
) && nfs_allow_async 
&& 
3927             ((mp 
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) { 
3928                 iomode 
= NFS_WRITE_UNSTABLE
; 
3931         nfsm_chain_null(&nmreq
); 
3932         nfsm_chain_build_alloc_init(error
, &nmreq
, 
3933             NFSX_FH(nfsvers
) + 5 * NFSX_UNSIGNED 
+ nfsm_rndup(len
)); 
3934         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
); 
3935         if (nfsvers 
== NFS_VER3
) { 
3936                 nfsm_chain_add_64(error
, &nmreq
, uio_offset(uio
)); 
3937                 nfsm_chain_add_32(error
, &nmreq
, len
); 
3938                 nfsm_chain_add_32(error
, &nmreq
, iomode
); 
3940                 nfsm_chain_add_32(error
, &nmreq
, 0); 
3941                 nfsm_chain_add_32(error
, &nmreq
, uio_offset(uio
)); 
3942                 nfsm_chain_add_32(error
, &nmreq
, 0); 
3944         nfsm_chain_add_32(error
, &nmreq
, len
); 
3946         error 
= nfsm_chain_add_uio(&nmreq
, uio
, len
); 
3947         nfsm_chain_build_done(error
, &nmreq
); 
3949         error 
= nfs_request_async(np
, NULL
, &nmreq
, NFSPROC_WRITE
, thd
, cred
, NULL
, 0, cb
, reqp
); 
3951         nfsm_chain_cleanup(&nmreq
); 
3956 nfs3_write_rpc_async_finish( 
3963         struct nfsmount 
*nmp
; 
3964         int error 
= 0, lockerror 
= ENOENT
, nfsvers
, status
; 
3965         int updatemtime 
= 0, wccpostattr 
= 0, rlen
, committed 
= NFS_WRITE_FILESYNC
; 
3966         u_int64_t xid
, wverf
; 
3968         struct nfsm_chain nmrep
; 
3971         if (nfs_mount_gone(nmp
)) { 
3972                 nfs_request_async_cancel(req
); 
3975         nfsvers 
= nmp
->nm_vers
; 
3977         nfsm_chain_null(&nmrep
); 
3979         error 
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
); 
3980         if (error 
== EINPROGRESS
) { /* async request restarted */ 
3984         if (nfs_mount_gone(nmp
)) { 
3987         if (!error 
&& (lockerror 
= nfs_node_lock(np
))) { 
3990         if (nfsvers 
== NFS_VER3
) { 
3991                 struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
3992                 nfsm_chain_get_wcc_data(error
, &nmrep
, np
, &premtime
, &wccpostattr
, &xid
); 
3993                 if (nfstimespeccmp(&np
->n_mtime
, &premtime
, ==)) { 
3999                 nfsm_chain_get_32(error
, &nmrep
, rlen
); 
4005                 nfsm_chain_get_32(error
, &nmrep
, committed
); 
4006                 nfsm_chain_get_64(error
, &nmrep
, wverf
); 
4011                 lck_mtx_lock(&nmp
->nm_lock
); 
4012                 if (!(nmp
->nm_state 
& NFSSTA_HASWRITEVERF
)) { 
4013                         nmp
->nm_verf 
= wverf
; 
4014                         nmp
->nm_state 
|= NFSSTA_HASWRITEVERF
; 
4015                 } else if (nmp
->nm_verf 
!= wverf
) { 
4016                         nmp
->nm_verf 
= wverf
; 
4018                 lck_mtx_unlock(&nmp
->nm_lock
); 
4023                 nfsm_chain_loadattr(error
, &nmrep
, np
, nfsvers
, &xid
); 
4027                 NFS_CHANGED_UPDATE(nfsvers
, np
, &np
->n_vattr
); 
4031                 nfs_node_unlock(np
); 
4033         nfsm_chain_cleanup(&nmrep
); 
4034         if ((committed 
!= NFS_WRITE_FILESYNC
) && nfs_allow_async 
&& 
4035             ((mp 
= NFSTOMP(np
))) && (vfs_flags(mp
) & MNT_ASYNC
)) { 
4036                 committed 
= NFS_WRITE_FILESYNC
; 
4038         *iomodep 
= committed
; 
4043  * NFS mknod vnode op 
4045  * For NFS v2 this is a kludge. Use a create RPC but with the IFMT bits of the 
4046  * mode set to specify the file type and the size field for rdev. 
4050         struct vnop_mknod_args 
/* { 
4051                                 *  struct vnodeop_desc *a_desc; 
4054                                 *  struct componentname *a_cnp; 
4055                                 *  struct vnode_attr *a_vap; 
4056                                 *  vfs_context_t a_context; 
4059         vnode_t dvp 
= ap
->a_dvp
; 
4060         vnode_t 
*vpp 
= ap
->a_vpp
; 
4061         struct componentname 
*cnp 
= ap
->a_cnp
; 
4062         struct vnode_attr 
*vap 
= ap
->a_vap
; 
4063         vfs_context_t ctx 
= ap
->a_context
; 
4064         vnode_t newvp 
= NULL
; 
4065         nfsnode_t np 
= NULL
; 
4066         struct nfsmount 
*nmp
; 
4067         nfsnode_t dnp 
= VTONFS(dvp
); 
4068         struct nfs_vattr 
*nvattr
; 
4070         int error 
= 0, lockerror 
= ENOENT
, busyerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0; 
4071         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
4073         u_int64_t xid 
= 0, dxid
; 
4074         int nfsvers
, gotuid
, gotgid
; 
4075         struct nfsm_chain nmreq
, nmrep
; 
4079         if (nfs_mount_gone(nmp
)) { 
4082         nfsvers 
= nmp
->nm_vers
; 
4084         if (!VATTR_IS_ACTIVE(vap
, va_type
)) { 
4087         if (vap
->va_type 
== VCHR 
|| vap
->va_type 
== VBLK
) { 
4088                 if (!VATTR_IS_ACTIVE(vap
, va_rdev
)) { 
4091                 rdev 
= vap
->va_rdev
; 
4092         } else if (vap
->va_type 
== VFIFO 
|| vap
->va_type 
== VSOCK
) { 
4097         if ((nfsvers 
== NFS_VER2
) && (cnp
->cn_namelen 
> NFS_MAXNAMLEN
)) { 
4098                 return ENAMETOOLONG
; 
4101         nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
); 
4103         VATTR_SET_SUPPORTED(vap
, va_mode
); 
4104         VATTR_SET_SUPPORTED(vap
, va_uid
); 
4105         VATTR_SET_SUPPORTED(vap
, va_gid
); 
4106         VATTR_SET_SUPPORTED(vap
, va_data_size
); 
4107         VATTR_SET_SUPPORTED(vap
, va_access_time
); 
4108         VATTR_SET_SUPPORTED(vap
, va_modify_time
); 
4109         gotuid 
= VATTR_IS_ACTIVE(vap
, va_uid
); 
4110         gotgid 
= VATTR_IS_ACTIVE(vap
, va_gid
); 
4112         nfsm_chain_null(&nmreq
); 
4113         nfsm_chain_null(&nmrep
); 
4115         fh 
= zalloc(nfs_fhandle_zone
); 
4116         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
4117         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
4119         nfsm_chain_build_alloc_init(error
, &nmreq
, 
4120             NFSX_FH(nfsvers
) + 4 * NFSX_UNSIGNED 
+ 
4121             nfsm_rndup(cnp
->cn_namelen
) + NFSX_SATTR(nfsvers
)); 
4122         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
4123         nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
); 
4124         if (nfsvers 
== NFS_VER3
) { 
4125                 nfsm_chain_add_32(error
, &nmreq
, vtonfs_type(vap
->va_type
, nfsvers
)); 
4126                 nfsm_chain_add_v3sattr(nmp
, error
, &nmreq
, vap
); 
4127                 if (vap
->va_type 
== VCHR 
|| vap
->va_type 
== VBLK
) { 
4128                         nfsm_chain_add_32(error
, &nmreq
, major(vap
->va_rdev
)); 
4129                         nfsm_chain_add_32(error
, &nmreq
, minor(vap
->va_rdev
)); 
4132                 nfsm_chain_add_v2sattr(error
, &nmreq
, vap
, rdev
); 
4134         nfsm_chain_build_done(error
, &nmreq
); 
4136                 error 
= busyerror 
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
)); 
4140         error 
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC_MKNOD
, 
4141             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, 0, NULL
, &req
); 
4143                 error 
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
); 
4146         if ((lockerror 
= nfs_node_lock(dnp
))) { 
4149         /* XXX no EEXIST kludge here? */ 
4151         if (!error 
&& !status
) { 
4152                 if (dnp
->n_flag 
& NNEGNCENTRIES
) { 
4153                         dnp
->n_flag 
&= ~NNEGNCENTRIES
; 
4154                         cache_purge_negatives(dvp
); 
4156                 error 
= nfsm_chain_get_fh_attr(nmp
, &nmrep
, dnp
, ctx
, nfsvers
, &xid
, fh
, nvattr
); 
4158         if (nfsvers 
== NFS_VER3
) { 
4159                 nfsm_chain_get_wcc_data(error
, &nmrep
, dnp
, &premtime
, &wccpostattr
, &dxid
); 
4165         nfsm_chain_cleanup(&nmreq
); 
4166         nfsm_chain_cleanup(&nmrep
); 
4169                 dnp
->n_flag 
|= NMODIFIED
; 
4170                 /* if directory hadn't changed, update namecache mtime */ 
4171                 if (nfstimespeccmp(&dnp
->n_ncmtime
, &premtime
, ==)) { 
4172                         NFS_CHANGED_UPDATE_NC(nfsvers
, dnp
, &dnp
->n_vattr
); 
4174                 nfs_node_unlock(dnp
); 
4175                 /* nfs_getattr() will check changed and purge caches */ 
4176                 nfs_getattr(dnp
, NULL
, ctx
, wccpostattr 
? NGA_CACHED 
: NGA_UNCACHED
); 
4179         if (!error 
&& fh
->fh_len
) { 
4180                 error 
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &np
); 
4182         if (!error 
&& !np
) { 
4183                 error 
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
); 
4189                 nfs_node_clear_busy(dnp
); 
4192         if (!error 
&& (gotuid 
|| gotgid
) && 
4193             (!newvp 
|| nfs_getattrcache(np
, nvattr
, 0) || 
4194             (gotuid 
&& (nvattr
->nva_uid 
!= vap
->va_uid
)) || 
4195             (gotgid 
&& (nvattr
->nva_gid 
!= vap
->va_gid
)))) { 
4196                 /* clear ID bits if server didn't use them (or we can't tell) */ 
4197                 VATTR_CLEAR_SUPPORTED(vap
, va_uid
); 
4198                 VATTR_CLEAR_SUPPORTED(vap
, va_gid
); 
4202                         nfs_node_unlock(np
); 
4207                 nfs_node_unlock(np
); 
4209         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
4210         NFS_ZFREE(nfs_req_zone
, req
); 
4211         FREE(nvattr
, M_TEMP
); 
/*
 * Client-side verifier value sent in NFSv3 exclusive-mode CREATE requests
 * (added to the request via nfsm_chain_add_32() in the NFS_CREATE_EXCLUSIVE
 * branch of the create RPC below, alongside the host's primary IP address).
 * The server uses the verifier to make exclusive creates idempotent across
 * retransmissions.
 */
4215 static uint32_t create_verf
;
4217  * NFS file create call 
4221         struct vnop_create_args 
/* { 
4222                                  *  struct vnodeop_desc *a_desc; 
4225                                  *  struct componentname *a_cnp; 
4226                                  *  struct vnode_attr *a_vap; 
4227                                  *  vfs_context_t a_context; 
4230         vfs_context_t ctx 
= ap
->a_context
; 
4231         vnode_t dvp 
= ap
->a_dvp
; 
4232         struct vnode_attr 
*vap 
= ap
->a_vap
; 
4233         struct componentname 
*cnp 
= ap
->a_cnp
; 
4234         struct nfs_vattr 
*nvattr
; 
4236         nfsnode_t np 
= NULL
; 
4237         struct nfsmount 
*nmp
; 
4238         nfsnode_t dnp 
= VTONFS(dvp
); 
4239         vnode_t newvp 
= NULL
; 
4240         int error 
= 0, lockerror 
= ENOENT
, busyerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0, fmode 
= 0; 
4241         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
4242         int nfsvers
, gotuid
, gotgid
; 
4243         u_int64_t xid 
= 0, dxid
; 
4245         struct nfsm_chain nmreq
, nmrep
; 
4247         struct nfs_dulookup 
*dul
; 
4248         int dul_in_progress 
= 0; 
4252         if (nfs_mount_gone(nmp
)) { 
4255         nfsvers 
= nmp
->nm_vers
; 
4256         namedattrs 
= (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_NAMED_ATTR
); 
4258         if ((nfsvers 
== NFS_VER2
) && (cnp
->cn_namelen 
> NFS_MAXNAMLEN
)) { 
4259                 return ENAMETOOLONG
; 
4262         nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
); 
4264         VATTR_SET_SUPPORTED(vap
, va_mode
); 
4265         VATTR_SET_SUPPORTED(vap
, va_uid
); 
4266         VATTR_SET_SUPPORTED(vap
, va_gid
); 
4267         VATTR_SET_SUPPORTED(vap
, va_data_size
); 
4268         VATTR_SET_SUPPORTED(vap
, va_access_time
); 
4269         VATTR_SET_SUPPORTED(vap
, va_modify_time
); 
4270         gotuid 
= VATTR_IS_ACTIVE(vap
, va_uid
); 
4271         gotgid 
= VATTR_IS_ACTIVE(vap
, va_gid
); 
4273         if ((vap
->va_vaflags 
& VA_EXCLUSIVE
) 
4276                 if (!VATTR_IS_ACTIVE(vap
, va_access_time
) || !VATTR_IS_ACTIVE(vap
, va_modify_time
)) { 
4277                         vap
->va_vaflags 
|= VA_UTIMES_NULL
; 
4281         fh 
= zalloc(nfs_fhandle_zone
); 
4282         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
4283         MALLOC(dul
, struct nfs_dulookup 
*, sizeof(*dul
), M_TEMP
, M_WAITOK
); 
4284         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
4287         error 
= busyerror 
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
)); 
4289                 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
); 
4292         nfsm_chain_null(&nmreq
); 
4293         nfsm_chain_null(&nmrep
); 
4295         nfsm_chain_build_alloc_init(error
, &nmreq
, 
4296             NFSX_FH(nfsvers
) + 2 * NFSX_UNSIGNED 
+ 
4297             nfsm_rndup(cnp
->cn_namelen
) + NFSX_SATTR(nfsvers
)); 
4298         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
4299         nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
); 
4300         if (nfsvers 
== NFS_VER3
) { 
4301                 if (fmode 
& O_EXCL
) { 
4302                         nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_EXCLUSIVE
); 
4303                         lck_rw_lock_shared(in_ifaddr_rwlock
); 
4304                         if (!TAILQ_EMPTY(&in_ifaddrhead
)) { 
4305                                 val 
= IA_SIN(in_ifaddrhead
.tqh_first
)->sin_addr
.s_addr
; 
4309                         lck_rw_done(in_ifaddr_rwlock
); 
4310                         nfsm_chain_add_32(error
, &nmreq
, val
); 
4312                         nfsm_chain_add_32(error
, &nmreq
, create_verf
); 
4314                         nfsm_chain_add_32(error
, &nmreq
, NFS_CREATE_UNCHECKED
); 
4315                         nfsm_chain_add_v3sattr(nmp
, error
, &nmreq
, vap
); 
4318                 nfsm_chain_add_v2sattr(error
, &nmreq
, vap
, 0); 
4320         nfsm_chain_build_done(error
, &nmreq
); 
4323         error 
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC_CREATE
, 
4324             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, 0, NULL
, &req
); 
4327                         nfs_dulookup_start(dul
, dnp
, ctx
); 
4328                         dul_in_progress 
= 1; 
4330                 error 
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
); 
4333         if ((lockerror 
= nfs_node_lock(dnp
))) { 
4337         if (!error 
&& !status
) { 
4338                 if (dnp
->n_flag 
& NNEGNCENTRIES
) { 
4339                         dnp
->n_flag 
&= ~NNEGNCENTRIES
; 
4340                         cache_purge_negatives(dvp
); 
4342                 error 
= nfsm_chain_get_fh_attr(nmp
, &nmrep
, dnp
, ctx
, nfsvers
, &xid
, fh
, nvattr
); 
4344         if (nfsvers 
== NFS_VER3
) { 
4345                 nfsm_chain_get_wcc_data(error
, &nmrep
, dnp
, &premtime
, &wccpostattr
, &dxid
); 
4351         nfsm_chain_cleanup(&nmreq
); 
4352         nfsm_chain_cleanup(&nmrep
); 
4355                 dnp
->n_flag 
|= NMODIFIED
; 
4356                 /* if directory hadn't changed, update namecache mtime */ 
4357                 if (nfstimespeccmp(&dnp
->n_ncmtime
, &premtime
, ==)) { 
4358                         NFS_CHANGED_UPDATE_NC(nfsvers
, dnp
, &dnp
->n_vattr
); 
4360                 nfs_node_unlock(dnp
); 
4361                 /* nfs_getattr() will check changed and purge caches */ 
4362                 nfs_getattr(dnp
, NULL
, ctx
, wccpostattr 
? NGA_CACHED 
: NGA_UNCACHED
); 
4365         if (!error 
&& fh
->fh_len
) { 
4366                 error 
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &np
); 
4368         if (!error 
&& !np
) { 
4369                 error 
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
); 
4375         if (dul_in_progress
) { 
4376                 nfs_dulookup_finish(dul
, dnp
, ctx
); 
4379                 nfs_node_clear_busy(dnp
); 
4383                 if ((nfsvers 
== NFS_VER3
) && (fmode 
& O_EXCL
) && (error 
== NFSERR_NOTSUPP
)) { 
4388                         nfs_node_unlock(np
); 
4391         } else if ((nfsvers 
== NFS_VER3
) && (fmode 
& O_EXCL
)) { 
4392                 nfs_node_unlock(np
); 
4393                 error 
= nfs3_setattr_rpc(np
, vap
, ctx
); 
4394                 if (error 
&& (gotuid 
|| gotgid
)) { 
4395                         /* it's possible the server didn't like our attempt to set IDs. */ 
4396                         /* so, let's try it again without those */ 
4397                         VATTR_CLEAR_ACTIVE(vap
, va_uid
); 
4398                         VATTR_CLEAR_ACTIVE(vap
, va_gid
); 
4399                         error 
= nfs3_setattr_rpc(np
, vap
, ctx
); 
4404                         nfs_node_lock_force(np
); 
4410         if (!error 
&& (gotuid 
|| gotgid
) && 
4411             (!newvp 
|| nfs_getattrcache(np
, nvattr
, 0) || 
4412             (gotuid 
&& (nvattr
->nva_uid 
!= vap
->va_uid
)) || 
4413             (gotgid 
&& (nvattr
->nva_gid 
!= vap
->va_gid
)))) { 
4414                 /* clear ID bits if server didn't use them (or we can't tell) */ 
4415                 VATTR_CLEAR_SUPPORTED(vap
, va_uid
); 
4416                 VATTR_CLEAR_SUPPORTED(vap
, va_gid
); 
4419                 nfs_node_unlock(np
); 
4421         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
4422         NFS_ZFREE(nfs_req_zone
, req
); 
4424         FREE(nvattr
, M_TEMP
); 
4429  * NFS file remove call 
4430  * To try and make NFS semantics closer to UFS semantics, a file that has 
4431  * other processes using the vnode is renamed instead of removed and then 
4432  * removed later on the last close. 
4433  * - If vnode_isinuse() 
4434  *        If a rename is not already in the works 
4435  *           call nfs_sillyrename() to set it up 
4441         struct vnop_remove_args 
/* { 
4442                                  *  struct vnodeop_desc *a_desc; 
4445                                  *  struct componentname *a_cnp; 
4447                                  *  vfs_context_t a_context; 
4450         vfs_context_t ctx 
= ap
->a_context
; 
4451         vnode_t vp 
= ap
->a_vp
; 
4452         vnode_t dvp 
= ap
->a_dvp
; 
4453         struct componentname 
*cnp 
= ap
->a_cnp
; 
4454         nfsnode_t dnp 
= VTONFS(dvp
); 
4455         nfsnode_t np 
= VTONFS(vp
); 
4456         int error 
= 0, nfsvers
, namedattrs
, inuse
, gotattr 
= 0, flushed 
= 0, setsize 
= 0; 
4457         struct nfs_vattr 
*nvattr
; 
4458         struct nfsmount 
*nmp
; 
4459         struct nfs_dulookup 
*dul
; 
4461         /* XXX prevent removing a sillyrenamed file? */ 
4463         nmp 
= NFSTONMP(dnp
); 
4464         if (nfs_mount_gone(nmp
)) { 
4468         if (vnode_isdir(vp
)) { 
4472         nfsvers 
= nmp
->nm_vers
; 
4473         namedattrs 
= (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_NAMED_ATTR
); 
4474         MALLOC(dul
, struct nfs_dulookup 
*, sizeof(*dul
), M_TEMP
, M_WAITOK
); 
4475         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
4478         error 
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
)); 
4483         /* lock the node while we remove the file */ 
4484         lck_mtx_lock(&nfs_node_hash_mutex
); 
4485         while (np
->n_hflag 
& NHLOCKED
) { 
4486                 np
->n_hflag 
|= NHLOCKWANT
; 
4487                 msleep(np
, &nfs_node_hash_mutex
, PINOD
, "nfs_remove", NULL
); 
4489         np
->n_hflag 
|= NHLOCKED
; 
4490         lck_mtx_unlock(&nfs_node_hash_mutex
); 
4493                 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
); 
4497         inuse 
= vnode_isinuse(vp
, 0); 
4498         if ((ap
->a_flags 
& VNODE_REMOVE_NODELETEBUSY
) && inuse
) { 
4499                 /* Caller requested Carbon delete semantics, but file is busy */ 
4503         if (inuse 
&& !gotattr
) { 
4504                 if (nfs_getattr(np
, nvattr
, ctx
, NGA_CACHED
)) { 
4505                         nvattr
->nva_nlink 
= 1; 
4510         if (!inuse 
|| (np
->n_sillyrename 
&& (nvattr
->nva_nlink 
> 1))) { 
4511                 if (!inuse 
&& !flushed
) { /* flush all the buffers first */ 
4512                         /* unlock the node */ 
4513                         lck_mtx_lock(&nfs_node_hash_mutex
); 
4514                         np
->n_hflag 
&= ~NHLOCKED
; 
4515                         if (np
->n_hflag 
& NHLOCKWANT
) { 
4516                                 np
->n_hflag 
&= ~NHLOCKWANT
; 
4519                         lck_mtx_unlock(&nfs_node_hash_mutex
); 
4520                         nfs_node_clear_busy2(dnp
, np
); 
4521                         error 
= nfs_vinvalbuf(vp
, V_SAVE
, ctx
, 1); 
4522                         FSDBG(260, np
, np
->n_size
, np
->n_vattr
.nva_size
, 0xf00d0011); 
4524                         if (error 
== EINTR
) { 
4525                                 nfs_node_lock_force(np
); 
4526                                 NATTRINVALIDATE(np
); 
4527                                 nfs_node_unlock(np
); 
4531                                 nfs_dulookup_finish(dul
, dnp
, ctx
); 
4536                 if ((nmp
->nm_vers 
>= NFS_VER4
) && (np
->n_openflags 
& N_DELEG_MASK
)) { 
4537                         nfs4_delegation_return(np
, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
)); 
4541                  * Purge the name cache so that the chance of a lookup for 
4542                  * the name succeeding while the remove is in progress is 
4545                 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
); 
4548                         nfs_dulookup_start(dul
, dnp
, ctx
); 
4552                 error 
= nmp
->nm_funcs
->nf_remove_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, 
4553                     vfs_context_thread(ctx
), vfs_context_ucred(ctx
)); 
4556                  * Kludge City: If the first reply to the remove rpc is lost.. 
4557                  *   the reply to the retransmitted request will be ENOENT 
4558                  *   since the file was in fact removed 
4559                  *   Therefore, we cheat and return success. 
4561                 if (error 
== ENOENT
) { 
4565                 if (!error 
&& !inuse 
&& !np
->n_sillyrename
) { 
4567                          * removal succeeded, it's not in use, and not silly renamed so 
4568                          * remove nfsnode from hash now so we can't accidentally find it 
4569                          * again if another object gets created with the same filehandle 
4570                          * before this vnode gets reclaimed 
4572                         lck_mtx_lock(&nfs_node_hash_mutex
); 
4573                         if (np
->n_hflag 
& NHHASHED
) { 
4574                                 LIST_REMOVE(np
, n_hash
); 
4575                                 np
->n_hflag 
&= ~NHHASHED
; 
4576                                 FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e); 
4578                         lck_mtx_unlock(&nfs_node_hash_mutex
); 
4579                         /* clear flags now: won't get nfs_vnop_inactive for recycled vnode */ 
4580                         /* clear all flags other than these */ 
4581                         nfs_node_lock_force(np
); 
4582                         np
->n_flag 
&= (NMODIFIED
); 
4583                         NATTRINVALIDATE(np
); 
4584                         nfs_node_unlock(np
); 
4588                         nfs_node_lock_force(np
); 
4589                         NATTRINVALIDATE(np
); 
4590                         nfs_node_unlock(np
); 
4592         } else if (!np
->n_sillyrename
) { 
4594                         nfs_dulookup_start(dul
, dnp
, ctx
); 
4596                 error 
= nfs_sillyrename(dnp
, np
, cnp
, ctx
); 
4597                 nfs_node_lock_force(np
); 
4598                 NATTRINVALIDATE(np
); 
4599                 nfs_node_unlock(np
); 
4601                 nfs_node_lock_force(np
); 
4602                 NATTRINVALIDATE(np
); 
4603                 nfs_node_unlock(np
); 
4605                         nfs_dulookup_start(dul
, dnp
, ctx
); 
4609         /* nfs_getattr() will check changed and purge caches */ 
4610         nfs_getattr(dnp
, NULL
, ctx
, NGA_CACHED
); 
4612                 nfs_dulookup_finish(dul
, dnp
, ctx
); 
4615         /* unlock the node */ 
4616         lck_mtx_lock(&nfs_node_hash_mutex
); 
4617         np
->n_hflag 
&= ~NHLOCKED
; 
4618         if (np
->n_hflag 
& NHLOCKWANT
) { 
4619                 np
->n_hflag 
&= ~NHLOCKWANT
; 
4622         lck_mtx_unlock(&nfs_node_hash_mutex
); 
4623         nfs_node_clear_busy2(dnp
, np
); 
4629         FREE(nvattr
, M_TEMP
); 
4634  * NFS silly-renamed file removal function called from nfs_vnop_inactive 
4637 nfs_removeit(struct nfs_sillyrename 
*nsp
) 
4639         struct nfsmount 
*nmp 
= NFSTONMP(nsp
->nsr_dnp
); 
4640         if (nfs_mount_gone(nmp
)) { 
4643         return nmp
->nm_funcs
->nf_remove_rpc(nsp
->nsr_dnp
, nsp
->nsr_name
, nsp
->nsr_namlen
, NULL
, nsp
->nsr_cred
); 
4647  * NFS remove rpc, called from nfs_remove() and nfs_removeit(). 
4657         int error 
= 0, lockerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0; 
4658         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
4659         struct nfsmount 
*nmp
; 
4662         struct nfsm_chain nmreq
, nmrep
; 
4664         nmp 
= NFSTONMP(dnp
); 
4665         if (nfs_mount_gone(nmp
)) { 
4668         nfsvers 
= nmp
->nm_vers
; 
4669         if ((nfsvers 
== NFS_VER2
) && (namelen 
> NFS_MAXNAMLEN
)) { 
4670                 return ENAMETOOLONG
; 
4673         nfsm_chain_null(&nmreq
); 
4674         nfsm_chain_null(&nmrep
); 
4676         nfsm_chain_build_alloc_init(error
, &nmreq
, 
4677             NFSX_FH(nfsvers
) + NFSX_UNSIGNED 
+ nfsm_rndup(namelen
)); 
4678         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
4679         nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
); 
4680         nfsm_chain_build_done(error
, &nmreq
); 
4683         error 
= nfs_request2(dnp
, NULL
, &nmreq
, NFSPROC_REMOVE
, thd
, cred
, NULL
, 0, &nmrep
, &xid
, &status
); 
4685         if ((lockerror 
= nfs_node_lock(dnp
))) { 
4688         if (nfsvers 
== NFS_VER3
) { 
4689                 nfsm_chain_get_wcc_data(error
, &nmrep
, dnp
, &premtime
, &wccpostattr
, &xid
); 
4692         dnp
->n_flag 
|= NMODIFIED
; 
4693         /* if directory hadn't changed, update namecache mtime */ 
4694         if (nfstimespeccmp(&dnp
->n_ncmtime
, &premtime
, ==)) { 
4695                 NFS_CHANGED_UPDATE_NC(nfsvers
, dnp
, &dnp
->n_vattr
); 
4698                 NATTRINVALIDATE(dnp
); 
4705                 nfs_node_unlock(dnp
); 
4707         nfsm_chain_cleanup(&nmreq
); 
4708         nfsm_chain_cleanup(&nmrep
); 
4713  * NFS file rename call 
4717         struct vnop_rename_args  
/* { 
4718                                   *  struct vnodeop_desc *a_desc; 
4721                                   *  struct componentname *a_fcnp; 
4724                                   *  struct componentname *a_tcnp; 
4725                                   *  vfs_context_t a_context; 
4728         vfs_context_t ctx 
= ap
->a_context
; 
4729         vnode_t fdvp 
= ap
->a_fdvp
; 
4730         vnode_t fvp 
= ap
->a_fvp
; 
4731         vnode_t tdvp 
= ap
->a_tdvp
; 
4732         vnode_t tvp 
= ap
->a_tvp
; 
4733         nfsnode_t fdnp
, fnp
, tdnp
, tnp
; 
4734         struct componentname 
*tcnp 
= ap
->a_tcnp
; 
4735         struct componentname 
*fcnp 
= ap
->a_fcnp
; 
4736         int error
, nfsvers
, inuse 
= 0, tvprecycle 
= 0, locked 
= 0; 
4737         mount_t fmp
, tdmp
, tmp
; 
4738         struct nfs_vattr 
*nvattr
; 
4739         struct nfsmount 
*nmp
; 
4741         fdnp 
= VTONFS(fdvp
); 
4743         tdnp 
= VTONFS(tdvp
); 
4744         tnp 
= tvp 
? VTONFS(tvp
) : NULL
; 
4746         nmp 
= NFSTONMP(fdnp
); 
4747         if (nfs_mount_gone(nmp
)) { 
4750         nfsvers 
= nmp
->nm_vers
; 
4752         error 
= nfs_node_set_busy4(fdnp
, fnp
, tdnp
, tnp
, vfs_context_thread(ctx
)); 
4757         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
4759         if (tvp 
&& (tvp 
!= fvp
)) { 
4760                 /* lock the node while we rename over the existing file */ 
4761                 lck_mtx_lock(&nfs_node_hash_mutex
); 
4762                 while (tnp
->n_hflag 
& NHLOCKED
) { 
4763                         tnp
->n_hflag 
|= NHLOCKWANT
; 
4764                         msleep(tnp
, &nfs_node_hash_mutex
, PINOD
, "nfs_rename", NULL
); 
4766                 tnp
->n_hflag 
|= NHLOCKED
; 
4767                 lck_mtx_unlock(&nfs_node_hash_mutex
); 
4771         /* Check for cross-device rename */ 
4772         fmp 
= vnode_mount(fvp
); 
4773         tmp 
= tvp 
? vnode_mount(tvp
) : NULL
; 
4774         tdmp 
= vnode_mount(tdvp
); 
4775         if ((fmp 
!= tdmp
) || (tvp 
&& (fmp 
!= tmp
))) { 
4780         /* XXX prevent renaming from/over a sillyrenamed file? */ 
4783          * If the tvp exists and is in use, sillyrename it before doing the 
4784          * rename of the new file over it. 
4785          * XXX Can't sillyrename a directory. 
4786          * Don't sillyrename if source and target are same vnode (hard 
4787          * links or case-variants) 
4789         if (tvp 
&& (tvp 
!= fvp
)) { 
4790                 inuse 
= vnode_isinuse(tvp
, 0); 
4792         if (inuse 
&& !tnp
->n_sillyrename 
&& (vnode_vtype(tvp
) != VDIR
)) { 
4793                 error 
= nfs_sillyrename(tdnp
, tnp
, tcnp
, ctx
); 
4795                         /* sillyrename failed. Instead of pressing on, return error */ 
4796                         goto out
; /* should not be ENOENT. */ 
4798                         /* sillyrename succeeded.*/ 
4803         else if (tvp 
&& (nmp
->nm_vers 
>= NFS_VER4
) && (tnp
->n_openflags 
& N_DELEG_MASK
)) { 
4804                 nfs4_delegation_return(tnp
, 0, vfs_context_thread(ctx
), vfs_context_ucred(ctx
)); 
4807         error 
= nmp
->nm_funcs
->nf_rename_rpc(fdnp
, fcnp
->cn_nameptr
, fcnp
->cn_namelen
, 
4808             tdnp
, tcnp
->cn_nameptr
, tcnp
->cn_namelen
, ctx
); 
4811          * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 
4813         if (error 
== ENOENT
) { 
4817         if (tvp 
&& (tvp 
!= fvp
) && !tnp
->n_sillyrename
) { 
4818                 nfs_node_lock_force(tnp
); 
4819                 tvprecycle 
= (!error 
&& !vnode_isinuse(tvp
, 0) && 
4820                     (nfs_getattrcache(tnp
, nvattr
, 0) || (nvattr
->nva_nlink 
== 1))); 
4821                 nfs_node_unlock(tnp
); 
4822                 lck_mtx_lock(&nfs_node_hash_mutex
); 
4823                 if (tvprecycle 
&& (tnp
->n_hflag 
& NHHASHED
)) { 
4825                          * remove nfsnode from hash now so we can't accidentally find it 
4826                          * again if another object gets created with the same filehandle 
4827                          * before this vnode gets reclaimed 
4829                         LIST_REMOVE(tnp
, n_hash
); 
4830                         tnp
->n_hflag 
&= ~NHHASHED
; 
4831                         FSDBG(266, 0, tnp
, tnp
->n_flag
, 0xb1eb1e); 
4833                 lck_mtx_unlock(&nfs_node_hash_mutex
); 
4836         /* purge the old name cache entries and enter the new one */ 
4837         nfs_name_cache_purge(fdnp
, fnp
, fcnp
, ctx
); 
4839                 nfs_name_cache_purge(tdnp
, tnp
, tcnp
, ctx
); 
4841                         /* clear flags now: won't get nfs_vnop_inactive for recycled vnode */ 
4842                         /* clear all flags other than these */ 
4843                         nfs_node_lock_force(tnp
); 
4844                         tnp
->n_flag 
&= (NMODIFIED
); 
4845                         nfs_node_unlock(tnp
); 
4850                 nfs_node_lock_force(tdnp
); 
4851                 if (tdnp
->n_flag 
& NNEGNCENTRIES
) { 
4852                         tdnp
->n_flag 
&= ~NNEGNCENTRIES
; 
4853                         cache_purge_negatives(tdvp
); 
4855                 nfs_node_unlock(tdnp
); 
4856                 nfs_node_lock_force(fnp
); 
4857                 cache_enter(tdvp
, fvp
, tcnp
); 
4858                 if (tdvp 
!= fdvp
) {     /* update parent pointer */ 
4859                         if (fnp
->n_parent 
&& !vnode_get(fnp
->n_parent
)) { 
4860                                 /* remove ref from old parent */ 
4861                                 vnode_rele(fnp
->n_parent
); 
4862                                 vnode_put(fnp
->n_parent
); 
4864                         fnp
->n_parent 
= tdvp
; 
4865                         if (tdvp 
&& !vnode_get(tdvp
)) { 
4866                                 /* add ref to new parent */ 
4870                                 fnp
->n_parent 
= NULL
; 
4873                 nfs_node_unlock(fnp
); 
4876         /* nfs_getattr() will check changed and purge caches */ 
4877         nfs_getattr(fdnp
, NULL
, ctx
, NGA_CACHED
); 
4878         nfs_getattr(tdnp
, NULL
, ctx
, NGA_CACHED
); 
4881                 lck_mtx_lock(&nfs_node_hash_mutex
); 
4882                 tnp
->n_hflag 
&= ~NHLOCKED
; 
4883                 if (tnp
->n_hflag 
& NHLOCKWANT
) { 
4884                         tnp
->n_hflag 
&= ~NHLOCKWANT
; 
4887                 lck_mtx_unlock(&nfs_node_hash_mutex
); 
4889         nfs_node_clear_busy4(fdnp
, fnp
, tdnp
, tnp
); 
4890         FREE(nvattr
, M_TEMP
); 
4895  * Do an NFS rename rpc. Called from nfs_vnop_rename() and nfs_sillyrename(). 
4907         int error 
= 0, lockerror 
= ENOENT
, status 
= 0, fwccpostattr 
= 0, twccpostattr 
= 0; 
4908         struct timespec fpremtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }, tpremtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
4909         struct nfsmount 
*nmp
; 
4911         u_int64_t xid
, txid
; 
4912         struct nfsm_chain nmreq
, nmrep
; 
4914         nmp 
= NFSTONMP(fdnp
); 
4915         if (nfs_mount_gone(nmp
)) { 
4918         nfsvers 
= nmp
->nm_vers
; 
4919         if ((nfsvers 
== NFS_VER2
) && 
4920             ((fnamelen 
> NFS_MAXNAMLEN
) || (tnamelen 
> NFS_MAXNAMLEN
))) { 
4921                 return ENAMETOOLONG
; 
4924         nfsm_chain_null(&nmreq
); 
4925         nfsm_chain_null(&nmrep
); 
4927         nfsm_chain_build_alloc_init(error
, &nmreq
, 
4928             (NFSX_FH(nfsvers
) + NFSX_UNSIGNED
) * 2 + 
4929             nfsm_rndup(fnamelen
) + nfsm_rndup(tnamelen
)); 
4930         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, fdnp
->n_fhp
, fdnp
->n_fhsize
); 
4931         nfsm_chain_add_name(error
, &nmreq
, fnameptr
, fnamelen
, nmp
); 
4932         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
); 
4933         nfsm_chain_add_name(error
, &nmreq
, tnameptr
, tnamelen
, nmp
); 
4934         nfsm_chain_build_done(error
, &nmreq
); 
4937         error 
= nfs_request(fdnp
, NULL
, &nmreq
, NFSPROC_RENAME
, ctx
, NULL
, &nmrep
, &xid
, &status
); 
4939         if ((lockerror 
= nfs_node_lock2(fdnp
, tdnp
))) { 
4942         if (nfsvers 
== NFS_VER3
) { 
4944                 nfsm_chain_get_wcc_data(error
, &nmrep
, fdnp
, &fpremtime
, &fwccpostattr
, &xid
); 
4945                 nfsm_chain_get_wcc_data(error
, &nmrep
, tdnp
, &tpremtime
, &twccpostattr
, &txid
); 
4951         nfsm_chain_cleanup(&nmreq
); 
4952         nfsm_chain_cleanup(&nmrep
); 
4954                 fdnp
->n_flag 
|= NMODIFIED
; 
4955                 /* if directory hadn't changed, update namecache mtime */ 
4956                 if (nfstimespeccmp(&fdnp
->n_ncmtime
, &fpremtime
, ==)) { 
4957                         NFS_CHANGED_UPDATE_NC(nfsvers
, fdnp
, &fdnp
->n_vattr
); 
4959                 if (!fwccpostattr
) { 
4960                         NATTRINVALIDATE(fdnp
); 
4962                 tdnp
->n_flag 
|= NMODIFIED
; 
4963                 /* if directory hadn't changed, update namecache mtime */ 
4964                 if (nfstimespeccmp(&tdnp
->n_ncmtime
, &tpremtime
, ==)) { 
4965                         NFS_CHANGED_UPDATE_NC(nfsvers
, tdnp
, &tdnp
->n_vattr
); 
4967                 if (!twccpostattr
) { 
4968                         NATTRINVALIDATE(tdnp
); 
4970                 nfs_node_unlock2(fdnp
, tdnp
); 
4976  * NFS hard link create call 
4980         struct vnop_link_args 
/* { 
4981                                *  struct vnodeop_desc *a_desc; 
4984                                *  struct componentname *a_cnp; 
4985                                *  vfs_context_t a_context; 
4988         vfs_context_t ctx 
= ap
->a_context
; 
4989         vnode_t vp 
= ap
->a_vp
; 
4990         vnode_t tdvp 
= ap
->a_tdvp
; 
4991         struct componentname 
*cnp 
= ap
->a_cnp
; 
4992         int error 
= 0, lockerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0, attrflag 
= 0; 
4993         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
4994         struct nfsmount 
*nmp
; 
4995         nfsnode_t np 
= VTONFS(vp
); 
4996         nfsnode_t tdnp 
= VTONFS(tdvp
); 
4998         u_int64_t xid
, txid
; 
4999         struct nfsm_chain nmreq
, nmrep
; 
5001         if (vnode_mount(vp
) != vnode_mount(tdvp
)) { 
5006         if (nfs_mount_gone(nmp
)) { 
5009         nfsvers 
= nmp
->nm_vers
; 
5010         if ((nfsvers 
== NFS_VER2
) && (cnp
->cn_namelen 
> NFS_MAXNAMLEN
)) { 
5011                 return ENAMETOOLONG
; 
5015          * Push all writes to the server, so that the attribute cache 
5016          * doesn't get "out of sync" with the server. 
5017          * XXX There should be a better way! 
5019         nfs_flush(np
, MNT_WAIT
, vfs_context_thread(ctx
), V_IGNORE_WRITEERR
); 
5021         error 
= nfs_node_set_busy2(tdnp
, np
, vfs_context_thread(ctx
)); 
5026         nfsm_chain_null(&nmreq
); 
5027         nfsm_chain_null(&nmrep
); 
5029         nfsm_chain_build_alloc_init(error
, &nmreq
, 
5030             NFSX_FH(nfsvers
) * 2 + NFSX_UNSIGNED 
+ nfsm_rndup(cnp
->cn_namelen
)); 
5031         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
); 
5032         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, tdnp
->n_fhp
, tdnp
->n_fhsize
); 
5033         nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
); 
5034         nfsm_chain_build_done(error
, &nmreq
); 
5036         error 
= nfs_request(np
, NULL
, &nmreq
, NFSPROC_LINK
, ctx
, NULL
, &nmrep
, &xid
, &status
); 
5038         if ((lockerror 
= nfs_node_lock2(tdnp
, np
))) { 
5042         if (nfsvers 
== NFS_VER3
) { 
5044                 nfsm_chain_postop_attr_update_flag(error
, &nmrep
, np
, attrflag
, &xid
); 
5045                 nfsm_chain_get_wcc_data(error
, &nmrep
, tdnp
, &premtime
, &wccpostattr
, &txid
); 
5051         nfsm_chain_cleanup(&nmreq
); 
5052         nfsm_chain_cleanup(&nmrep
); 
5055                         NATTRINVALIDATE(np
); 
5057                 tdnp
->n_flag 
|= NMODIFIED
; 
5058                 /* if directory hadn't changed, update namecache mtime */ 
5059                 if (nfstimespeccmp(&tdnp
->n_ncmtime
, &premtime
, ==)) { 
5060                         NFS_CHANGED_UPDATE_NC(nfsvers
, tdnp
, &tdnp
->n_vattr
); 
5063                         NATTRINVALIDATE(tdnp
); 
5065                 if (!error 
&& (tdnp
->n_flag 
& NNEGNCENTRIES
)) { 
5066                         tdnp
->n_flag 
&= ~NNEGNCENTRIES
; 
5067                         cache_purge_negatives(tdvp
); 
5069                 nfs_node_unlock2(tdnp
, np
); 
5071         nfs_node_clear_busy2(tdnp
, np
); 
5073          * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 
5075         if (error 
== EEXIST
) { 
5082  * NFS symbolic link create call 
5086         struct vnop_symlink_args 
/* { 
5087                                   *  struct vnodeop_desc *a_desc; 
5090                                   *  struct componentname *a_cnp; 
5091                                   *  struct vnode_attr *a_vap; 
5093                                   *  vfs_context_t a_context; 
5096         vfs_context_t ctx 
= ap
->a_context
; 
5097         vnode_t dvp 
= ap
->a_dvp
; 
5098         struct vnode_attr 
*vap 
= ap
->a_vap
; 
5099         struct componentname 
*cnp 
= ap
->a_cnp
; 
5100         struct nfs_vattr 
*nvattr
; 
5102         int error 
= 0, lockerror 
= ENOENT
, busyerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0; 
5104         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
5105         vnode_t newvp 
= NULL
; 
5106         int nfsvers
, gotuid
, gotgid
; 
5107         u_int64_t xid 
= 0, dxid
; 
5108         nfsnode_t np 
= NULL
; 
5109         nfsnode_t dnp 
= VTONFS(dvp
); 
5110         struct nfsmount 
*nmp
; 
5111         struct nfsm_chain nmreq
, nmrep
; 
5113         struct nfs_dulookup 
*dul
; 
5115         int dul_in_progress 
= 0; 
5118         if (nfs_mount_gone(nmp
)) { 
5121         nfsvers 
= nmp
->nm_vers
; 
5122         namedattrs 
= (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_NAMED_ATTR
); 
5124         slen 
= strlen(ap
->a_target
); 
5125         if ((nfsvers 
== NFS_VER2
) && 
5126             ((cnp
->cn_namelen 
> NFS_MAXNAMLEN
) || (slen 
> NFS_MAXPATHLEN
))) { 
5127                 return ENAMETOOLONG
; 
5130         nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
); 
5132         VATTR_SET_SUPPORTED(vap
, va_mode
); 
5133         VATTR_SET_SUPPORTED(vap
, va_uid
); 
5134         VATTR_SET_SUPPORTED(vap
, va_gid
); 
5135         VATTR_SET_SUPPORTED(vap
, va_data_size
); 
5136         VATTR_SET_SUPPORTED(vap
, va_access_time
); 
5137         VATTR_SET_SUPPORTED(vap
, va_modify_time
); 
5138         gotuid 
= VATTR_IS_ACTIVE(vap
, va_uid
); 
5139         gotgid 
= VATTR_IS_ACTIVE(vap
, va_gid
); 
5141         fh 
= zalloc(nfs_fhandle_zone
); 
5142         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
5143         MALLOC(dul
, struct nfs_dulookup 
*, sizeof(*dul
), M_TEMP
, M_WAITOK
); 
5144         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
5146         error 
= busyerror 
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
)); 
5148                 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
); 
5151         nfsm_chain_null(&nmreq
); 
5152         nfsm_chain_null(&nmrep
); 
5154         nfsm_chain_build_alloc_init(error
, &nmreq
, 
5155             NFSX_FH(nfsvers
) + 2 * NFSX_UNSIGNED 
+ 
5156             nfsm_rndup(cnp
->cn_namelen
) + nfsm_rndup(slen
) + NFSX_SATTR(nfsvers
)); 
5157         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
5158         nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
); 
5159         if (nfsvers 
== NFS_VER3
) { 
5160                 nfsm_chain_add_v3sattr(nmp
, error
, &nmreq
, vap
); 
5162         nfsm_chain_add_name(error
, &nmreq
, ap
->a_target
, slen
, nmp
); 
5163         if (nfsvers 
== NFS_VER2
) { 
5164                 nfsm_chain_add_v2sattr(error
, &nmreq
, vap
, -1); 
5166         nfsm_chain_build_done(error
, &nmreq
); 
5169         error 
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC_SYMLINK
, 
5170             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, 0, NULL
, &req
); 
5173                         nfs_dulookup_start(dul
, dnp
, ctx
); 
5174                         dul_in_progress 
= 1; 
5176                 error 
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
); 
5179         if ((lockerror 
= nfs_node_lock(dnp
))) { 
5183         if (!error 
&& !status
) { 
5184                 if (dnp
->n_flag 
& NNEGNCENTRIES
) { 
5185                         dnp
->n_flag 
&= ~NNEGNCENTRIES
; 
5186                         cache_purge_negatives(dvp
); 
5188                 if (nfsvers 
== NFS_VER3
) { 
5189                         error 
= nfsm_chain_get_fh_attr(nmp
, &nmrep
, dnp
, ctx
, nfsvers
, &xid
, fh
, nvattr
); 
5194         if (nfsvers 
== NFS_VER3
) { 
5195                 nfsm_chain_get_wcc_data(error
, &nmrep
, dnp
, &premtime
, &wccpostattr
, &dxid
); 
5201         nfsm_chain_cleanup(&nmreq
); 
5202         nfsm_chain_cleanup(&nmrep
); 
5205                 dnp
->n_flag 
|= NMODIFIED
; 
5206                 /* if directory hadn't changed, update namecache mtime */ 
5207                 if (nfstimespeccmp(&dnp
->n_ncmtime
, &premtime
, ==)) { 
5208                         NFS_CHANGED_UPDATE_NC(nfsvers
, dnp
, &dnp
->n_vattr
); 
5210                 nfs_node_unlock(dnp
); 
5211                 /* nfs_getattr() will check changed and purge caches */ 
5212                 nfs_getattr(dnp
, NULL
, ctx
, wccpostattr 
? NGA_CACHED 
: NGA_UNCACHED
); 
5215         if (!error 
&& fh
->fh_len
) { 
5216                 error 
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &np
); 
5222         if (dul_in_progress
) { 
5223                 nfs_dulookup_finish(dul
, dnp
, ctx
); 
5227          * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 
5228          * if we can succeed in looking up the symlink. 
5230         if ((error 
== EEXIST
) || (!error 
&& !newvp
)) { 
5232                         nfs_node_unlock(np
); 
5236                 error 
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
); 
5239                         if (vnode_vtype(newvp
) != VLNK
) { 
5245                 nfs_node_clear_busy(dnp
); 
5247         if (!error 
&& (gotuid 
|| gotgid
) && 
5248             (!newvp 
|| nfs_getattrcache(np
, nvattr
, 0) || 
5249             (gotuid 
&& (nvattr
->nva_uid 
!= vap
->va_uid
)) || 
5250             (gotgid 
&& (nvattr
->nva_gid 
!= vap
->va_gid
)))) { 
5251                 /* clear ID bits if server didn't use them (or we can't tell) */ 
5252                 VATTR_CLEAR_SUPPORTED(vap
, va_uid
); 
5253                 VATTR_CLEAR_SUPPORTED(vap
, va_gid
); 
5257                         nfs_node_unlock(np
); 
5261                 nfs_node_unlock(np
); 
5264         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
5265         NFS_ZFREE(nfs_req_zone
, req
); 
5267         FREE(nvattr
, M_TEMP
); 
5276         struct vnop_mkdir_args 
/* { 
5277                                 *  struct vnodeop_desc *a_desc; 
5280                                 *  struct componentname *a_cnp; 
5281                                 *  struct vnode_attr *a_vap; 
5282                                 *  vfs_context_t a_context; 
5285         vfs_context_t ctx 
= ap
->a_context
; 
5286         vnode_t dvp 
= ap
->a_dvp
; 
5287         struct vnode_attr 
*vap 
= ap
->a_vap
; 
5288         struct componentname 
*cnp 
= ap
->a_cnp
; 
5289         struct nfs_vattr 
*nvattr
; 
5290         nfsnode_t np 
= NULL
; 
5291         struct nfsmount 
*nmp
; 
5292         nfsnode_t dnp 
= VTONFS(dvp
); 
5293         vnode_t newvp 
= NULL
; 
5294         int error 
= 0, lockerror 
= ENOENT
, busyerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0; 
5295         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
5296         int nfsvers
, gotuid
, gotgid
; 
5297         u_int64_t xid 
= 0, dxid
; 
5299         struct nfsm_chain nmreq
, nmrep
; 
5301         struct nfs_dulookup 
*dul
; 
5303         int dul_in_progress 
= 0; 
5306         if (nfs_mount_gone(nmp
)) { 
5309         nfsvers 
= nmp
->nm_vers
; 
5310         namedattrs 
= (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_NAMED_ATTR
); 
5312         if ((nfsvers 
== NFS_VER2
) && (cnp
->cn_namelen 
> NFS_MAXNAMLEN
)) { 
5313                 return ENAMETOOLONG
; 
5316         nfs_avoid_needless_id_setting_on_create(dnp
, vap
, ctx
); 
5318         VATTR_SET_SUPPORTED(vap
, va_mode
); 
5319         VATTR_SET_SUPPORTED(vap
, va_uid
); 
5320         VATTR_SET_SUPPORTED(vap
, va_gid
); 
5321         VATTR_SET_SUPPORTED(vap
, va_data_size
); 
5322         VATTR_SET_SUPPORTED(vap
, va_access_time
); 
5323         VATTR_SET_SUPPORTED(vap
, va_modify_time
); 
5324         gotuid 
= VATTR_IS_ACTIVE(vap
, va_uid
); 
5325         gotgid 
= VATTR_IS_ACTIVE(vap
, va_gid
); 
5327         fh 
= zalloc(nfs_fhandle_zone
); 
5328         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
5329         MALLOC(dul
, struct nfs_dulookup 
*, sizeof(*dul
), M_TEMP
, M_WAITOK
); 
5330         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
5332         error 
= busyerror 
= nfs_node_set_busy(dnp
, vfs_context_thread(ctx
)); 
5334                 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
); 
5337         nfsm_chain_null(&nmreq
); 
5338         nfsm_chain_null(&nmrep
); 
5340         nfsm_chain_build_alloc_init(error
, &nmreq
, 
5341             NFSX_FH(nfsvers
) + NFSX_UNSIGNED 
+ 
5342             nfsm_rndup(cnp
->cn_namelen
) + NFSX_SATTR(nfsvers
)); 
5343         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
5344         nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
); 
5345         if (nfsvers 
== NFS_VER3
) { 
5346                 nfsm_chain_add_v3sattr(nmp
, error
, &nmreq
, vap
); 
5348                 nfsm_chain_add_v2sattr(error
, &nmreq
, vap
, -1); 
5350         nfsm_chain_build_done(error
, &nmreq
); 
5353         error 
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC_MKDIR
, 
5354             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, 0, NULL
, &req
); 
5357                         nfs_dulookup_start(dul
, dnp
, ctx
); 
5358                         dul_in_progress 
= 1; 
5360                 error 
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
); 
5363         if ((lockerror 
= nfs_node_lock(dnp
))) { 
5367         if (!error 
&& !status
) { 
5368                 if (dnp
->n_flag 
& NNEGNCENTRIES
) { 
5369                         dnp
->n_flag 
&= ~NNEGNCENTRIES
; 
5370                         cache_purge_negatives(dvp
); 
5372                 error 
= nfsm_chain_get_fh_attr(nmp
, &nmrep
, dnp
, ctx
, nfsvers
, &xid
, fh
, nvattr
); 
5374         if (nfsvers 
== NFS_VER3
) { 
5375                 nfsm_chain_get_wcc_data(error
, &nmrep
, dnp
, &premtime
, &wccpostattr
, &dxid
); 
5381         nfsm_chain_cleanup(&nmreq
); 
5382         nfsm_chain_cleanup(&nmrep
); 
5385                 dnp
->n_flag 
|= NMODIFIED
; 
5386                 /* if directory hadn't changed, update namecache mtime */ 
5387                 if (nfstimespeccmp(&dnp
->n_ncmtime
, &premtime
, ==)) { 
5388                         NFS_CHANGED_UPDATE_NC(nfsvers
, dnp
, &dnp
->n_vattr
); 
5390                 nfs_node_unlock(dnp
); 
5391                 /* nfs_getattr() will check changed and purge caches */ 
5392                 nfs_getattr(dnp
, NULL
, ctx
, wccpostattr 
? NGA_CACHED 
: NGA_UNCACHED
); 
5395         if (!error 
&& fh
->fh_len
) { 
5396                 error 
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &np
); 
5402         if (dul_in_progress
) { 
5403                 nfs_dulookup_finish(dul
, dnp
, ctx
); 
5407          * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 
5408          * if we can succeed in looking up the directory. 
5410         if ((error 
== EEXIST
) || (!error 
&& !newvp
)) { 
5412                         nfs_node_unlock(np
); 
5416                 error 
= nfs_lookitup(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
, &np
); 
5419                         if (vnode_vtype(newvp
) != VDIR
) { 
5425                 nfs_node_clear_busy(dnp
); 
5427         if (!error 
&& (gotuid 
|| gotgid
) && 
5428             (!newvp 
|| nfs_getattrcache(np
, nvattr
, 0) || 
5429             (gotuid 
&& (nvattr
->nva_uid 
!= vap
->va_uid
)) || 
5430             (gotgid 
&& (nvattr
->nva_gid 
!= vap
->va_gid
)))) { 
5431                 /* clear ID bits if server didn't use them (or we can't tell) */ 
5432                 VATTR_CLEAR_SUPPORTED(vap
, va_uid
); 
5433                 VATTR_CLEAR_SUPPORTED(vap
, va_gid
); 
5437                         nfs_node_unlock(np
); 
5441                 nfs_node_unlock(np
); 
5444         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
5445         NFS_ZFREE(nfs_req_zone
, req
); 
5447         FREE(nvattr
, M_TEMP
); 
5452  * NFS remove directory call 
5456         struct vnop_rmdir_args 
/* { 
5457                                 *  struct vnodeop_desc *a_desc; 
5460                                 *  struct componentname *a_cnp; 
5461                                 *  vfs_context_t a_context; 
5464         vfs_context_t ctx 
= ap
->a_context
; 
5465         vnode_t vp 
= ap
->a_vp
; 
5466         vnode_t dvp 
= ap
->a_dvp
; 
5467         struct componentname 
*cnp 
= ap
->a_cnp
; 
5468         int error 
= 0, lockerror 
= ENOENT
, status 
= 0, wccpostattr 
= 0; 
5469         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
5470         struct nfsmount 
*nmp
; 
5471         nfsnode_t np 
= VTONFS(vp
); 
5472         nfsnode_t dnp 
= VTONFS(dvp
); 
5475         struct nfsm_chain nmreq
, nmrep
; 
5477         struct nfs_dulookup 
*dul
; 
5479         int dul_in_progress 
= 0; 
5482         if (nfs_mount_gone(nmp
)) { 
5485         nfsvers 
= nmp
->nm_vers
; 
5486         namedattrs 
= (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_NAMED_ATTR
); 
5488         if ((nfsvers 
== NFS_VER2
) && (cnp
->cn_namelen 
> NFS_MAXNAMLEN
)) { 
5489                 return ENAMETOOLONG
; 
5492         if ((error 
= nfs_node_set_busy2(dnp
, np
, vfs_context_thread(ctx
)))) { 
5496         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
5497         MALLOC(dul
, struct nfs_dulookup 
*, sizeof(*dul
), M_TEMP
, M_WAITOK
); 
5500                 nfs_dulookup_init(dul
, dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, ctx
); 
5503         nfsm_chain_null(&nmreq
); 
5504         nfsm_chain_null(&nmrep
); 
5506         nfsm_chain_build_alloc_init(error
, &nmreq
, 
5507             NFSX_FH(nfsvers
) + NFSX_UNSIGNED 
+ nfsm_rndup(cnp
->cn_namelen
)); 
5508         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
5509         nfsm_chain_add_name(error
, &nmreq
, cnp
->cn_nameptr
, cnp
->cn_namelen
, nmp
); 
5510         nfsm_chain_build_done(error
, &nmreq
); 
5513         error 
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC_RMDIR
, 
5514             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, 0, NULL
, &req
); 
5517                         nfs_dulookup_start(dul
, dnp
, ctx
); 
5518                         dul_in_progress 
= 1; 
5520                 error 
= nfs_request_async_finish(req
, &nmrep
, &xid
, &status
); 
5523         if ((lockerror 
= nfs_node_lock(dnp
))) { 
5526         if (nfsvers 
== NFS_VER3
) { 
5527                 nfsm_chain_get_wcc_data(error
, &nmrep
, dnp
, &premtime
, &wccpostattr
, &xid
); 
5533         nfsm_chain_cleanup(&nmreq
); 
5534         nfsm_chain_cleanup(&nmrep
); 
5537                 dnp
->n_flag 
|= NMODIFIED
; 
5538                 /* if directory hadn't changed, update namecache mtime */ 
5539                 if (nfstimespeccmp(&dnp
->n_ncmtime
, &premtime
, ==)) { 
5540                         NFS_CHANGED_UPDATE_NC(nfsvers
, dnp
, &dnp
->n_vattr
); 
5542                 nfs_node_unlock(dnp
); 
5543                 nfs_name_cache_purge(dnp
, np
, cnp
, ctx
); 
5544                 /* nfs_getattr() will check changed and purge caches */ 
5545                 nfs_getattr(dnp
, NULL
, ctx
, wccpostattr 
? NGA_CACHED 
: NGA_UNCACHED
); 
5547         if (dul_in_progress
) { 
5548                 nfs_dulookup_finish(dul
, dnp
, ctx
); 
5550         nfs_node_clear_busy2(dnp
, np
); 
5553          * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 
5555         if (error 
== ENOENT
) { 
5560                  * remove nfsnode from hash now so we can't accidentally find it 
5561                  * again if another object gets created with the same filehandle 
5562                  * before this vnode gets reclaimed 
5564                 lck_mtx_lock(&nfs_node_hash_mutex
); 
5565                 if (np
->n_hflag 
& NHHASHED
) { 
5566                         LIST_REMOVE(np
, n_hash
); 
5567                         np
->n_hflag 
&= ~NHHASHED
; 
5568                         FSDBG(266, 0, np
, np
->n_flag
, 0xb1eb1e); 
5570                 lck_mtx_unlock(&nfs_node_hash_mutex
); 
5572         NFS_ZFREE(nfs_req_zone
, req
); 
5580  * The incoming "offset" is a directory cookie indicating where in the 
5581  * directory entries should be read from.  A zero cookie means start at 
5582  * the beginning of the directory.  Any other cookie will be a cookie 
5583  * returned from the server. 
5585  * Using that cookie, determine which buffer (and where in that buffer) 
5586  * to start returning entries from.  Buffer logical block numbers are 
5587  * the cookies they start at.  If a buffer is found that is not full, 
5588  * call into the bio/RPC code to fill it.  The RPC code will probably 
5589  * fill several buffers (dropping the first, requiring a re-get). 
5591  * When done copying entries to the buffer, set the offset to the current 
5592  * entry's cookie and enter that cookie in the cookie cache. 
5594  * Note: because the getdirentries(2) API returns a long-typed offset, 
5595  * the incoming offset is a potentially truncated cookie (ptc). 
5596  * The cookie matching code is aware of this and will fall back to 
5597  * matching only 32 bits of the cookie. 
5601         struct vnop_readdir_args 
/* { 
5602                                   *  struct vnodeop_desc *a_desc; 
5604                                   *  struct uio *a_uio; 
5608                                   *  vfs_context_t a_context; 
5611         vfs_context_t ctx 
= ap
->a_context
; 
5612         vnode_t dvp 
= ap
->a_vp
; 
5613         nfsnode_t dnp 
= VTONFS(dvp
); 
5614         struct nfsmount 
*nmp
; 
5615         uio_t uio 
= ap
->a_uio
; 
5616         int error
, nfsvers
, extended
, numdirent
, bigcookies
, ptc
, done
; 
5617         long attrcachetimeout
; 
5618         uint16_t i
, iptc
, rlen
, nlen
; 
5619         uint64_t cookie
, nextcookie
, lbn 
= 0; 
5620         struct nfsbuf 
*bp 
= NULL
; 
5621         struct nfs_dir_buf_header 
*ndbhp
; 
5622         struct direntry 
*dp
, *dpptc
; 
5629         if (nfs_mount_gone(nmp
)) { 
5632         nfsvers 
= nmp
->nm_vers
; 
5633         bigcookies 
= (nmp
->nm_state 
& NFSSTA_BIGCOOKIES
); 
5634         extended 
= (ap
->a_flags 
& VNODE_READDIR_EXTENDED
); 
5636         if (vnode_vtype(dvp
) != VDIR
) { 
5640         if (ap
->a_eofflag
) { 
5644         if (uio_resid(uio
) == 0) { 
5648         if ((nfsvers 
>= NFS_VER4
) && (dnp
->n_vattr
.nva_flags 
& NFS_FFLAG_TRIGGER
)) { 
5649                 /* trigger directories should never be read, return nothing */ 
5653         thd 
= vfs_context_thread(ctx
); 
5654         numdirent 
= done 
= 0; 
5655         nextcookie 
= uio_offset(uio
); 
5656         ptc 
= bigcookies 
&& NFS_DIR_COOKIE_POTENTIALLY_TRUNCATED(nextcookie
); 
5658         if ((error 
= nfs_node_lock(dnp
))) { 
5662         if (dnp
->n_flag 
& NNEEDINVALIDATE
) { 
5663                 dnp
->n_flag 
&= ~NNEEDINVALIDATE
; 
5665                 nfs_node_unlock(dnp
); 
5666                 error 
= nfs_vinvalbuf(dvp
, 0, ctx
, 1); 
5668                         error 
= nfs_node_lock(dnp
); 
5675         if (dnp
->n_rdirplusstamp_eof 
&& dnp
->n_rdirplusstamp_sof
) { 
5676                 attrcachetimeout 
= nfs_attrcachetimeout(dnp
); 
5678                 if (attrcachetimeout 
&& (now
.tv_sec 
- dnp
->n_rdirplusstamp_sof 
> attrcachetimeout 
- 1)) { 
5679                         dnp
->n_rdirplusstamp_eof 
= dnp
->n_rdirplusstamp_sof 
= 0; 
5681                         nfs_node_unlock(dnp
); 
5682                         error 
= nfs_vinvalbuf(dvp
, 0, ctx
, 1); 
5684                                 error 
= nfs_node_lock(dnp
); 
5693          * check for need to invalidate when (re)starting at beginning 
5696                 if (dnp
->n_flag 
& NMODIFIED
) { 
5698                         nfs_node_unlock(dnp
); 
5699                         if ((error 
= nfs_vinvalbuf(dvp
, 0, ctx
, 1))) { 
5703                         nfs_node_unlock(dnp
); 
5705                 /* nfs_getattr() will check changed and purge caches */ 
5706                 if ((error 
= nfs_getattr(dnp
, NULL
, ctx
, NGA_UNCACHED
))) { 
5710                 nfs_node_unlock(dnp
); 
5713         error 
= nfs_dir_cookie_to_lbn(dnp
, nextcookie
, &ptc
, &lbn
); 
5715                 if (error 
< 0) { /* just hit EOF cookie */ 
5719                 if (ap
->a_eofflag
) { 
5724         while (!error 
&& !done
) { 
5725                 OSAddAtomic64(1, &nfsstats
.biocache_readdirs
); 
5726                 cookie 
= nextcookie
; 
5728                 error 
= nfs_buf_get(dnp
, lbn
, NFS_DIRBLKSIZ
, thd
, NBLK_READ
, &bp
); 
5732                 ndbhp 
= (struct nfs_dir_buf_header
*)bp
->nb_data
; 
5733                 if (!ISSET(bp
->nb_flags
, NB_CACHE
) || !ISSET(ndbhp
->ndbh_flags
, NDB_FULL
)) { 
5734                         if (!ISSET(bp
->nb_flags
, NB_CACHE
)) { /* initialize the buffer */ 
5735                                 ndbhp
->ndbh_flags 
= 0; 
5736                                 ndbhp
->ndbh_count 
= 0; 
5737                                 ndbhp
->ndbh_entry_end 
= sizeof(*ndbhp
); 
5738                                 ndbhp
->ndbh_ncgen 
= dnp
->n_ncgen
; 
5740                         error 
= nfs_buf_readdir(bp
, ctx
); 
5741                         if (error 
== NFSERR_DIRBUFDROPPED
) { 
5745                                 nfs_buf_release(bp
, 1); 
5747                         if (error 
&& (error 
!= ENXIO
) && (error 
!= ETIMEDOUT
) && (error 
!= EINTR
) && (error 
!= ERESTART
)) { 
5748                                 if (!nfs_node_lock(dnp
)) { 
5750                                         nfs_node_unlock(dnp
); 
5752                                 nfs_vinvalbuf(dvp
, 0, ctx
, 1); 
5753                                 if (error 
== NFSERR_BAD_COOKIE
) { 
5762                 /* find next entry to return */ 
5763                 dp 
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
); 
5765                 if ((lbn 
!= cookie
) && !(ptc 
&& NFS_DIR_COOKIE_SAME32(lbn
, cookie
))) { 
5768                         for (; (i 
< ndbhp
->ndbh_count
) && (cookie 
!= dp
->d_seekoff
); i
++) { 
5769                                 if (ptc 
&& !dpptc 
&& NFS_DIR_COOKIE_SAME32(cookie
, dp
->d_seekoff
)) { 
5773                                 nextcookie 
= dp
->d_seekoff
; 
5774                                 dp 
= NFS_DIRENTRY_NEXT(dp
); 
5776                         if ((i 
== ndbhp
->ndbh_count
) && dpptc
) { 
5780                         if (i 
< ndbhp
->ndbh_count
) { 
5781                                 nextcookie 
= dp
->d_seekoff
; 
5782                                 dp 
= NFS_DIRENTRY_NEXT(dp
); 
5786                 ptc 
= 0;  /* only have to deal with ptc on first cookie */ 
5788                 /* return as many entries as we can */ 
5789                 for (; i 
< ndbhp
->ndbh_count
; i
++) { 
5791                                 rlen 
= dp
->d_reclen
; 
5796                                         bzero(cp
, sizeof(dent
)); 
5798                                 if (dp
->d_namlen 
> (sizeof(dent
.d_name
) - 1)) { 
5799                                         nlen 
= sizeof(dent
.d_name
) - 1; 
5801                                         nlen 
= dp
->d_namlen
; 
5803                                 rlen 
= NFS_DIRENT_LEN(nlen
); 
5804                                 dent
.d_reclen 
= rlen
; 
5805                                 dent
.d_ino 
= (ino_t
)dp
->d_ino
; 
5806                                 dent
.d_type 
= dp
->d_type
; 
5807                                 dent
.d_namlen 
= (uint8_t)nlen
; 
5808                                 strlcpy(dent
.d_name
, dp
->d_name
, nlen 
+ 1); 
5810                         /* check that the record fits */ 
5811                         if (rlen 
> uio_resid(uio
)) { 
5815                         if ((error 
= uiomove(cp
, rlen
, uio
))) { 
5819                         nextcookie 
= dp
->d_seekoff
; 
5820                         dp 
= NFS_DIRENTRY_NEXT(dp
); 
5823                 if (i 
== ndbhp
->ndbh_count
) { 
5824                         /* hit end of buffer, move to next buffer */ 
5826                         /* if we also hit EOF, we're done */ 
5827                         if (ISSET(ndbhp
->ndbh_flags
, NDB_EOF
)) { 
5829                                 if (ap
->a_eofflag
) { 
5835                         uio_setoffset(uio
, nextcookie
); 
5837                 if (!error 
&& !done 
&& (nextcookie 
== cookie
)) { 
5838                         printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie
, i
, ndbhp
->ndbh_count
); 
5841                 nfs_buf_release(bp
, 1); 
5845                 nfs_dir_cookie_cache(dnp
, nextcookie
, lbn
); 
5848         if (ap
->a_numdirent
) { 
5849                 *ap
->a_numdirent 
= numdirent
; 
5857  * Invalidate cached directory information, except for the actual directory 
5858  * blocks (which are invalidated separately). 
5861 nfs_invaldir_cookies(nfsnode_t dnp
) 
5863         if (vnode_vtype(NFSTOV(dnp
)) != VDIR
) { 
5866         dnp
->n_eofcookie 
= 0; 
5867         dnp
->n_cookieverf 
= 0; 
5868         if (!dnp
->n_cookiecache
) { 
5871         dnp
->n_cookiecache
->free 
= 0; 
5872         dnp
->n_cookiecache
->mru 
= -1; 
5873         memset(dnp
->n_cookiecache
->next
, -1, NFSNUMCOOKIES
); 
5877 nfs_invaldir(nfsnode_t dnp
) 
5880         nfs_invaldir_cookies(dnp
); 
5884  * calculate how much space is available for additional directory entries. 
5887 nfs_dir_buf_freespace(struct nfsbuf 
*bp
, int rdirplus
) 
5889         struct nfs_dir_buf_header 
*ndbhp 
= (struct nfs_dir_buf_header
*)bp
->nb_data
; 
5895         space 
= bp
->nb_bufsize 
- ndbhp
->ndbh_entry_end
; 
5897                 space 
-= ndbhp
->ndbh_count 
* sizeof(struct nfs_vattr
); 
5903  * add/update a cookie->lbn entry in the directory cookie cache 
5906 nfs_dir_cookie_cache(nfsnode_t dnp
, uint64_t cookie
, uint64_t lbn
) 
5908         struct nfsdmap 
*ndcc
; 
5915         if (nfs_node_lock(dnp
)) { 
5919         if (cookie 
== dnp
->n_eofcookie
) { /* EOF cookie */ 
5920                 nfs_node_unlock(dnp
); 
5924         ndcc 
= dnp
->n_cookiecache
; 
5926                 /* allocate the cookie cache structure */ 
5927                 ndcc 
= dnp
->n_cookiecache 
= zalloc(ZV_NFSDIROFF
); 
5930                 memset(ndcc
->next
, -1, NFSNUMCOOKIES
); 
5934          * Search the list for this cookie. 
5935          * Keep track of previous and last entries. 
5939         while ((i 
!= -1) && (cookie 
!= ndcc
->cookies
[i
].key
)) { 
5940                 if (ndcc
->next
[i
] == -1) { /* stop on last entry so we can reuse */ 
5946         if ((i 
!= -1) && (cookie 
== ndcc
->cookies
[i
].key
)) { 
5947                 /* found it, remove from list */ 
5949                         ndcc
->next
[prev
] = ndcc
->next
[i
]; 
5951                         ndcc
->mru 
= ndcc
->next
[i
]; 
5954                 /* not found, use next free entry or reuse last entry */ 
5955                 if (ndcc
->free 
!= NFSNUMCOOKIES
) { 
5958                         ndcc
->next
[prev
] = -1; 
5960                 ndcc
->cookies
[i
].key 
= cookie
; 
5961                 ndcc
->cookies
[i
].lbn 
= lbn
; 
5963         /* insert cookie at head of MRU list */ 
5964         ndcc
->next
[i
] = ndcc
->mru
; 
5966         nfs_node_unlock(dnp
); 
5970  * Try to map the given directory cookie to a directory buffer (return lbn). 
5971  * If we have a possibly truncated cookie (ptc), check for 32-bit matches too. 
5974 nfs_dir_cookie_to_lbn(nfsnode_t dnp
, uint64_t cookie
, int *ptc
, uint64_t *lbnp
) 
5976         struct nfsdmap 
*ndcc 
= dnp
->n_cookiecache
; 
5977         int8_t eofptc
, found
; 
5979         struct nfsmount 
*nmp
; 
5980         struct nfsbuf 
*bp
, *lastbp
; 
5981         struct nfsbuflists blist
; 
5982         struct direntry 
*dp
, *dpptc
; 
5983         struct nfs_dir_buf_header 
*ndbhp
; 
5985         if (!cookie
) {  /* initial cookie */ 
5991         if (nfs_node_lock(dnp
)) { 
5995         if (cookie 
== dnp
->n_eofcookie
) { /* EOF cookie */ 
5996                 nfs_node_unlock(dnp
); 
5997                 OSAddAtomic64(1, &nfsstats
.direofcache_hits
); 
6001         /* note if cookie is a 32-bit match with the EOF cookie */ 
6002         eofptc 
= *ptc 
? NFS_DIR_COOKIE_SAME32(cookie
, dnp
->n_eofcookie
) : 0; 
6005         /* search the list for the cookie */ 
6006         for (i 
= ndcc 
? ndcc
->mru 
: -1; i 
>= 0; i 
= ndcc
->next
[i
]) { 
6007                 if (ndcc
->cookies
[i
].key 
== cookie
) { 
6008                         /* found a match for this cookie */ 
6009                         *lbnp 
= ndcc
->cookies
[i
].lbn
; 
6010                         nfs_node_unlock(dnp
); 
6011                         OSAddAtomic64(1, &nfsstats
.direofcache_hits
); 
6015                 /* check for 32-bit match */ 
6016                 if (*ptc 
&& (iptc 
== -1) && NFS_DIR_COOKIE_SAME32(ndcc
->cookies
[i
].key
, cookie
)) { 
6020         /* exact match not found */ 
6022                 /* but 32-bit match hit the EOF cookie */ 
6023                 nfs_node_unlock(dnp
); 
6024                 OSAddAtomic64(1, &nfsstats
.direofcache_hits
); 
6028                 /* but 32-bit match got a hit */ 
6029                 *lbnp 
= ndcc
->cookies
[iptc
].lbn
; 
6030                 nfs_node_unlock(dnp
); 
6031                 OSAddAtomic64(1, &nfsstats
.direofcache_hits
); 
6034         nfs_node_unlock(dnp
); 
6037          * No match found in the cookie cache... hmm... 
6038          * Let's search the directory's buffers for the cookie. 
6040         nmp 
= NFSTONMP(dnp
); 
6041         if (nfs_mount_gone(nmp
)) { 
6047         lck_mtx_lock(&nfs_buf_mutex
); 
6049          * Scan the list of buffers, keeping them in order. 
6050          * Note that itercomplete inserts each of the remaining buffers 
6051          * into the head of list (thus reversing the elements).  So, we 
6052          * make sure to iterate through all buffers, inserting them after 
6053          * each other, to keep them in order. 
6054          * Also note: the LIST_INSERT_AFTER(lastbp) is only safe because 
6055          * we don't drop nfs_buf_mutex. 
6057         if (!nfs_buf_iterprepare(dnp
, &blist
, NBI_CLEAN
)) { 
6059                 while ((bp 
= LIST_FIRST(&blist
))) { 
6060                         LIST_REMOVE(bp
, nb_vnbufs
); 
6062                                 LIST_INSERT_HEAD(&dnp
->n_cleanblkhd
, bp
, nb_vnbufs
); 
6064                                 LIST_INSERT_AFTER(lastbp
, bp
, nb_vnbufs
); 
6071                         if (nfs_buf_acquire(bp
, NBAC_NOWAIT
, 0, 0)) { 
6072                                 /* just skip this buffer */ 
6073                                 nfs_buf_refrele(bp
); 
6076                         nfs_buf_refrele(bp
); 
6078                         /* scan the buffer for the cookie */ 
6079                         ndbhp 
= (struct nfs_dir_buf_header
*)bp
->nb_data
; 
6080                         dp 
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
); 
6082                         for (i 
= 0; (i 
< ndbhp
->ndbh_count
) && (cookie 
!= dp
->d_seekoff
); i
++) { 
6083                                 if (*ptc 
&& !dpptc 
&& NFS_DIR_COOKIE_SAME32(cookie
, dp
->d_seekoff
)) { 
6087                                 dp 
= NFS_DIRENTRY_NEXT(dp
); 
6089                         if ((i 
== ndbhp
->ndbh_count
) && dpptc
) { 
6090                                 /* found only a PTC match */ 
6093                         } else if (i 
< ndbhp
->ndbh_count
) { 
6096                         if (i 
< (ndbhp
->ndbh_count 
- 1)) { 
6097                                 /* next entry is *in* this buffer: return this block */ 
6098                                 *lbnp 
= bp
->nb_lblkno
; 
6100                         } else if (i 
== (ndbhp
->ndbh_count 
- 1)) { 
6101                                 /* next entry refers to *next* buffer: return next block */ 
6102                                 *lbnp 
= dp
->d_seekoff
; 
6107                 nfs_buf_itercomplete(dnp
, &blist
, NBI_CLEAN
); 
6109         lck_mtx_unlock(&nfs_buf_mutex
); 
6111                 OSAddAtomic64(1, &nfsstats
.direofcache_hits
); 
6115         /* still not found... oh well, just start a new block */ 
6117         OSAddAtomic64(1, &nfsstats
.direofcache_misses
); 
6122  * scan a directory buffer for the given name 
6123  * Returns: ESRCH if not found, ENOENT if found invalid, 0 if found 
6124  * Note: should only be called with RDIRPLUS directory buffers 
6127 #define NDBS_PURGE      1 
6128 #define NDBS_UPDATE     2 
6133         struct componentname 
*cnp
, 
6135         struct nfs_vattr 
*nvap
, 
6138         daddr64_t 
*nextlbnp
, 
6141         struct direntry 
*dp
; 
6142         struct nfs_dir_buf_header 
*ndbhp
; 
6143         struct nfs_vattr 
*nvattrp
; 
6144         daddr64_t nextlbn 
= 0; 
6145         int i
, error 
= ESRCH
; 
6148         /* scan the buffer for the name */ 
6149         ndbhp 
= (struct nfs_dir_buf_header
*)bp
->nb_data
; 
6150         dp 
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
); 
6151         for (i 
= 0; i 
< ndbhp
->ndbh_count
; i
++) { 
6152                 nextlbn 
= dp
->d_seekoff
; 
6153                 if ((cnp
->cn_namelen 
== dp
->d_namlen
) && !strcmp(cnp
->cn_nameptr
, dp
->d_name
)) { 
6154                         fhlen 
= (uint8_t)dp
->d_name
[dp
->d_namlen 
+ 1]; 
6155                         nvattrp 
= NFS_DIR_BUF_NVATTR(bp
, i
); 
6156                         if ((ndbhp
->ndbh_ncgen 
!= bp
->nb_np
->n_ncgen
) || (fhlen 
== 0) || 
6157                             (nvattrp
->nva_type 
== VNON
) || (nvattrp
->nva_fileid 
== 0)) { 
6158                                 /* entry is not valid */ 
6162                         if (flags 
== NDBS_PURGE
) { 
6164                                 bzero(nvattrp
, sizeof(*nvattrp
)); 
6168                         if (flags 
== NDBS_UPDATE
) { 
6169                                 /* update direntry's attrs if fh matches */ 
6170                                 if ((fhp
->fh_len 
== fhlen
) && !bcmp(&dp
->d_name
[dp
->d_namlen 
+ 2], fhp
->fh_data
, fhlen
)) { 
6171                                         bcopy(nvap
, nvattrp
, sizeof(*nvap
)); 
6172                                         dp
->d_fileno 
= nvattrp
->nva_fileid
; 
6173                                         nvattrp
->nva_fileid 
= *xidp
; 
6174                                         nvap
->nva_flags 
|= NFS_FFLAG_FILEID_CONTAINS_XID
; 
6175                                         *(time_t*)(&dp
->d_name
[dp
->d_namlen 
+ 2 + fhp
->fh_len
]) = *attrstampp
; 
6180                         /* copy out fh, attrs, attrstamp, and xid */ 
6181                         fhp
->fh_len 
= fhlen
; 
6182                         bcopy(&dp
->d_name
[dp
->d_namlen 
+ 2], fhp
->fh_data
, MAX(fhp
->fh_len
, (int)sizeof(fhp
->fh_data
))); 
6183                         *attrstampp 
= *(time_t*)(&dp
->d_name
[dp
->d_namlen 
+ 2 + fhp
->fh_len
]); 
6184                         bcopy(nvattrp
, nvap
, sizeof(*nvap
)); 
6185                         *xidp 
= nvap
->nva_fileid
; 
6186                         nvap
->nva_fileid 
= dp
->d_fileno
; 
6187                         nvap
->nva_flags 
&= ~NFS_FFLAG_FILEID_CONTAINS_XID
; 
6191                 dp 
= NFS_DIRENTRY_NEXT(dp
); 
6194                 *nextlbnp 
= nextlbn
; 
6200  * Look up a name in a directory's buffers. 
6201  * Note: should only be called with RDIRPLUS directory buffers 
6204 nfs_dir_buf_cache_lookup(nfsnode_t dnp
, nfsnode_t 
*npp
, struct componentname 
*cnp
, vfs_context_t ctx
, int purge
, int *skipdu
) 
6207         struct nfsmount 
*nmp
; 
6208         int error 
= 0, i
, found 
= 0, count 
= 0; 
6210         struct nfs_vattr 
*nvattr
; 
6212         time_t attrstamp 
= 0; 
6213         thread_t thd 
= vfs_context_thread(ctx
); 
6214         struct nfsbuf 
*bp
, *lastbp
, *foundbp
; 
6215         struct nfsbuflists blist
; 
6216         daddr64_t lbn
, nextlbn
; 
6217         int dotunder 
= (cnp
->cn_namelen 
> 2) && (cnp
->cn_nameptr
[0] == '.') && (cnp
->cn_nameptr
[1] == '_'); 
6218         int isdot 
= (cnp
->cn_namelen 
== 1) && (cnp
->cn_nameptr
[0] == '.'); 
6219         int isdotdot 
= (cnp
->cn_namelen 
== 2) && (cnp
->cn_nameptr
[0] == '.') && (cnp
->cn_nameptr
[1] == '.'); 
6220         int eof 
= 0, sof 
= 0, skipped 
= 0; 
6222         nmp 
= NFSTONMP(dnp
); 
6223         if (nfs_mount_gone(nmp
)) { 
6230         if (isdot 
|| isdotdot
) { 
6234         fh 
= zalloc(nfs_fhandle_zone
); 
6235         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
6237         /* first check most recent buffer (and next one too) */ 
6238         lbn 
= dnp
->n_lastdbl
; 
6239         for (i 
= 0; i 
< 2; i
++) { 
6240                 if ((error 
= nfs_buf_get(dnp
, lbn
, NFS_DIRBLKSIZ
, thd
, NBLK_READ 
| NBLK_ONLYVALID
, &bp
))) { 
6248                 nfs_dir_buf_cache_lookup_boundaries(bp
, &sof
, &eof
); 
6249                 error 
= nfs_dir_buf_search(bp
, cnp
, fh
, nvattr
, &xid
, &attrstamp
, &nextlbn
, purge 
? NDBS_PURGE 
: 0); 
6250                 nfs_buf_release(bp
, 0); 
6251                 if (error 
== ESRCH
) { 
6260         lck_mtx_lock(&nfs_buf_mutex
); 
6262                 dnp
->n_lastdbl 
= lbn
; 
6266         /* If we detect that we fetched full directory listing we should avoid sending lookups for ._ files */ 
6267         if (dotunder 
&& !found 
&& !error 
&& eof 
&& sof 
&& !skipped 
&& skipdu
) { 
6272          * Scan the list of buffers, keeping them in order. 
6273          * Note that itercomplete inserts each of the remaining buffers 
6274          * into the head of list (thus reversing the elements).  So, we 
6275          * make sure to iterate through all buffers, inserting them after 
6276          * each other, to keep them in order. 
6277          * Also note: the LIST_INSERT_AFTER(lastbp) is only safe because 
6278          * we don't drop nfs_buf_mutex. 
6280         eof 
= sof 
= skipped 
= 0; 
6281         if (!nfs_buf_iterprepare(dnp
, &blist
, NBI_CLEAN
)) { 
6282                 lastbp 
= foundbp 
= NULL
; 
6283                 while ((bp 
= LIST_FIRST(&blist
))) { 
6284                         LIST_REMOVE(bp
, nb_vnbufs
); 
6286                                 LIST_INSERT_HEAD(&dnp
->n_cleanblkhd
, bp
, nb_vnbufs
); 
6288                                 LIST_INSERT_AFTER(lastbp
, bp
, nb_vnbufs
); 
6291                         if (error 
|| found
) { 
6295                         if (!purge 
&& dotunder 
&& (count 
> 100)) { /* don't waste too much time looking for ._ files */ 
6300                         lbn 
= bp
->nb_lblkno
; 
6301                         if (nfs_buf_acquire(bp
, NBAC_NOWAIT
, 0, 0)) { 
6302                                 /* just skip this buffer */ 
6303                                 nfs_buf_refrele(bp
); 
6307                         nfs_buf_refrele(bp
); 
6309                         nfs_dir_buf_cache_lookup_boundaries(bp
, &sof
, &eof
); 
6310                         error 
= nfs_dir_buf_search(bp
, cnp
, fh
, nvattr
, &xid
, &attrstamp
, NULL
, purge 
? NDBS_PURGE 
: 0); 
6311                         if (error 
== ESRCH
) { 
6320                         LIST_REMOVE(foundbp
, nb_vnbufs
); 
6321                         LIST_INSERT_HEAD(&dnp
->n_cleanblkhd
, foundbp
, nb_vnbufs
); 
6322                         dnp
->n_lastdbl 
= foundbp
->nb_lblkno
; 
6324                 nfs_buf_itercomplete(dnp
, &blist
, NBI_CLEAN
); 
6327         /* If we detect that we fetched full directory listing we should avoid sending lookups for ._ files */ 
6328         if (dotunder 
&& !found 
&& !error 
&& eof 
&& sof 
&& !skipped 
&& skipdu
) { 
6333         lck_mtx_unlock(&nfs_buf_mutex
); 
6335         if (!error 
&& found 
&& !purge
) { 
6336                 error 
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, 
6337                     fh
->fh_len
, nvattr
, &xid
, dnp
->n_auth
, NG_MAKEENTRY
, 
6342                 newnp
->n_attrstamp 
= attrstamp
; 
6344                 nfs_node_unlock(newnp
); 
6345                 /* check if the dir buffer's attrs are out of date */ 
6346                 if (!nfs_getattr(newnp
, nvattr
, ctx
, NGA_CACHED
) && 
6347                     (newnp
->n_attrstamp 
!= attrstamp
)) { 
6348                         /* they are, so update them */ 
6349                         error 
= nfs_buf_get(dnp
, lbn
, NFS_DIRBLKSIZ
, thd
, NBLK_READ 
| NBLK_ONLYVALID
, &bp
); 
6351                                 attrstamp 
= newnp
->n_attrstamp
; 
6353                                 nfs_dir_buf_search(bp
, cnp
, fh
, nvattr
, &xid
, &attrstamp
, NULL
, NDBS_UPDATE
); 
6354                                 nfs_buf_release(bp
, 0); 
6361         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
6362         FREE(nvattr
, M_TEMP
); 
6367  * Purge name cache entries for the given node. 
6368  * For RDIRPLUS, also invalidate the entry in the directory's buffers. 
6371 nfs_name_cache_purge(nfsnode_t dnp
, nfsnode_t np
, struct componentname 
*cnp
, vfs_context_t ctx
) 
6373         struct nfsmount 
*nmp 
= NFSTONMP(dnp
); 
6375         cache_purge(NFSTOV(np
)); 
6376         if (nmp 
&& (nmp
->nm_vers 
> NFS_VER2
) && NMFLAG(nmp
, RDIRPLUS
)) { 
6377                 nfs_dir_buf_cache_lookup(dnp
, NULL
, cnp
, ctx
, 1, NULL
); 
6382  * NFS V3 readdir (plus) RPC. 
6385 nfs3_readdir_rpc(nfsnode_t dnp
, struct nfsbuf 
*bp
, vfs_context_t ctx
) 
6387         struct nfsmount 
*nmp
; 
6388         int error 
= 0, lockerror
, nfsvers
, rdirplus
, bigcookies
; 
6389         int i
, status 
= 0, attrflag
, fhflag
, more_entries 
= 1, eof
, bp_dropped 
= 0; 
6390         uint32_t nmreaddirsize
, nmrsize
; 
6391         uint32_t namlen
, skiplen
, fhlen
, xlen
, attrlen
; 
6392         uint64_t cookie
, lastcookie
, xid
, savedxid
, fileno
, space_free
, space_needed
; 
6393         struct nfsm_chain nmreq
, nmrep
, nmrepsave
; 
6395         struct nfs_vattr 
*nvattrp
; 
6396         struct nfs_dir_buf_header 
*ndbhp
; 
6397         struct direntry 
*dp
; 
6403         nmp 
= NFSTONMP(dnp
); 
6404         if (nfs_mount_gone(nmp
)) { 
6407         nfsvers 
= nmp
->nm_vers
; 
6408         nmreaddirsize 
= nmp
->nm_readdirsize
; 
6409         nmrsize 
= nmp
->nm_rsize
; 
6410         bigcookies 
= nmp
->nm_state 
& NFSSTA_BIGCOOKIES
; 
6411         fh 
= zalloc(nfs_fhandle_zone
); 
6413         rdirplus 
= ((nfsvers 
> NFS_VER2
) && NMFLAG(nmp
, RDIRPLUS
)) ? 1 : 0; 
6415         if ((lockerror 
= nfs_node_lock(dnp
))) { 
6416                 NFS_ZFREE(nfs_fhandle_zone
, fh
); 
6420         /* determine cookie to use, and move dp to the right offset */ 
6421         ndbhp 
= (struct nfs_dir_buf_header
*)bp
->nb_data
; 
6422         dp 
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
); 
6423         if (ndbhp
->ndbh_count
) { 
6424                 for (i 
= 0; i 
< ndbhp
->ndbh_count 
- 1; i
++) { 
6425                         dp 
= NFS_DIRENTRY_NEXT(dp
); 
6427                 cookie 
= dp
->d_seekoff
; 
6428                 dp 
= NFS_DIRENTRY_NEXT(dp
); 
6430                 cookie 
= bp
->nb_lblkno
; 
6431                 /* increment with every buffer read */ 
6432                 OSAddAtomic64(1, &nfsstats
.readdir_bios
); 
6434         lastcookie 
= cookie
; 
6437          * Loop around doing readdir(plus) RPCs of size nm_readdirsize until 
6438          * the buffer is full (or we hit EOF).  Then put the remainder of the 
6439          * results in the next buffer(s). 
6441         nfsm_chain_null(&nmreq
); 
6442         nfsm_chain_null(&nmrep
); 
6443         while (nfs_dir_buf_freespace(bp
, rdirplus
) && !(ndbhp
->ndbh_flags 
& NDB_FULL
)) { 
6444                 nfsm_chain_build_alloc_init(error
, &nmreq
, 
6445                     NFSX_FH(nfsvers
) + NFSX_READDIR(nfsvers
) + NFSX_UNSIGNED
); 
6446                 nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
6447                 if (nfsvers 
== NFS_VER3
) { 
6448                         /* opaque values don't need swapping, but as long */ 
6449                         /* as we are consistent about it, it should be ok */ 
6450                         nfsm_chain_add_64(error
, &nmreq
, cookie
); 
6451                         nfsm_chain_add_64(error
, &nmreq
, dnp
->n_cookieverf
); 
6453                         nfsm_chain_add_32(error
, &nmreq
, cookie
); 
6455                 nfsm_chain_add_32(error
, &nmreq
, nmreaddirsize
); 
6457                         nfsm_chain_add_32(error
, &nmreq
, nmrsize
); 
6459                 nfsm_chain_build_done(error
, &nmreq
); 
6460                 nfs_node_unlock(dnp
); 
6464                 error 
= nfs_request(dnp
, NULL
, &nmreq
, 
6465                     rdirplus 
? NFSPROC_READDIRPLUS 
: NFSPROC_READDIR
, 
6466                     ctx
, NULL
, &nmrep
, &xid
, &status
); 
6468                 if ((lockerror 
= nfs_node_lock(dnp
))) { 
6473                 if (nfsvers 
== NFS_VER3
) { 
6474                         nfsm_chain_postop_attr_update(error
, &nmrep
, dnp
, &xid
); 
6479                 if (nfsvers 
== NFS_VER3
) { 
6480                         nfsm_chain_get_64(error
, &nmrep
, dnp
->n_cookieverf
); 
6482                 nfsm_chain_get_32(error
, &nmrep
, more_entries
); 
6485                         nfs_node_unlock(dnp
); 
6488                 if (error 
== NFSERR_NOTSUPP
) { 
6489                         /* oops... it doesn't look like readdirplus is supported */ 
6490                         lck_mtx_lock(&nmp
->nm_lock
); 
6491                         NFS_BITMAP_CLR(nmp
->nm_flags
, NFS_MFLAG_RDIRPLUS
); 
6492                         lck_mtx_unlock(&nmp
->nm_lock
); 
6493                         nfsm_chain_cleanup(&nmreq
); 
6494                         nfsm_chain_cleanup(&nmrep
); 
6501                         if (lastcookie 
== 0) { 
6502                                 dnp
->n_rdirplusstamp_sof 
= now
.tv_sec
; 
6503                                 dnp
->n_rdirplusstamp_eof 
= 0; 
6507                 /* loop through the entries packing them into the buffer */ 
6508                 while (more_entries
) { 
6509                         if (nfsvers 
== NFS_VER3
) { 
6510                                 nfsm_chain_get_64(error
, &nmrep
, fileno
); 
6512                                 nfsm_chain_get_32(error
, &nmrep
, fileno
); 
6514                         nfsm_chain_get_32(error
, &nmrep
, namlen
); 
6516                         /* just truncate names that don't fit in direntry.d_name */ 
6521                         if (namlen 
> (sizeof(dp
->d_name
) - 1)) { 
6522                                 skiplen 
= namlen 
- sizeof(dp
->d_name
) + 1; 
6523                                 namlen 
= sizeof(dp
->d_name
) - 1; 
6527                         /* guess that fh size will be same as parent */ 
6528                         fhlen 
= rdirplus 
? (1 + dnp
->n_fhsize
) : 0; 
6529                         xlen 
= rdirplus 
? (fhlen 
+ sizeof(time_t)) : 0; 
6530                         attrlen 
= rdirplus 
? sizeof(struct nfs_vattr
) : 0; 
6531                         reclen 
= NFS_DIRENTRY_LEN_16(namlen 
+ xlen
); 
6532                         space_needed 
= reclen 
+ attrlen
; 
6533                         space_free 
= nfs_dir_buf_freespace(bp
, rdirplus
); 
6534                         if (space_needed 
> space_free
) { 
6536                                  * We still have entries to pack, but we've 
6537                                  * run out of room in the current buffer. 
6538                                  * So we need to move to the next buffer. 
6539                                  * The block# for the next buffer is the 
6540                                  * last cookie in the current buffer. 
6543                                 ndbhp
->ndbh_flags 
|= NDB_FULL
; 
6544                                 nfs_buf_release(bp
, 0); 
6547                                 error 
= nfs_buf_get(dnp
, lastcookie
, NFS_DIRBLKSIZ
, vfs_context_thread(ctx
), NBLK_READ
, &bp
); 
6549                                 /* initialize buffer */ 
6550                                 ndbhp 
= (struct nfs_dir_buf_header
*)bp
->nb_data
; 
6551                                 ndbhp
->ndbh_flags 
= 0; 
6552                                 ndbhp
->ndbh_count 
= 0; 
6553                                 ndbhp
->ndbh_entry_end 
= sizeof(*ndbhp
); 
6554                                 ndbhp
->ndbh_ncgen 
= dnp
->n_ncgen
; 
6555                                 space_free 
= nfs_dir_buf_freespace(bp
, rdirplus
); 
6556                                 dp 
= NFS_DIR_BUF_FIRST_DIRENTRY(bp
); 
6557                                 /* increment with every buffer read */ 
6558                                 OSAddAtomic64(1, &nfsstats
.readdir_bios
); 
6561                         dp
->d_fileno 
= fileno
; 
6562                         dp
->d_namlen 
= (uint16_t)namlen
; 
6563                         dp
->d_reclen 
= reclen
; 
6564                         dp
->d_type 
= DT_UNKNOWN
; 
6565                         nfsm_chain_get_opaque(error
, &nmrep
, namlen
, dp
->d_name
); 
6567                         dp
->d_name
[namlen
] = '\0'; 
6569                                 nfsm_chain_adv(error
, &nmrep
, 
6570                                     nfsm_rndup(namlen 
+ skiplen
) - nfsm_rndup(namlen
)); 
6572                         if (nfsvers 
== NFS_VER3
) { 
6573                                 nfsm_chain_get_64(error
, &nmrep
, cookie
); 
6575                                 nfsm_chain_get_32(error
, &nmrep
, cookie
); 
6578                         dp
->d_seekoff 
= cookie
; 
6579                         if (!bigcookies 
&& (cookie 
>> 32) && (nmp 
== NFSTONMP(dnp
))) { 
6580                                 /* we've got a big cookie, make sure flag is set */ 
6581                                 lck_mtx_lock(&nmp
->nm_lock
); 
6582                                 nmp
->nm_state 
|= NFSSTA_BIGCOOKIES
; 
6583                                 lck_mtx_unlock(&nmp
->nm_lock
); 
6587                                 nvattrp 
= NFS_DIR_BUF_NVATTR(bp
, ndbhp
->ndbh_count
); 
6588                                 /* check for attributes */ 
6589                                 nfsm_chain_get_32(error
, &nmrep
, attrflag
); 
6592                                         /* grab attributes */ 
6593                                         error 
= nfs_parsefattr(nmp
, &nmrep
, NFS_VER3
, nvattrp
); 
6595                                         dp
->d_type 
= IFTODT(VTTOIF(nvattrp
->nva_type
)); 
6596                                         /* fileid is already in d_fileno, so stash xid in attrs */ 
6597                                         nvattrp
->nva_fileid 
= savedxid
; 
6598                                         nvattrp
->nva_flags 
|= NFS_FFLAG_FILEID_CONTAINS_XID
; 
6600                                         /* mark the attributes invalid */ 
6601                                         bzero(nvattrp
, sizeof(struct nfs_vattr
)); 
6603                                 /* check for file handle */ 
6604                                 nfsm_chain_get_32(error
, &nmrep
, fhflag
); 
6607                                         nfsm_chain_get_fh(error
, &nmrep
, NFS_VER3
, fh
); 
6609                                         fhlen 
= fh
->fh_len 
+ 1; 
6610                                         xlen 
= fhlen 
+ sizeof(time_t); 
6611                                         reclen 
= NFS_DIRENTRY_LEN_16(namlen 
+ xlen
); 
6612                                         space_needed 
= reclen 
+ attrlen
; 
6613                                         if (space_needed 
> space_free
) { 
6614                                                 /* didn't actually have the room... move on to next buffer */ 
6618                                         /* pack the file handle into the record */ 
6619                                         dp
->d_name
[dp
->d_namlen 
+ 1] = (unsigned char)fh
->fh_len
; /* No truncation because fh_len's value is checked during nfsm_chain_get_fh() */ 
6620                                         bcopy(fh
->fh_data
, &dp
->d_name
[dp
->d_namlen 
+ 2], fh
->fh_len
); 
6622                                         /* mark the file handle invalid */ 
6624                                         fhlen 
= fh
->fh_len 
+ 1; 
6625                                         xlen 
= fhlen 
+ sizeof(time_t); 
6626                                         reclen 
= NFS_DIRENTRY_LEN_16(namlen 
+ xlen
); 
6627                                         bzero(&dp
->d_name
[dp
->d_namlen 
+ 1], fhlen
); 
6629                                 *(time_t*)(&dp
->d_name
[dp
->d_namlen 
+ 1 + fhlen
]) = now
.tv_sec
; 
6630                                 dp
->d_reclen 
= reclen
; 
6631                                 nfs_rdirplus_update_node_attrs(dnp
, dp
, fh
, nvattrp
, &savedxid
); 
6633                         padstart 
= dp
->d_name 
+ dp
->d_namlen 
+ 1 + xlen
; 
6634                         ndbhp
->ndbh_count
++; 
6635                         lastcookie 
= cookie
; 
6636                         /* advance to next direntry in buffer */ 
6637                         dp 
= NFS_DIRENTRY_NEXT(dp
); 
6638                         ndbhp
->ndbh_entry_end 
= (char*)dp 
- bp
->nb_data
; 
6639                         /* zero out the pad bytes */ 
6640                         padlen 
= (char*)dp 
- padstart
; 
6642                                 bzero(padstart
, padlen
); 
6644                         /* check for more entries */ 
6645                         nfsm_chain_get_32(error
, &nmrep
, more_entries
); 
6648                 /* Finally, get the eof boolean */ 
6649                 nfsm_chain_get_32(error
, &nmrep
, eof
); 
6652                         ndbhp
->ndbh_flags 
|= (NDB_FULL 
| NDB_EOF
); 
6653                         nfs_node_lock_force(dnp
); 
6654                         dnp
->n_eofcookie 
= lastcookie
; 
6656                                 dnp
->n_rdirplusstamp_eof 
= now
.tv_sec
; 
6658                         nfs_node_unlock(dnp
); 
6663                         nfs_buf_release(bp
, 0); 
6667                 if ((lockerror 
= nfs_node_lock(dnp
))) { 
6671                 nfsm_chain_cleanup(&nmrep
); 
6672                 nfsm_chain_null(&nmreq
); 
6675         if (bp_dropped 
&& bp
) { 
6676                 nfs_buf_release(bp
, 0); 
6679                 nfs_node_unlock(dnp
); 
6681         nfsm_chain_cleanup(&nmreq
); 
6682         nfsm_chain_cleanup(&nmrep
); 
6683         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
6684         return bp_dropped 
? NFSERR_DIRBUFDROPPED 
: error
; 
6688  * Silly rename. To make the NFS filesystem that is stateless look a little 
6689  * more like the "ufs" a remove of an active vnode is translated to a rename 
6690  * to a funny looking filename that is removed by nfs_vnop_inactive on the 
6691  * nfsnode. There is the potential for another process on a different client 
6692  * to create the same funny name between when the lookitup() fails and the 
6693  * rename() completes, but... 
6696 /* format of "random" silly names - includes a number and pid */ 
6697 /* (note: shouldn't exceed size of nfs_sillyrename.nsr_name) */ 
6698 #define NFS_SILLYNAME_FORMAT ".nfs.%08x.%04x" 
6699 /* starting from zero isn't silly enough */ 
6700 static uint32_t nfs_sillyrename_number 
= 0x20051025; 
6706         struct componentname 
*cnp
, 
6709         struct nfs_sillyrename 
*nsp
; 
6714         struct nfsmount 
*nmp
; 
6716         nmp 
= NFSTONMP(dnp
); 
6717         if (nfs_mount_gone(nmp
)) { 
6721         nfs_name_cache_purge(dnp
, np
, cnp
, ctx
); 
6723         MALLOC(nsp
, struct nfs_sillyrename 
*, 
6724             sizeof(struct nfs_sillyrename
), M_TEMP
, M_WAITOK
); 
6728         cred 
= vfs_context_ucred(ctx
); 
6729         kauth_cred_ref(cred
); 
6730         nsp
->nsr_cred 
= cred
; 
6732         error 
= vnode_ref(NFSTOV(dnp
)); 
6737         /* Fudge together a funny name */ 
6738         pid 
= vfs_context_pid(ctx
); 
6739         num 
= OSAddAtomic(1, &nfs_sillyrename_number
); 
6740         nsp
->nsr_namlen 
= snprintf(nsp
->nsr_name
, sizeof(nsp
->nsr_name
), 
6741             NFS_SILLYNAME_FORMAT
, num
, (pid 
& 0xffff)); 
6742         if (nsp
->nsr_namlen 
>= (int)sizeof(nsp
->nsr_name
)) { 
6743                 nsp
->nsr_namlen 
= sizeof(nsp
->nsr_name
) - 1; 
6746         /* Try lookitups until we get one that isn't there */ 
6747         while (nfs_lookitup(dnp
, nsp
->nsr_name
, nsp
->nsr_namlen
, ctx
, NULL
) == 0) { 
6748                 num 
= OSAddAtomic(1, &nfs_sillyrename_number
); 
6749                 nsp
->nsr_namlen 
= snprintf(nsp
->nsr_name
, sizeof(nsp
->nsr_name
), 
6750                     NFS_SILLYNAME_FORMAT
, num
, (pid 
& 0xffff)); 
6751                 if (nsp
->nsr_namlen 
>= (int)sizeof(nsp
->nsr_name
)) { 
6752                         nsp
->nsr_namlen 
= sizeof(nsp
->nsr_name
) - 1; 
6756         /* now, do the rename */ 
6757         error 
= nmp
->nm_funcs
->nf_rename_rpc(dnp
, cnp
->cn_nameptr
, cnp
->cn_namelen
, 
6758             dnp
, nsp
->nsr_name
, nsp
->nsr_namlen
, ctx
); 
6760         /* Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. */ 
6761         if (error 
== ENOENT
) { 
6765                 nfs_node_lock_force(dnp
); 
6766                 if (dnp
->n_flag 
& NNEGNCENTRIES
) { 
6767                         dnp
->n_flag 
&= ~NNEGNCENTRIES
; 
6768                         cache_purge_negatives(NFSTOV(dnp
)); 
6770                 nfs_node_unlock(dnp
); 
6772         FSDBG(267, dnp
, np
, num
, error
); 
6776         error 
= nfs_lookitup(dnp
, nsp
->nsr_name
, nsp
->nsr_namlen
, ctx
, &np
); 
6777         nfs_node_lock_force(np
); 
6778         np
->n_sillyrename 
= nsp
; 
6779         nfs_node_unlock(np
); 
6782         vnode_rele(NFSTOV(dnp
)); 
6784         nsp
->nsr_cred 
= NOCRED
; 
6785         kauth_cred_unref(&cred
); 
6791 nfs3_lookup_rpc_async( 
6796         struct nfsreq 
**reqp
) 
6798         struct nfsmount 
*nmp
; 
6799         struct nfsm_chain nmreq
; 
6800         int error 
= 0, nfsvers
; 
6802         nmp 
= NFSTONMP(dnp
); 
6803         if (nfs_mount_gone(nmp
)) { 
6806         nfsvers 
= nmp
->nm_vers
; 
6808         nfsm_chain_null(&nmreq
); 
6810         nfsm_chain_build_alloc_init(error
, &nmreq
, 
6811             NFSX_FH(nfsvers
) + NFSX_UNSIGNED 
+ nfsm_rndup(namelen
)); 
6812         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, dnp
->n_fhp
, dnp
->n_fhsize
); 
6813         nfsm_chain_add_name(error
, &nmreq
, name
, namelen
, nmp
); 
6814         nfsm_chain_build_done(error
, &nmreq
); 
6816         error 
= nfs_request_async(dnp
, NULL
, &nmreq
, NFSPROC_LOOKUP
, 
6817             vfs_context_thread(ctx
), vfs_context_ucred(ctx
), NULL
, 0, NULL
, reqp
); 
6819         nfsm_chain_cleanup(&nmreq
); 
6824 nfs3_lookup_rpc_async_finish( 
6826         __unused 
char *name
, 
6827         __unused 
int namelen
, 
6832         struct nfs_vattr 
*nvap
) 
6834         int error 
= 0, lockerror 
= ENOENT
, status 
= 0, nfsvers
, attrflag
; 
6836         struct nfsmount 
*nmp
; 
6837         struct nfsm_chain nmrep
; 
6839         nmp 
= NFSTONMP(dnp
); 
6843         nfsvers 
= nmp
->nm_vers
; 
6845         nfsm_chain_null(&nmrep
); 
6847         error 
= nfs_request_async_finish(req
, &nmrep
, xidp
, &status
); 
6849         if ((lockerror 
= nfs_node_lock(dnp
))) { 
6853         if (error 
|| status
) { 
6854                 if (nfsvers 
== NFS_VER3
) { 
6855                         nfsm_chain_postop_attr_update(error
, &nmrep
, dnp
, &xid
); 
6863         nfsmout_if(error 
|| !fhp 
|| !nvap
); 
6865         /* get the file handle */ 
6866         nfsm_chain_get_fh(error
, &nmrep
, nfsvers
, fhp
); 
6868         /* get the attributes */ 
6869         if (nfsvers 
== NFS_VER3
) { 
6870                 nfsm_chain_postop_attr_get(nmp
, error
, &nmrep
, attrflag
, nvap
); 
6871                 nfsm_chain_postop_attr_update(error
, &nmrep
, dnp
, &xid
); 
6872                 if (!error 
&& !attrflag
) { 
6873                         error 
= nfs3_getattr_rpc(NULL
, NFSTOMP(dnp
), fhp
->fh_data
, fhp
->fh_len
, 0, ctx
, nvap
, xidp
); 
6876                 error 
= nfs_parsefattr(nmp
, &nmrep
, nfsvers
, nvap
); 
6880                 nfs_node_unlock(dnp
); 
6882         nfsm_chain_cleanup(&nmrep
); 
6887  * Look up a file name and optionally either update the file handle or 
6888  * allocate an nfsnode, depending on the value of npp. 
6889  * npp == NULL  --> just do the lookup 
6890  * *npp == NULL --> allocate a new nfsnode and make sure attributes are 
6892  * *npp != NULL --> update the file handle in the vnode 
6903         nfsnode_t np
, newnp 
= NULL
; 
6906         struct nfsmount 
*nmp
; 
6907         struct nfs_vattr 
*nvattr
; 
6910         nmp 
= NFSTONMP(dnp
); 
6911         if (nfs_mount_gone(nmp
)) { 
6915         if (NFS_BITMAP_ISSET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_MAXNAME
) && 
6916             (namelen 
> nmp
->nm_fsattr
.nfsa_maxname
)) { 
6917                 return ENAMETOOLONG
; 
6920         fh 
= zalloc(nfs_fhandle_zone
); 
6921         req 
= zalloc_flags(nfs_req_zone
, Z_WAITOK
); 
6922         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
6923         NVATTR_INIT(nvattr
); 
6925         /* check for lookup of "." */ 
6926         if ((name
[0] == '.') && (namelen 
== 1)) { 
6927                 /* skip lookup, we know who we are */ 
6933         error 
= nmp
->nm_funcs
->nf_lookup_rpc_async(dnp
, name
, namelen
, ctx
, &req
); 
6935         error 
= nmp
->nm_funcs
->nf_lookup_rpc_async_finish(dnp
, name
, namelen
, ctx
, req
, &xid
, fh
, nvattr
); 
6936         nfsmout_if(!npp 
|| error
); 
6940                 if (fh
->fh_len 
!= np
->n_fhsize
) { 
6941                         u_char 
*oldbuf 
= (np
->n_fhsize 
> NFS_SMALLFH
) ? np
->n_fhp 
: NULL
; 
6942                         if (fh
->fh_len 
> NFS_SMALLFH
) { 
6943                                 MALLOC(np
->n_fhp
, u_char 
*, fh
->fh_len
, M_NFSBIGFH
, M_WAITOK
); 
6950                                 np
->n_fhp 
= &np
->n_fh
[0]; 
6953                                 FREE(oldbuf
, M_NFSBIGFH
); 
6956                 bcopy(fh
->fh_data
, np
->n_fhp
, fh
->fh_len
); 
6957                 np
->n_fhsize 
= fh
->fh_len
; 
6958                 nfs_node_lock_force(np
); 
6959                 error 
= nfs_loadattrcache(np
, nvattr
, &xid
, 0); 
6960                 nfs_node_unlock(np
); 
6963         } else if (NFS_CMPFH(dnp
, fh
->fh_data
, fh
->fh_len
)) { 
6964                 nfs_node_lock_force(dnp
); 
6965                 if (dnp
->n_xid 
<= xid
) { 
6966                         error 
= nfs_loadattrcache(dnp
, nvattr
, &xid
, 0); 
6968                 nfs_node_unlock(dnp
); 
6972                 struct componentname cn
, *cnp 
= &cn
; 
6973                 bzero(cnp
, sizeof(*cnp
)); 
6974                 cnp
->cn_nameptr 
= name
; 
6975                 cnp
->cn_namelen 
= namelen
; 
6976                 error 
= nfs_nget(NFSTOMP(dnp
), dnp
, cnp
, fh
->fh_data
, fh
->fh_len
, 
6977                     nvattr
, &xid
, req
->r_auth
, NG_MAKEENTRY
, &np
); 
6983         if (npp 
&& !*npp 
&& !error
) { 
6986         NVATTR_CLEANUP(nvattr
); 
6987         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
6988         NFS_ZFREE(nfs_req_zone
, req
); 
6989         FREE(nvattr
, M_TEMP
); 
6994  * set up and initialize a "._" file lookup structure used for 
6995  * performing async lookups. 
6998 nfs_dulookup_init(struct nfs_dulookup 
*dulp
, nfsnode_t dnp
, const char *name
, int namelen
, vfs_context_t ctx
) 
7000         int error
, du_namelen
; 
7002         struct nfsmount 
*nmp 
= NFSTONMP(dnp
); 
7004         /* check for ._ file in name cache */ 
7006         bzero(&dulp
->du_cn
, sizeof(dulp
->du_cn
)); 
7007         du_namelen 
= namelen 
+ 2; 
7008         if (!nmp 
|| NMFLAG(nmp
, NONEGNAMECACHE
)) { 
7011         if ((namelen 
>= 2) && (name
[0] == '.') && (name
[1] == '_')) { 
7014         if (du_namelen 
>= (int)sizeof(dulp
->du_smallname
)) { 
7015                 MALLOC(dulp
->du_cn
.cn_nameptr
, char *, du_namelen 
+ 1, M_TEMP
, M_WAITOK
); 
7017                 dulp
->du_cn
.cn_nameptr 
= dulp
->du_smallname
; 
7019         if (!dulp
->du_cn
.cn_nameptr
) { 
7022         dulp
->du_cn
.cn_namelen 
= du_namelen
; 
7023         snprintf(dulp
->du_cn
.cn_nameptr
, du_namelen 
+ 1, "._%s", name
); 
7024         dulp
->du_cn
.cn_nameptr
[du_namelen
] = '\0'; 
7025         dulp
->du_cn
.cn_nameiop 
= LOOKUP
; 
7026         dulp
->du_cn
.cn_flags 
= MAKEENTRY
; 
7028         error 
= cache_lookup(NFSTOV(dnp
), &du_vp
, &dulp
->du_cn
); 
7031         } else if (!error
) { 
7032                 nmp 
= NFSTONMP(dnp
); 
7033                 if (nmp 
&& (nmp
->nm_vers 
> NFS_VER2
) && NMFLAG(nmp
, RDIRPLUS
)) { 
7034                         /* if rdirplus, try dir buf cache lookup */ 
7035                         nfsnode_t du_np 
= NULL
; 
7036                         if (!nfs_dir_buf_cache_lookup(dnp
, &du_np
, &dulp
->du_cn
, ctx
, 0, NULL
) && du_np
) { 
7037                                 /* dir buf cache hit */ 
7038                                 du_vp 
= NFSTOV(du_np
); 
7044                         dulp
->du_flags 
|= NFS_DULOOKUP_DOIT
; 
7050  * start an async "._" file lookup request 
7053 nfs_dulookup_start(struct nfs_dulookup 
*dulp
, nfsnode_t dnp
, vfs_context_t ctx
) 
7055         struct nfsmount 
*nmp 
= NFSTONMP(dnp
); 
7056         struct nfsreq 
*req 
= &dulp
->du_req
; 
7058         if (!nmp 
|| !(dulp
->du_flags 
& NFS_DULOOKUP_DOIT
) || (dulp
->du_flags 
& NFS_DULOOKUP_INPROG
)) { 
7061         if (!nmp
->nm_funcs
->nf_lookup_rpc_async(dnp
, dulp
->du_cn
.cn_nameptr
, 
7062             dulp
->du_cn
.cn_namelen
, ctx
, &req
)) { 
7063                 dulp
->du_flags 
|= NFS_DULOOKUP_INPROG
; 
7068  * finish an async "._" file lookup request and clean up the structure 
7071 nfs_dulookup_finish(struct nfs_dulookup 
*dulp
, nfsnode_t dnp
, vfs_context_t ctx
) 
7073         struct nfsmount 
*nmp 
= NFSTONMP(dnp
); 
7078         struct nfs_vattr 
*nvattr
; 
7080         if (!nmp 
|| !(dulp
->du_flags 
& NFS_DULOOKUP_INPROG
)) { 
7084         fh 
= zalloc(nfs_fhandle_zone
); 
7085         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
7086         NVATTR_INIT(nvattr
); 
7087         error 
= nmp
->nm_funcs
->nf_lookup_rpc_async_finish(dnp
, dulp
->du_cn
.cn_nameptr
, 
7088             dulp
->du_cn
.cn_namelen
, ctx
, &dulp
->du_req
, &xid
, fh
, nvattr
); 
7089         dulp
->du_flags 
&= ~NFS_DULOOKUP_INPROG
; 
7090         if (error 
== ENOENT
) { 
7091                 /* add a negative entry in the name cache */ 
7092                 nfs_node_lock_force(dnp
); 
7093                 cache_enter(NFSTOV(dnp
), NULL
, &dulp
->du_cn
); 
7094                 dnp
->n_flag 
|= NNEGNCENTRIES
; 
7095                 nfs_node_unlock(dnp
); 
7096         } else if (!error
) { 
7097                 error 
= nfs_nget(NFSTOMP(dnp
), dnp
, &dulp
->du_cn
, fh
->fh_data
, fh
->fh_len
, 
7098                     nvattr
, &xid
, dulp
->du_req
.r_auth
, NG_MAKEENTRY
, &du_np
); 
7100                         nfs_node_unlock(du_np
); 
7101                         vnode_put(NFSTOV(du_np
)); 
7104         NVATTR_CLEANUP(nvattr
); 
7105         NFS_ZFREE(nfs_fhandle_zone
, fh
); 
7106         FREE(nvattr
, M_TEMP
); 
7108         if (dulp
->du_flags 
& NFS_DULOOKUP_INPROG
) { 
7109                 nfs_request_async_cancel(&dulp
->du_req
); 
7111         if (dulp
->du_cn
.cn_nameptr 
&& (dulp
->du_cn
.cn_nameptr 
!= dulp
->du_smallname
)) { 
7112                 FREE(dulp
->du_cn
.cn_nameptr
, M_TEMP
); 
7118  * NFS Version 3 commit RPC 
7128         struct nfsmount 
*nmp
; 
7129         int error 
= 0, lockerror
, status 
= 0, wccpostattr 
= 0, nfsvers
; 
7130         struct timespec premtime 
= { .tv_sec 
= 0, .tv_nsec 
= 0 }; 
7131         u_int64_t xid
, newwverf
; 
7133         struct nfsm_chain nmreq
, nmrep
; 
7136         FSDBG(521, np
, offset
, count
, nmp 
? nmp
->nm_state 
: 0); 
7137         if (nfs_mount_gone(nmp
)) { 
7140         if (!(nmp
->nm_state 
& NFSSTA_HASWRITEVERF
)) { 
7143         nfsvers 
= nmp
->nm_vers
; 
7144         count32 
= count 
> UINT32_MAX 
? 0 : (uint32_t)count
; 
7146         nfsm_chain_null(&nmreq
); 
7147         nfsm_chain_null(&nmrep
); 
7149         nfsm_chain_build_alloc_init(error
, &nmreq
, NFSX_FH(NFS_VER3
)); 
7150         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
); 
7151         nfsm_chain_add_64(error
, &nmreq
, offset
); 
7152         nfsm_chain_add_32(error
, &nmreq
, count32
); 
7153         nfsm_chain_build_done(error
, &nmreq
); 
7155         error 
= nfs_request2(np
, NULL
, &nmreq
, NFSPROC_COMMIT
, 
7156             current_thread(), cred
, NULL
, 0, &nmrep
, &xid
, &status
); 
7157         if ((lockerror 
= nfs_node_lock(np
))) { 
7160         /* can we do anything useful with the wcc info? */ 
7161         nfsm_chain_get_wcc_data(error
, &nmrep
, np
, &premtime
, &wccpostattr
, &xid
); 
7163                 nfs_node_unlock(np
); 
7168         nfsm_chain_get_64(error
, &nmrep
, newwverf
); 
7170         lck_mtx_lock(&nmp
->nm_lock
); 
7171         if (nmp
->nm_verf 
!= newwverf
) { 
7172                 nmp
->nm_verf 
= newwverf
; 
7174         if (wverf 
!= newwverf
) { 
7175                 error 
= NFSERR_STALEWRITEVERF
; 
7177         lck_mtx_unlock(&nmp
->nm_lock
); 
7179         nfsm_chain_cleanup(&nmreq
); 
7180         nfsm_chain_cleanup(&nmrep
); 
7187         __unused 
struct vnop_blockmap_args 
/* { 
7188                                             *  struct vnodeop_desc *a_desc; 
7203  * fsync vnode op. Just call nfs_flush(). 
7208         struct vnop_fsync_args 
/* { 
7209                                 *  struct vnodeop_desc *a_desc; 
7212                                 *  vfs_context_t a_context; 
7215         return nfs_flush(VTONFS(ap
->a_vp
), ap
->a_waitfor
, vfs_context_thread(ap
->a_context
), 0); 
7220  * Do an NFS pathconf RPC. 
7225         struct nfs_fsattr 
*nfsap
, 
7229         int error 
= 0, lockerror
, status 
= 0, nfsvers
; 
7230         struct nfsm_chain nmreq
, nmrep
; 
7231         struct nfsmount 
*nmp 
= NFSTONMP(np
); 
7234         if (nfs_mount_gone(nmp
)) { 
7237         nfsvers 
= nmp
->nm_vers
; 
7239         nfsm_chain_null(&nmreq
); 
7240         nfsm_chain_null(&nmrep
); 
7242         /* fetch pathconf info from server */ 
7243         nfsm_chain_build_alloc_init(error
, &nmreq
, NFSX_FH(NFS_VER3
)); 
7244         nfsm_chain_add_fh(error
, &nmreq
, nfsvers
, np
->n_fhp
, np
->n_fhsize
); 
7245         nfsm_chain_build_done(error
, &nmreq
); 
7247         error 
= nfs_request(np
, NULL
, &nmreq
, NFSPROC_PATHCONF
, ctx
, NULL
, &nmrep
, &xid
, &status
); 
7248         if ((lockerror 
= nfs_node_lock(np
))) { 
7251         nfsm_chain_postop_attr_update(error
, &nmrep
, np
, &xid
); 
7253                 nfs_node_unlock(np
); 
7258         nfsm_chain_get_32(error
, &nmrep
, nfsap
->nfsa_maxlink
); 
7259         nfsm_chain_get_32(error
, &nmrep
, nfsap
->nfsa_maxname
); 
7260         nfsap
->nfsa_flags 
&= ~(NFS_FSFLAG_NO_TRUNC 
| NFS_FSFLAG_CHOWN_RESTRICTED 
| NFS_FSFLAG_CASE_INSENSITIVE 
| NFS_FSFLAG_CASE_PRESERVING
); 
7261         nfsm_chain_get_32(error
, &nmrep
, val
); 
7263                 nfsap
->nfsa_flags 
|= NFS_FSFLAG_NO_TRUNC
; 
7265         nfsm_chain_get_32(error
, &nmrep
, val
); 
7267                 nfsap
->nfsa_flags 
|= NFS_FSFLAG_CHOWN_RESTRICTED
; 
7269         nfsm_chain_get_32(error
, &nmrep
, val
); 
7271                 nfsap
->nfsa_flags 
|= NFS_FSFLAG_CASE_INSENSITIVE
; 
7273         nfsm_chain_get_32(error
, &nmrep
, val
); 
7275                 nfsap
->nfsa_flags 
|= NFS_FSFLAG_CASE_PRESERVING
; 
7277         NFS_BITMAP_SET(nfsap
->nfsa_bitmap
, NFS_FATTR_MAXLINK
); 
7278         NFS_BITMAP_SET(nfsap
->nfsa_bitmap
, NFS_FATTR_MAXNAME
); 
7279         NFS_BITMAP_SET(nfsap
->nfsa_bitmap
, NFS_FATTR_NO_TRUNC
); 
7280         NFS_BITMAP_SET(nfsap
->nfsa_bitmap
, NFS_FATTR_CHOWN_RESTRICTED
); 
7281         NFS_BITMAP_SET(nfsap
->nfsa_bitmap
, NFS_FATTR_CASE_INSENSITIVE
); 
7282         NFS_BITMAP_SET(nfsap
->nfsa_bitmap
, NFS_FATTR_CASE_PRESERVING
); 
7284         nfsm_chain_cleanup(&nmreq
); 
7285         nfsm_chain_cleanup(&nmrep
); 
7289 /* save pathconf info for NFSv3 mount */ 
7291 nfs3_pathconf_cache(struct nfsmount 
*nmp
, struct nfs_fsattr 
*nfsap
) 
7293         nmp
->nm_fsattr
.nfsa_maxlink 
= nfsap
->nfsa_maxlink
; 
7294         nmp
->nm_fsattr
.nfsa_maxname 
= nfsap
->nfsa_maxname
; 
7295         nmp
->nm_fsattr
.nfsa_flags 
&= ~(NFS_FSFLAG_NO_TRUNC 
| NFS_FSFLAG_CHOWN_RESTRICTED 
| NFS_FSFLAG_CASE_INSENSITIVE 
| NFS_FSFLAG_CASE_PRESERVING
); 
7296         nmp
->nm_fsattr
.nfsa_flags 
|= nfsap
->nfsa_flags 
& NFS_FSFLAG_NO_TRUNC
; 
7297         nmp
->nm_fsattr
.nfsa_flags 
|= nfsap
->nfsa_flags 
& NFS_FSFLAG_CHOWN_RESTRICTED
; 
7298         nmp
->nm_fsattr
.nfsa_flags 
|= nfsap
->nfsa_flags 
& NFS_FSFLAG_CASE_INSENSITIVE
; 
7299         nmp
->nm_fsattr
.nfsa_flags 
|= nfsap
->nfsa_flags 
& NFS_FSFLAG_CASE_PRESERVING
; 
7300         NFS_BITMAP_SET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_MAXLINK
); 
7301         NFS_BITMAP_SET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_MAXNAME
); 
7302         NFS_BITMAP_SET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_NO_TRUNC
); 
7303         NFS_BITMAP_SET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_CHOWN_RESTRICTED
); 
7304         NFS_BITMAP_SET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_CASE_INSENSITIVE
); 
7305         NFS_BITMAP_SET(nmp
->nm_fsattr
.nfsa_bitmap
, NFS_FATTR_CASE_PRESERVING
); 
7306         nmp
->nm_state 
|= NFSSTA_GOTPATHCONF
; 
7310  * Return POSIX pathconf information applicable to nfs. 
7312  * The NFS V2 protocol doesn't support this, so just return EINVAL 
7318         struct vnop_pathconf_args 
/* { 
7319                                    *  struct vnodeop_desc *a_desc; 
7322                                    *  int32_t *a_retval; 
7323                                    *  vfs_context_t a_context; 
7326         vnode_t vp 
= ap
->a_vp
; 
7327         nfsnode_t np 
= VTONFS(vp
); 
7328         struct nfsmount 
*nmp
; 
7329         struct nfs_fsattr nfsa
, *nfsap
; 
7331         uint64_t maxFileSize
; 
7335         if (nfs_mount_gone(nmp
)) { 
7339         switch (ap
->a_name
) { 
7342         case _PC_CHOWN_RESTRICTED
: 
7344         case _PC_CASE_SENSITIVE
: 
7345         case _PC_CASE_PRESERVING
: 
7347         case _PC_FILESIZEBITS
: 
7348                 if (nmp
->nm_vers 
== NFS_VER2
) { 
7353         case _PC_XATTR_SIZE_BITS
: 
7354                 /* Do we support xattrs natively? */ 
7355                 if (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_NAMED_ATTR
) { 
7358                 /* No... so just return an error */ 
7361                 /* don't bother contacting the server if we know the answer */ 
7365         if (nmp
->nm_vers 
== NFS_VER2
) { 
7369         lck_mtx_lock(&nmp
->nm_lock
); 
7370         if (nmp
->nm_vers 
== NFS_VER3
) { 
7371                 if (!(nmp
->nm_state 
& NFSSTA_GOTPATHCONF
) || (!(nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_HOMOGENEOUS
) && nmp
->nm_dnp 
!= np
)) { 
7372                         /* no pathconf info cached OR we were asked for non-root pathconf and filesystem does not support FSF_HOMOGENEOUS */ 
7373                         lck_mtx_unlock(&nmp
->nm_lock
); 
7374                         NFS_CLEAR_ATTRIBUTES(nfsa
.nfsa_bitmap
); 
7375                         error 
= nfs3_pathconf_rpc(np
, &nfsa
, ap
->a_context
); 
7380                         if (nfs_mount_gone(nmp
)) { 
7383                         lck_mtx_lock(&nmp
->nm_lock
); 
7384                         if (nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_HOMOGENEOUS
) { 
7385                                 /* all files have the same pathconf info, */ 
7386                                 /* so cache a copy of the results */ 
7387                                 nfs3_pathconf_cache(nmp
, &nfsa
); 
7391                         nfsap 
= &nmp
->nm_fsattr
; 
7395         else if (!(nmp
->nm_fsattr
.nfsa_flags 
& NFS_FSFLAG_HOMOGENEOUS
)) { 
7396                 /* no pathconf info cached */ 
7397                 lck_mtx_unlock(&nmp
->nm_lock
); 
7398                 NFS_CLEAR_ATTRIBUTES(nfsa
.nfsa_bitmap
); 
7399                 error 
= nfs4_pathconf_rpc(np
, &nfsa
, ap
->a_context
); 
7404                 if (nfs_mount_gone(nmp
)) { 
7407                 lck_mtx_lock(&nmp
->nm_lock
); 
7412                 nfsap 
= &nmp
->nm_fsattr
; 
7414         switch (ap
->a_name
) { 
7416                 if (NFS_BITMAP_ISSET(nfsap
->nfsa_bitmap
, NFS_FATTR_MAXLINK
)) { 
7417                         *ap
->a_retval 
= nfsap
->nfsa_maxlink
; 
7419                 } else if ((nmp
->nm_vers 
== NFS_VER4
) && NFS_BITMAP_ISSET(np
->n_vattr
.nva_bitmap
, NFS_FATTR_MAXLINK
)) { 
7420                         *ap
->a_retval 
= np
->n_vattr
.nva_maxlink
; 
7427                 if (NFS_BITMAP_ISSET(nfsap
->nfsa_bitmap
, NFS_FATTR_MAXNAME
)) { 
7428                         *ap
->a_retval 
= nfsap
->nfsa_maxname
; 
7433         case _PC_CHOWN_RESTRICTED
: 
7434                 if (NFS_BITMAP_ISSET(nfsap
->nfsa_bitmap
, NFS_FATTR_CHOWN_RESTRICTED
)) { 
7435                         *ap
->a_retval 
= (nfsap
->nfsa_flags 
& NFS_FSFLAG_CHOWN_RESTRICTED
) ? 200112 /* _POSIX_CHOWN_RESTRICTED */ : 0; 
7441                 if (NFS_BITMAP_ISSET(nfsap
->nfsa_bitmap
, NFS_FATTR_NO_TRUNC
)) { 
7442                         *ap
->a_retval 
= (nfsap
->nfsa_flags 
& NFS_FSFLAG_NO_TRUNC
) ? 200112 /* _POSIX_NO_TRUNC */ : 0; 
7447         case _PC_CASE_SENSITIVE
: 
7448                 if (NFS_BITMAP_ISSET(nfsap
->nfsa_bitmap
, NFS_FATTR_CASE_INSENSITIVE
)) { 
7449                         *ap
->a_retval 
= (nfsap
->nfsa_flags 
& NFS_FSFLAG_CASE_INSENSITIVE
) ? 0 : 1; 
7454         case _PC_CASE_PRESERVING
: 
7455                 if (NFS_BITMAP_ISSET(nfsap
->nfsa_bitmap
, NFS_FATTR_CASE_PRESERVING
)) { 
7456                         *ap
->a_retval 
= (nfsap
->nfsa_flags 
& NFS_FSFLAG_CASE_PRESERVING
) ? 1 : 0; 
7461         case _PC_XATTR_SIZE_BITS
: /* same as file size bits if named attrs supported */ 
7462         case _PC_FILESIZEBITS
: 
7463                 if (!NFS_BITMAP_ISSET(nfsap
->nfsa_bitmap
, NFS_FATTR_MAXFILESIZE
)) { 
7468                 maxFileSize 
= nfsap
->nfsa_maxfilesize
; 
7470                 if (maxFileSize 
& 0xffffffff00000000ULL
) { 
7474                 if (maxFileSize 
& 0xffff0000) { 
7478                 if (maxFileSize 
& 0xff00) { 
7482                 if (maxFileSize 
& 0xf0) { 
7486                 if (maxFileSize 
& 0xc) { 
7490                 if (maxFileSize 
& 0x2) { 
7493                 *ap
->a_retval 
= nbits
; 
7499         lck_mtx_unlock(&nmp
->nm_lock
); 
7505  * Read wrapper for special devices. 
7509         struct vnop_read_args 
/* { 
7510                                *  struct vnodeop_desc *a_desc; 
7512                                *  struct uio *a_uio; 
7514                                *  vfs_context_t a_context; 
7517         nfsnode_t np 
= VTONFS(ap
->a_vp
); 
7518         struct timespec now
; 
7524         if ((error 
= nfs_node_lock(np
))) { 
7529         np
->n_atim
.tv_sec 
= now
.tv_sec
; 
7530         np
->n_atim
.tv_nsec 
= now
.tv_nsec
; 
7531         nfs_node_unlock(np
); 
7532         return VOCALL(spec_vnodeop_p
, VOFFSET(vnop_read
), ap
); 
7536  * Write wrapper for special devices. 
7540         struct vnop_write_args 
/* { 
7541                                 *  struct vnodeop_desc *a_desc; 
7543                                 *  struct uio *a_uio; 
7545                                 *  vfs_context_t a_context; 
7548         nfsnode_t np 
= VTONFS(ap
->a_vp
); 
7549         struct timespec now
; 
7555         if ((error 
= nfs_node_lock(np
))) { 
7560         np
->n_mtim
.tv_sec 
= now
.tv_sec
; 
7561         np
->n_mtim
.tv_nsec 
= now
.tv_nsec
; 
7562         nfs_node_unlock(np
); 
7563         return VOCALL(spec_vnodeop_p
, VOFFSET(vnop_write
), ap
); 
7567  * Close wrapper for special devices. 
7569  * Update the times on the nfsnode then do device close. 
7573         struct vnop_close_args 
/* { 
7574                                 *  struct vnodeop_desc *a_desc; 
7577                                 *  vfs_context_t a_context; 
7580         vnode_t vp 
= ap
->a_vp
; 
7581         nfsnode_t np 
= VTONFS(vp
); 
7582         struct vnode_attr vattr
; 
7586         if ((error 
= nfs_node_lock(np
))) { 
7589         if (np
->n_flag 
& (NACC 
| NUPD
)) { 
7591                 if (!vnode_isinuse(vp
, 0) && (mp 
= vnode_mount(vp
)) && !vfs_isrdonly(mp
)) { 
7593                         if (np
->n_flag 
& NACC
) { 
7594                                 vattr
.va_access_time 
= np
->n_atim
; 
7595                                 VATTR_SET_ACTIVE(&vattr
, va_access_time
); 
7597                         if (np
->n_flag 
& NUPD
) { 
7598                                 vattr
.va_modify_time 
= np
->n_mtim
; 
7599                                 VATTR_SET_ACTIVE(&vattr
, va_modify_time
); 
7601                         nfs_node_unlock(np
); 
7602                         vnode_setattr(vp
, &vattr
, ap
->a_context
); 
7604                         nfs_node_unlock(np
); 
7607                 nfs_node_unlock(np
); 
7609         return VOCALL(spec_vnodeop_p
, VOFFSET(vnop_close
), ap
); 
7613 extern vnop_t 
**fifo_vnodeop_p
; 
7616  * Read wrapper for fifos. 
7620         struct vnop_read_args 
/* { 
7621                                *  struct vnodeop_desc *a_desc; 
7623                                *  struct uio *a_uio; 
7625                                *  vfs_context_t a_context; 
7628         nfsnode_t np 
= VTONFS(ap
->a_vp
); 
7629         struct timespec now
; 
7635         if ((error 
= nfs_node_lock(np
))) { 
7640         np
->n_atim
.tv_sec 
= now
.tv_sec
; 
7641         np
->n_atim
.tv_nsec 
= now
.tv_nsec
; 
7642         nfs_node_unlock(np
); 
7643         return VOCALL(fifo_vnodeop_p
, VOFFSET(vnop_read
), ap
); 
7647  * Write wrapper for fifos. 
7651         struct vnop_write_args 
/* { 
7652                                 *  struct vnodeop_desc *a_desc; 
7654                                 *  struct uio *a_uio; 
7656                                 *  vfs_context_t a_context; 
7659         nfsnode_t np 
= VTONFS(ap
->a_vp
); 
7660         struct timespec now
; 
7666         if ((error 
= nfs_node_lock(np
))) { 
7671         np
->n_mtim
.tv_sec 
= now
.tv_sec
; 
7672         np
->n_mtim
.tv_nsec 
= now
.tv_nsec
; 
7673         nfs_node_unlock(np
); 
7674         return VOCALL(fifo_vnodeop_p
, VOFFSET(vnop_write
), ap
); 
7678  * Close wrapper for fifos. 
7680  * Update the times on the nfsnode then do fifo close. 
7684         struct vnop_close_args 
/* { 
7685                                 *  struct vnodeop_desc *a_desc; 
7688                                 *  vfs_context_t a_context; 
7691         vnode_t vp 
= ap
->a_vp
; 
7692         nfsnode_t np 
= VTONFS(vp
); 
7693         struct vnode_attr vattr
; 
7694         struct timespec now
; 
7698         if ((error 
= nfs_node_lock(np
))) { 
7701         if (np
->n_flag 
& (NACC 
| NUPD
)) { 
7703                 if (np
->n_flag 
& NACC
) { 
7704                         np
->n_atim
.tv_sec 
= now
.tv_sec
; 
7705                         np
->n_atim
.tv_nsec 
= now
.tv_nsec
; 
7707                 if (np
->n_flag 
& NUPD
) { 
7708                         np
->n_mtim
.tv_sec 
= now
.tv_sec
; 
7709                         np
->n_mtim
.tv_nsec 
= now
.tv_nsec
; 
7712                 if (!vnode_isinuse(vp
, 1) && (mp 
= vnode_mount(vp
)) && !vfs_isrdonly(mp
)) { 
7714                         if (np
->n_flag 
& NACC
) { 
7715                                 vattr
.va_access_time 
= np
->n_atim
; 
7716                                 VATTR_SET_ACTIVE(&vattr
, va_access_time
); 
7718                         if (np
->n_flag 
& NUPD
) { 
7719                                 vattr
.va_modify_time 
= np
->n_mtim
; 
7720                                 VATTR_SET_ACTIVE(&vattr
, va_modify_time
); 
7722                         nfs_node_unlock(np
); 
7723                         vnode_setattr(vp
, &vattr
, ap
->a_context
); 
7725                         nfs_node_unlock(np
); 
7728                 nfs_node_unlock(np
); 
7730         return VOCALL(fifo_vnodeop_p
, VOFFSET(vnop_close
), ap
); 
7737         struct vnop_ioctl_args 
/* { 
7738                                 *  struct vnodeop_desc *a_desc; 
7740                                 *  u_int32_t a_command; 
7743                                 *  vfs_context_t a_context; 
7746         vfs_context_t ctx 
= ap
->a_context
; 
7747         vnode_t vp 
= ap
->a_vp
; 
7748         struct nfsmount 
*mp 
= VTONMP(vp
); 
7751         struct user_nfs_gss_principal gprinc 
= {}; 
7758         switch (ap
->a_command
) { 
7760                 if (vnode_vfsisrdonly(vp
)) { 
7763                 error 
= nfs_flush(VTONFS(vp
), MNT_WAIT
, vfs_context_thread(ctx
), 0); 
7766         case NFS_IOC_DESTROY_CRED
: 
7767                 if (!auth_is_kerberized(mp
->nm_auth
)) { 
7770                 if ((error 
= nfs_gss_clnt_ctx_remove(mp
, vfs_context_ucred(ctx
))) == ENOENT
) { 
7774         case NFS_IOC_SET_CRED
: 
7775         case NFS_IOC_SET_CRED64
: 
7776                 if (!auth_is_kerberized(mp
->nm_auth
)) { 
7779                 if ((ap
->a_command 
== NFS_IOC_SET_CRED 
&& vfs_context_is64bit(ctx
)) || 
7780                     (ap
->a_command 
== NFS_IOC_SET_CRED64 
&& !vfs_context_is64bit(ctx
))) { 
7783                 if (vfs_context_is64bit(ctx
)) { 
7784                         gprinc 
= *(struct user_nfs_gss_principal 
*)ap
->a_data
; 
7786                         struct nfs_gss_principal 
*tp
; 
7787                         tp 
= (struct nfs_gss_principal 
*)ap
->a_data
; 
7788                         gprinc
.princlen 
= tp
->princlen
; 
7789                         gprinc
.nametype 
= tp
->nametype
; 
7790                         gprinc
.principal 
= CAST_USER_ADDR_T(tp
->principal
); 
7792                 NFS_DBG(NFS_FAC_GSS
, 7, "Enter NFS_FSCTL_SET_CRED (64-bit=%d): principal length %zu name type %d usr pointer 0x%llx\n", vfs_context_is64bit(ctx
), gprinc
.princlen
, gprinc
.nametype
, gprinc
.principal
); 
7793                 if (gprinc
.princlen 
> MAXPATHLEN
) { 
7797                 MALLOC(p
, uint8_t *, gprinc
.princlen 
+ 1, M_TEMP
, M_WAITOK 
| M_ZERO
); 
7801                 assert((user_addr_t
)gprinc
.principal 
== gprinc
.principal
); 
7802                 error 
= copyin((user_addr_t
)gprinc
.principal
, p
, gprinc
.princlen
); 
7804                         NFS_DBG(NFS_FAC_GSS
, 7, "NFS_FSCTL_SET_CRED could not copy in princiapl data of len %zu: %d\n", 
7805                             gprinc
.princlen
, error
); 
7809                 NFS_DBG(NFS_FAC_GSS
, 7, "Seting credential to principal %s\n", p
); 
7810                 error 
= nfs_gss_clnt_ctx_set_principal(mp
, ctx
, p
, gprinc
.princlen
, gprinc
.nametype
); 
7811                 NFS_DBG(NFS_FAC_GSS
, 7, "Seting credential to principal %s returned %d\n", p
, error
); 
7814         case NFS_IOC_GET_CRED
: 
7815         case NFS_IOC_GET_CRED64
: 
7816                 if (!auth_is_kerberized(mp
->nm_auth
)) { 
7819                 if ((ap
->a_command 
== NFS_IOC_GET_CRED 
&& vfs_context_is64bit(ctx
)) || 
7820                     (ap
->a_command 
== NFS_IOC_GET_CRED64 
&& !vfs_context_is64bit(ctx
))) { 
7823                 error 
= nfs_gss_clnt_ctx_get_principal(mp
, ctx
, &gprinc
); 
7827                 if (vfs_context_is64bit(ctx
)) { 
7828                         struct user_nfs_gss_principal 
*upp 
= (struct user_nfs_gss_principal 
*)ap
->a_data
; 
7829                         len 
= upp
->princlen
; 
7830                         if (gprinc
.princlen 
< len
) { 
7831                                 len 
= gprinc
.princlen
; 
7833                         upp
->princlen 
= gprinc
.princlen
; 
7834                         upp
->nametype 
= gprinc
.nametype
; 
7835                         upp
->flags 
= gprinc
.flags
; 
7836                         if (gprinc
.principal
) { 
7837                                 assert((user_addr_t
)upp
->principal 
== upp
->principal
); 
7838                                 error 
= copyout((void *)gprinc
.principal
, (user_addr_t
)upp
->principal
, len
); 
7840                                 upp
->principal 
= USER_ADDR_NULL
; 
7843                         struct nfs_gss_principal 
*u32pp 
= (struct nfs_gss_principal 
*)ap
->a_data
; 
7844                         len 
= u32pp
->princlen
; 
7845                         if (gprinc
.princlen 
< len
) { 
7846                                 len 
= gprinc
.princlen
; 
7848                         u32pp
->princlen 
= gprinc
.princlen
; 
7849                         u32pp
->nametype 
= gprinc
.nametype
; 
7850                         u32pp
->flags 
= gprinc
.flags
; 
7851                         if (gprinc
.principal
) { 
7852                                 error 
= copyout((void *)gprinc
.principal
, u32pp
->principal
, len
); 
7854                                 u32pp
->principal 
= (user32_addr_t
)0; 
7858                         NFS_DBG(NFS_FAC_GSS
, 7, "NFS_FSCTL_GET_CRED could not copy out princiapl data of len %zu: %d\n", 
7859                             gprinc
.princlen
, error
); 
7861                 if (gprinc
.principal
) { 
7862                         void *ptr 
= (void *)gprinc
.principal
; 
7863                         gprinc
.principal 
= 0; 
7866 #endif /* CONFIG_NFS_GSS */ 
7875         __unused 
struct vnop_select_args 
/* { 
7876                                           *  struct vnodeop_desc *a_desc; 
7881                                           *  vfs_context_t a_context; 
7885          * We were once bogusly seltrue() which returns 1.  Is this right? 
7891  * vnode OP for pagein using UPL 
7893  * No buffer I/O, just RPCs straight into the mapped pages. 
7897         struct vnop_pagein_args 
/* { 
7898                                  *  struct vnodeop_desc *a_desc; 
7901                                  *  vm_offset_t a_pl_offset; 
7905                                  *  vfs_context_t a_context; 
7908         vnode_t vp 
= ap
->a_vp
; 
7909         upl_t pl 
= ap
->a_pl
; 
7910         upl_size_t size 
= (upl_size_t
)ap
->a_size
; 
7911         off_t f_offset 
= ap
->a_f_offset
; 
7912         upl_offset_t pl_offset 
= ap
->a_pl_offset
; 
7913         int flags 
= ap
->a_flags
; 
7916         nfsnode_t np 
= VTONFS(vp
); 
7917         size_t nmrsize
, iosize
, txsize
, rxsize
, retsize
; 
7919         struct nfsmount 
*nmp
; 
7921         vm_offset_t ioaddr
, rxaddr
; 
7923         char uio_buf
[UIO_SIZEOF(1)]; 
7924         int nofreeupl 
= flags 
& UPL_NOCOMMIT
; 
7925         upl_page_info_t 
*plinfo
; 
7926 #define MAXPAGINGREQS   16      /* max outstanding RPCs for pagein/pageout */ 
7927         struct nfsreq 
*req
[MAXPAGINGREQS
]; 
7928         int nextsend
, nextwait
; 
7930         uint32_t stategenid 
= 0; 
7932         uint32_t restart 
= 0; 
7935         FSDBG(322, np
, f_offset
, size
, flags
); 
7936         if (pl 
== (upl_t
)NULL
) { 
7937                 panic("nfs_pagein: no upl"); 
7941                 printf("nfs_pagein: invalid size %u", size
); 
7943                         (void) ubc_upl_abort_range(pl
, pl_offset
, size
, 0); 
7947         if (f_offset 
< 0 || f_offset 
>= (off_t
)np
->n_size 
|| (f_offset 
& PAGE_MASK_64
)) { 
7949                         ubc_upl_abort_range(pl
, pl_offset
, size
, 
7950                             UPL_ABORT_ERROR 
| UPL_ABORT_FREE_ON_EMPTY
); 
7955         thd 
= vfs_context_thread(ap
->a_context
); 
7956         cred 
= ubc_getcred(vp
); 
7957         if (!IS_VALID_CRED(cred
)) { 
7958                 cred 
= vfs_context_ucred(ap
->a_context
); 
7961         uio 
= uio_createwithbuffer(1, f_offset
, UIO_SYSSPACE
, UIO_READ
, 
7962             &uio_buf
, sizeof(uio_buf
)); 
7965         if (nfs_mount_gone(nmp
)) { 
7967                         ubc_upl_abort_range(pl
, pl_offset
, size
, 
7968                             UPL_ABORT_ERROR 
| UPL_ABORT_FREE_ON_EMPTY
); 
7972         nmrsize 
= nmp
->nm_rsize
; 
7974         plinfo 
= ubc_upl_pageinfo(pl
); 
7975         kret 
= ubc_upl_map(pl
, &ioaddr
); 
7976         if (kret 
!= KERN_SUCCESS
) { 
7977                 panic("nfs_vnop_pagein: ubc_upl_map() failed with (%d)", kret
); 
7979         ioaddr 
+= pl_offset
; 
7983         if (nmp
->nm_vers 
>= NFS_VER4
) { 
7984                 stategenid 
= nmp
->nm_stategenid
; 
7987         txsize 
= rxsize 
= size
; 
7988         txoffset 
= f_offset
; 
7991         bzero(req
, sizeof(req
)); 
7992         nextsend 
= nextwait 
= 0; 
7994                 if (np
->n_flag 
& NREVOKE
) { 
7998                 /* send requests while we need to and have available slots */ 
7999                 while ((txsize 
> 0) && (req
[nextsend
] == NULL
)) { 
8000                         iosize 
= MIN(nmrsize
, txsize
); 
8001                         if ((error 
= nmp
->nm_funcs
->nf_read_rpc_async(np
, txoffset
, iosize
, thd
, cred
, NULL
, &req
[nextsend
]))) { 
8002                                 req
[nextsend
] = NULL
; 
8007                         nextsend 
= (nextsend 
+ 1) % MAXPAGINGREQS
; 
8009                 /* wait while we need to and break out if more requests to send */ 
8010                 while ((rxsize 
> 0) && req
[nextwait
]) { 
8011                         iosize 
= retsize 
= MIN(nmrsize
, rxsize
); 
8012                         uio_reset(uio
, uio_offset(uio
), UIO_SYSSPACE
, UIO_READ
); 
8013                         uio_addiov(uio
, CAST_USER_ADDR_T(rxaddr
), iosize
); 
8014                         FSDBG(322, uio_offset(uio
), uio_resid(uio
), rxaddr
, rxsize
); 
8016                         upl_ubc_alias_set(pl
, (uintptr_t) current_thread(), (uintptr_t) 2); 
8017 #endif /* UPL_DEBUG */ 
8018                         OSAddAtomic64(1, &nfsstats
.pageins
); 
8019                         error 
= nmp
->nm_funcs
->nf_read_rpc_async_finish(np
, req
[nextwait
], uio
, &retsize
, NULL
); 
8020                         req
[nextwait
] = NULL
; 
8021                         nextwait 
= (nextwait 
+ 1) % MAXPAGINGREQS
; 
8023                         if ((nmp
->nm_vers 
>= NFS_VER4
) && nfs_mount_state_error_should_restart(error
)) { 
8024                                 lck_mtx_lock(&nmp
->nm_lock
); 
8025                                 if ((error 
!= NFSERR_GRACE
) && (stategenid 
== nmp
->nm_stategenid
)) { 
8026                                         NP(np
, "nfs_vnop_pagein: error %d, initiating recovery", error
); 
8027                                         nfs_need_recover(nmp
, error
); 
8029                                 lck_mtx_unlock(&nmp
->nm_lock
); 
8035                                 FSDBG(322, uio_offset(uio
), uio_resid(uio
), error
, -1); 
8038                         if (retsize 
< iosize
) { 
8039                                 /* Just zero fill the rest of the valid area. */ 
8040                                 size_t zcnt 
= iosize 
- retsize
; 
8041                                 bzero((char *)rxaddr 
+ retsize
, zcnt
); 
8042                                 FSDBG(324, uio_offset(uio
), retsize
, zcnt
, rxaddr
); 
8043                                 uio_update(uio
, zcnt
); 
8051         } while (!error 
&& (txsize 
|| rxsize
)); 
8059                 /* cancel any outstanding requests */ 
8060                 while (req
[nextwait
]) { 
8061                         nfs_request_async_cancel(req
[nextwait
]); 
8062                         req
[nextwait
] = NULL
; 
8063                         nextwait 
= (nextwait 
+ 1) % MAXPAGINGREQS
; 
8065                 if (np
->n_flag 
& NREVOKE
) { 
8067                 } else if (restart
) { 
8068                         if (restart 
<= nfs_mount_state_max_restarts(nmp
)) { /* guard against no progress */ 
8069                                 if (error 
== NFSERR_GRACE
) { 
8070                                         tsleep(&nmp
->nm_state
, (PZERO 
- 1), "nfsgrace", 2 * hz
); 
8072                                 if (!(error 
= nfs_mount_state_wait_for_recovery(nmp
))) { 
8076                                 NP(np
, "nfs_pagein: too many restarts, aborting"); 
8086                          * See comment in vnode_pagein() on handling EAGAIN, even though UPL_NOCOMMIT flag 
8087                          * is not set, we will not abort this upl, since VM subsystem will handle it. 
8089                         if (error 
!= EAGAIN 
&& error 
!= EPERM
) { 
8090                                 ubc_upl_abort_range(pl
, pl_offset
, size
, 
8092                                     UPL_ABORT_FREE_ON_EMPTY
); 
8095                         ubc_upl_commit_range(pl
, pl_offset
, size
, 
8096                             UPL_COMMIT_CLEAR_DIRTY 
| 
8097                             UPL_COMMIT_FREE_ON_EMPTY
); 
8105  * the following are needed only by nfs_pageout to know how to handle errors 
8106  * see nfs_pageout comments on explanation of actions. 
8107  * the errors here are copied from errno.h and errors returned by servers 
8108  * are expected to match the same numbers here. If not, our actions may be 
8111 char nfs_pageouterrorhandler(int); 
8112 enum actiontype 
{NOACTION
, DUMP
, DUMPANDLOG
, RETRY
, SEVER
}; 
8113 #define NFS_ELAST 88 
8114 static u_char errorcount
[NFS_ELAST 
+ 1]; /* better be zeros when initialized */ 
8115 static const char errortooutcome
[NFS_ELAST 
+ 1] = { 
8117         DUMP
,                   /* EPERM        1       Operation not permitted */ 
8118         DUMP
,                   /* ENOENT       2       No such file or directory */ 
8119         DUMPANDLOG
,             /* ESRCH        3       No such process */ 
8120         RETRY
,                  /* EINTR        4       Interrupted system call */ 
8121         DUMP
,                   /* EIO          5       Input/output error */ 
8122         DUMP
,                   /* ENXIO        6       Device not configured */ 
8123         DUMPANDLOG
,             /* E2BIG        7       Argument list too long */ 
8124         DUMPANDLOG
,             /* ENOEXEC      8       Exec format error */ 
8125         DUMPANDLOG
,             /* EBADF        9       Bad file descriptor */ 
8126         DUMPANDLOG
,             /* ECHILD       10      No child processes */ 
8127         DUMPANDLOG
,             /* EDEADLK      11      Resource deadlock avoided - was EAGAIN */ 
8128         RETRY
,                  /* ENOMEM       12      Cannot allocate memory */ 
8129         DUMP
,                   /* EACCES       13      Permission denied */ 
8130         DUMPANDLOG
,             /* EFAULT       14      Bad address */ 
8131         DUMPANDLOG
,             /* ENOTBLK      15      POSIX - Block device required */ 
8132         RETRY
,                  /* EBUSY        16      Device busy */ 
8133         DUMP
,                   /* EEXIST       17      File exists */ 
8134         DUMP
,                   /* EXDEV        18      Cross-device link */ 
8135         DUMP
,                   /* ENODEV       19      Operation not supported by device */ 
8136         DUMP
,                   /* ENOTDIR      20      Not a directory */ 
8137         DUMP
,                   /* EISDIR       21      Is a directory */ 
8138         DUMP
,                   /* EINVAL       22      Invalid argument */ 
8139         DUMPANDLOG
,             /* ENFILE       23      Too many open files in system */ 
8140         DUMPANDLOG
,             /* EMFILE       24      Too many open files */ 
8141         DUMPANDLOG
,             /* ENOTTY       25      Inappropriate ioctl for device */ 
8142         DUMPANDLOG
,             /* ETXTBSY      26      Text file busy - POSIX */ 
8143         DUMP
,                   /* EFBIG        27      File too large */ 
8144         DUMP
,                   /* ENOSPC       28      No space left on device */ 
8145         DUMPANDLOG
,             /* ESPIPE       29      Illegal seek */ 
8146         DUMP
,                   /* EROFS        30      Read-only file system */ 
8147         DUMP
,                   /* EMLINK       31      Too many links */ 
8148         RETRY
,                  /* EPIPE        32      Broken pipe */ 
8150         DUMPANDLOG
,             /* EDOM                         33      Numerical argument out of domain */ 
8151         DUMPANDLOG
,             /* ERANGE                       34      Result too large */ 
8152         RETRY
,                  /* EAGAIN/EWOULDBLOCK   35      Resource temporarily unavailable */ 
8153         DUMPANDLOG
,             /* EINPROGRESS          36      Operation now in progress */ 
8154         DUMPANDLOG
,             /* EALREADY                     37      Operation already in progress */ 
8155         /* ipc/network software -- argument errors */ 
8156         DUMPANDLOG
,             /* ENOTSOCK                     38      Socket operation on non-socket */ 
8157         DUMPANDLOG
,             /* EDESTADDRREQ         39      Destination address required */ 
8158         DUMPANDLOG
,             /* EMSGSIZE                     40      Message too long */ 
8159         DUMPANDLOG
,             /* EPROTOTYPE           41      Protocol wrong type for socket */ 
8160         DUMPANDLOG
,             /* ENOPROTOOPT          42      Protocol not available */ 
8161         DUMPANDLOG
,             /* EPROTONOSUPPORT      43      Protocol not supported */ 
8162         DUMPANDLOG
,             /* ESOCKTNOSUPPORT      44      Socket type not supported */ 
8163         DUMPANDLOG
,             /* ENOTSUP                      45      Operation not supported */ 
8164         DUMPANDLOG
,             /* EPFNOSUPPORT         46      Protocol family not supported */ 
8165         DUMPANDLOG
,             /* EAFNOSUPPORT         47      Address family not supported by protocol family */ 
8166         DUMPANDLOG
,             /* EADDRINUSE           48      Address already in use */ 
8167         DUMPANDLOG
,             /* EADDRNOTAVAIL        49      Can't assign requested address */ 
8168         /* ipc/network software -- operational errors */ 
8169         RETRY
,                  /* ENETDOWN                     50      Network is down */ 
8170         RETRY
,                  /* ENETUNREACH          51      Network is unreachable */ 
8171         RETRY
,                  /* ENETRESET            52      Network dropped connection on reset */ 
8172         RETRY
,                  /* ECONNABORTED         53      Software caused connection abort */ 
8173         RETRY
,                  /* ECONNRESET           54      Connection reset by peer */ 
8174         RETRY
,                  /* ENOBUFS                      55      No buffer space available */ 
8175         RETRY
,                  /* EISCONN                      56      Socket is already connected */ 
8176         RETRY
,                  /* ENOTCONN                     57      Socket is not connected */ 
8177         RETRY
,                  /* ESHUTDOWN            58      Can't send after socket shutdown */ 
8178         RETRY
,                  /* ETOOMANYREFS         59      Too many references: can't splice */ 
8179         RETRY
,                  /* ETIMEDOUT            60      Operation timed out */ 
8180         RETRY
,                  /* ECONNREFUSED         61      Connection refused */ 
8182         DUMPANDLOG
,             /* ELOOP                        62      Too many levels of symbolic links */ 
8183         DUMP
,                   /* ENAMETOOLONG         63      File name too long */ 
8184         RETRY
,                  /* EHOSTDOWN            64      Host is down */ 
8185         RETRY
,                  /* EHOSTUNREACH         65      No route to host */ 
8186         DUMP
,                   /* ENOTEMPTY            66      Directory not empty */ 
8188         DUMPANDLOG
,             /* PROCLIM                      67      Too many processes */ 
8189         DUMPANDLOG
,             /* EUSERS                       68      Too many users */ 
8190         DUMPANDLOG
,             /* EDQUOT                       69      Disc quota exceeded */ 
8191         /* Network File System */ 
8192         DUMP
,                   /* ESTALE                       70      Stale NFS file handle */ 
8193         DUMP
,                   /* EREMOTE                      71      Too many levels of remote in path */ 
8194         DUMPANDLOG
,             /* EBADRPC                      72      RPC struct is bad */ 
8195         DUMPANDLOG
,             /* ERPCMISMATCH         73      RPC version wrong */ 
8196         DUMPANDLOG
,             /* EPROGUNAVAIL         74      RPC prog. not avail */ 
8197         DUMPANDLOG
,             /* EPROGMISMATCH        75      Program version wrong */ 
8198         DUMPANDLOG
,             /* EPROCUNAVAIL         76      Bad procedure for program */ 
8200         DUMPANDLOG
,             /* ENOLCK                       77      No locks available */ 
8201         DUMPANDLOG
,             /* ENOSYS                       78      Function not implemented */ 
8202         DUMPANDLOG
,             /* EFTYPE                       79      Inappropriate file type or format */ 
8203         DUMPANDLOG
,             /* EAUTH                        80      Authentication error */ 
8204         DUMPANDLOG
,             /* ENEEDAUTH            81      Need authenticator */ 
8205         /* Intelligent device errors */ 
8206         DUMPANDLOG
,             /* EPWROFF                      82      Device power is off */ 
8207         DUMPANDLOG
,             /* EDEVERR                      83      Device error, e.g. paper out */ 
8208         DUMPANDLOG
,             /* EOVERFLOW            84      Value too large to be stored in data type */ 
8209         /* Program loading errors */ 
8210         DUMPANDLOG
,             /* EBADEXEC                     85      Bad executable */ 
8211         DUMPANDLOG
,             /* EBADARCH                     86      Bad CPU type in executable */ 
8212         DUMPANDLOG
,             /* ESHLIBVERS           87      Shared library version mismatch */ 
8213         DUMPANDLOG
,             /* EBADMACHO            88      Malformed Macho file */ 
8217 nfs_pageouterrorhandler(int error
) 
8219         if (error 
> NFS_ELAST
) { 
8222                 return errortooutcome
[error
]; 
8228  * vnode OP for pageout using UPL 
8230  * No buffer I/O, just RPCs straight from the mapped pages. 
8231  * File size changes are not permitted in pageout. 
8235         struct vnop_pageout_args 
/* { 
8236                                   *  struct vnodeop_desc *a_desc; 
8239                                   *  vm_offset_t a_pl_offset; 
8243                                   *  vfs_context_t a_context; 
8246         vnode_t vp 
= ap
->a_vp
; 
8247         upl_t pl 
= ap
->a_pl
; 
8248         upl_size_t size 
= (upl_size_t
)ap
->a_size
; 
8249         off_t f_offset 
= ap
->a_f_offset
; 
8250         upl_offset_t pl_offset 
= ap
->a_pl_offset
; 
8251         upl_offset_t pgsize
; 
8252         int flags 
= ap
->a_flags
; 
8253         nfsnode_t np 
= VTONFS(vp
); 
8257         struct nfsmount 
*nmp 
= VTONMP(vp
); 
8259         int error 
= 0, iomode
; 
8260         off_t off
, txoffset
, rxoffset
; 
8261         vm_offset_t ioaddr
, txaddr
, rxaddr
; 
8263         char uio_buf
[UIO_SIZEOF(1)]; 
8264         int nofreeupl 
= flags 
& UPL_NOCOMMIT
; 
8265         size_t nmwsize
, biosize
, iosize
, remsize
; 
8266         struct nfsreq 
*req
[MAXPAGINGREQS
]; 
8267         int nextsend
, nextwait
, wverfset
, commit
; 
8268         uint64_t wverf
, wverf2
, xsize
, txsize
, rxsize
; 
8270         uint32_t stategenid 
= 0; 
8272         uint32_t vrestart 
= 0, restart 
= 0, vrestarts 
= 0, restarts 
= 0; 
8275         FSDBG(323, f_offset
, size
, pl
, pl_offset
); 
8277         if (pl 
== (upl_t
)NULL
) { 
8278                 panic("nfs_pageout: no upl"); 
8282                 printf("nfs_pageout: invalid size %u", size
); 
8284                         ubc_upl_abort_range(pl
, pl_offset
, size
, 0); 
8291                         ubc_upl_abort(pl
, UPL_ABORT_DUMP_PAGES 
| UPL_ABORT_FREE_ON_EMPTY
); 
8295         biosize 
= nmp
->nm_biosize
; 
8296         nmwsize 
= nmp
->nm_wsize
; 
8298         nfs_data_lock_noupdate(np
, NFS_DATA_LOCK_SHARED
); 
8301          * Check to see whether the buffer is incore. 
8302          * If incore and not busy, invalidate it from the cache. 
8304         for (iosize 
= 0; iosize 
< size
; iosize 
+= xsize
) { 
8305                 off 
= f_offset 
+ iosize
; 
8306                 /* need make sure we do things on block boundaries */ 
8307                 xsize 
= biosize 
- (off 
% biosize
); 
8308                 if (off 
+ (off_t
)xsize 
> f_offset 
+ (off_t
)size
) { 
8309                         xsize 
= f_offset 
+ size 
- off
; 
8311                 lbn 
= (daddr64_t
)(off 
/ biosize
); 
8312                 lck_mtx_lock(&nfs_buf_mutex
); 
8313                 if ((bp 
= nfs_buf_incore(np
, lbn
))) { 
8314                         FSDBG(323, off
, bp
, bp
->nb_lflags
, bp
->nb_flags
); 
8315                         if (nfs_buf_acquire(bp
, NBAC_NOWAIT
, 0, 0)) { 
8316                                 lck_mtx_unlock(&nfs_buf_mutex
); 
8317                                 nfs_data_unlock_noupdate(np
); 
8318                                 /* no panic. just tell vm we are busy */ 
8320                                         ubc_upl_abort_range(pl
, pl_offset
, size
, 0); 
8324                         if (bp
->nb_dirtyend 
> 0) { 
8326                                  * if there's a dirty range in the buffer, check 
8327                                  * to see if it extends beyond the pageout region 
8329                                  * if the dirty region lies completely within the 
8330                                  * pageout region, we just invalidate the buffer 
8331                                  * because it's all being written out now anyway. 
8333                                  * if any of the dirty region lies outside the 
8334                                  * pageout region, we'll try to clip the dirty 
8335                                  * region to eliminate the portion that's being 
8336                                  * paged out.  If that's not possible, because 
8337                                  * the dirty region extends before and after the 
8338                                  * pageout region, then we'll just return EBUSY. 
8340                                 off_t boff
, start
, end
; 
8344                                 /* clip end to EOF */ 
8345                                 if (end 
> (off_t
)np
->n_size
) { 
8350                                 if ((bp
->nb_dirtyoff 
< start
) && 
8351                                     (bp
->nb_dirtyend 
> end
)) { 
8353                                          * not gonna be able to clip the dirty region 
8355                                          * But before returning the bad news, move the 
8356                                          * buffer to the start of the delwri list and 
8357                                          * give the list a push to try to flush the 
8360                                         FSDBG(323, np
, bp
, 0xd00deebc, EBUSY
); 
8361                                         nfs_buf_remfree(bp
); 
8362                                         TAILQ_INSERT_HEAD(&nfsbufdelwri
, bp
, nb_free
); 
8365                                         nfs_buf_delwri_push(1); 
8366                                         lck_mtx_unlock(&nfs_buf_mutex
); 
8367                                         nfs_data_unlock_noupdate(np
); 
8369                                                 ubc_upl_abort_range(pl
, pl_offset
, size
, 0); 
8373                                 if ((bp
->nb_dirtyoff 
< start
) || 
8374                                     (bp
->nb_dirtyend 
> end
)) { 
8375                                         /* clip dirty region, if necessary */ 
8376                                         if (bp
->nb_dirtyoff 
< start
) { 
8377                                                 bp
->nb_dirtyend 
= MIN(bp
->nb_dirtyend
, start
); 
8379                                         if (bp
->nb_dirtyend 
> end
) { 
8380                                                 bp
->nb_dirtyoff 
= MAX(bp
->nb_dirtyoff
, end
); 
8382                                         FSDBG(323, bp
, bp
->nb_dirtyoff
, bp
->nb_dirtyend
, 0xd00dee00); 
8383                                         /* we're leaving this block dirty */ 
8385                                         lck_mtx_unlock(&nfs_buf_mutex
); 
8389                         nfs_buf_remfree(bp
); 
8390                         lck_mtx_unlock(&nfs_buf_mutex
); 
8391                         SET(bp
->nb_flags
, NB_INVAL
); 
8392                         nfs_node_lock_force(np
); 
8393                         if (ISSET(bp
->nb_flags
, NB_NEEDCOMMIT
)) { 
8394                                 CLR(bp
->nb_flags
, NB_NEEDCOMMIT
); 
8395                                 np
->n_needcommitcnt
--; 
8396                                 CHECK_NEEDCOMMITCNT(np
); 
8398                         nfs_node_unlock(np
); 
8399                         nfs_buf_release(bp
, 1); 
8401                         lck_mtx_unlock(&nfs_buf_mutex
); 
8405         thd 
= vfs_context_thread(ap
->a_context
); 
8406         cred 
= ubc_getcred(vp
); 
8407         if (!IS_VALID_CRED(cred
)) { 
8408                 cred 
= vfs_context_ucred(ap
->a_context
); 
8411         nfs_node_lock_force(np
); 
8412         if (np
->n_flag 
& NWRITEERR
) { 
8413                 error 
= np
->n_error
; 
8414                 nfs_node_unlock(np
); 
8415                 nfs_data_unlock_noupdate(np
); 
8417                         ubc_upl_abort_range(pl
, pl_offset
, size
, 
8418                             UPL_ABORT_FREE_ON_EMPTY
); 
8422         nfs_node_unlock(np
); 
8424         if (f_offset 
< 0 || f_offset 
>= (off_t
)np
->n_size 
|| 
8425             f_offset 
& PAGE_MASK_64 
|| size 
& PAGE_MASK_64
) { 
8426                 nfs_data_unlock_noupdate(np
); 
8428                         ubc_upl_abort_range(pl
, pl_offset
, size
, 
8429                             UPL_ABORT_FREE_ON_EMPTY
); 
8434         kret 
= ubc_upl_map(pl
, &ioaddr
); 
8435         if (kret 
!= KERN_SUCCESS
) { 
8436                 panic("nfs_vnop_pageout: ubc_upl_map() failed with (%d)", kret
); 
8438         ioaddr 
+= pl_offset
; 
8440         if ((u_quad_t
)f_offset 
+ size 
> np
->n_size
) { 
8441                 xsize 
= np
->n_size 
- f_offset
; 
8446         pgsize 
= (upl_offset_t
)round_page_64(xsize
); 
8447         if ((size 
> pgsize
) && !nofreeupl
) { 
8448                 ubc_upl_abort_range(pl
, pl_offset 
+ pgsize
, size 
- pgsize
, 
8449                     UPL_ABORT_FREE_ON_EMPTY
); 
8453          * check for partial page and clear the 
8454          * contents past end of the file before 
8455          * releasing it in the VM page cache 
8457         if ((u_quad_t
)f_offset 
< np
->n_size 
&& (u_quad_t
)f_offset 
+ size 
> np
->n_size
) { 
8458                 uint64_t io 
= np
->n_size 
- f_offset
; 
8459                 NFS_BZERO((caddr_t
)(ioaddr 
+ io
), size 
- io
); 
8460                 FSDBG(321, np
->n_size
, f_offset
, f_offset 
+ io
, size 
- io
); 
8462         nfs_data_unlock_noupdate(np
); 
8464         auio 
= uio_createwithbuffer(1, 0, UIO_SYSSPACE
, UIO_WRITE
, 
8465             &uio_buf
, sizeof(uio_buf
)); 
8469         if (nmp
->nm_vers 
>= NFS_VER4
) { 
8470                 stategenid 
= nmp
->nm_stategenid
; 
8473         wverf 
= wverf2 
= wverfset 
= 0; 
8474         txsize 
= rxsize 
= xsize
; 
8475         txoffset 
= rxoffset 
= f_offset
; 
8476         txaddr 
= rxaddr 
= ioaddr
; 
8477         commit 
= NFS_WRITE_FILESYNC
; 
8479         bzero(req
, sizeof(req
)); 
8480         nextsend 
= nextwait 
= 0; 
8482                 if (np
->n_flag 
& NREVOKE
) { 
8486                 /* send requests while we need to and have available slots */ 
8487                 while ((txsize 
> 0) && (req
[nextsend
] == NULL
)) { 
8488                         iosize 
= (size_t)MIN(nmwsize
, txsize
); 
8489                         uio_reset(auio
, txoffset
, UIO_SYSSPACE
, UIO_WRITE
); 
8490                         uio_addiov(auio
, CAST_USER_ADDR_T(txaddr
), iosize
); 
8491                         FSDBG(323, uio_offset(auio
), iosize
, txaddr
, txsize
); 
8492                         OSAddAtomic64(1, &nfsstats
.pageouts
); 
8493                         nfs_node_lock_force(np
); 
8495                         nfs_node_unlock(np
); 
8496                         vnode_startwrite(vp
); 
8497                         iomode 
= NFS_WRITE_UNSTABLE
; 
8498                         if ((error 
= nmp
->nm_funcs
->nf_write_rpc_async(np
, auio
, iosize
, thd
, cred
, iomode
, NULL
, &req
[nextsend
]))) { 
8499                                 req
[nextsend
] = NULL
; 
8500                                 vnode_writedone(vp
); 
8501                                 nfs_node_lock_force(np
); 
8503                                 nfs_node_unlock(np
); 
8509                         nextsend 
= (nextsend 
+ 1) % MAXPAGINGREQS
; 
8511                 /* wait while we need to and break out if more requests to send */ 
8512                 while ((rxsize 
> 0) && req
[nextwait
]) { 
8513                         iosize 
= remsize 
= (size_t)MIN(nmwsize
, rxsize
); 
8514                         error 
= nmp
->nm_funcs
->nf_write_rpc_async_finish(np
, req
[nextwait
], &iomode
, &iosize
, &wverf2
); 
8515                         req
[nextwait
] = NULL
; 
8516                         nextwait 
= (nextwait 
+ 1) % MAXPAGINGREQS
; 
8517                         vnode_writedone(vp
); 
8518                         nfs_node_lock_force(np
); 
8520                         nfs_node_unlock(np
); 
8522                         if ((nmp
->nm_vers 
>= NFS_VER4
) && nfs_mount_state_error_should_restart(error
)) { 
8523                                 lck_mtx_lock(&nmp
->nm_lock
); 
8524                                 if ((error 
!= NFSERR_GRACE
) && (stategenid 
== nmp
->nm_stategenid
)) { 
8525                                         NP(np
, "nfs_vnop_pageout: error %d, initiating recovery", error
); 
8526                                         nfs_need_recover(nmp
, error
); 
8528                                 lck_mtx_unlock(&nmp
->nm_lock
); 
8534                                 FSDBG(323, rxoffset
, rxsize
, error
, -1); 
8540                         } else if (wverf 
!= wverf2
) { 
8541                                 /* verifier changed, so we need to restart all the writes */ 
8545                         /* Retain the lowest commitment level returned. */ 
8546                         if (iomode 
< commit
) { 
8554                                 /* need to try sending the remainder */ 
8556                                 uio_reset(auio
, rxoffset
, UIO_SYSSPACE
, UIO_WRITE
); 
8557                                 uio_addiov(auio
, CAST_USER_ADDR_T(rxaddr
), remsize
); 
8558                                 iomode 
= NFS_WRITE_UNSTABLE
; 
8559                                 error 
= nfs_write_rpc2(np
, auio
, thd
, cred
, &iomode
, &wverf2
); 
8561                                 if ((nmp
->nm_vers 
>= NFS_VER4
) && nfs_mount_state_error_should_restart(error
)) { 
8562                                         NP(np
, "nfs_vnop_pageout: restart: error %d", error
); 
8563                                         lck_mtx_lock(&nmp
->nm_lock
); 
8564                                         if ((error 
!= NFSERR_GRACE
) && (stategenid 
== nmp
->nm_stategenid
)) { 
8565                                                 NP(np
, "nfs_vnop_pageout: error %d, initiating recovery", error
); 
8566                                                 nfs_need_recover(nmp
, error
); 
8568                                         lck_mtx_unlock(&nmp
->nm_lock
); 
8574                                         FSDBG(323, rxoffset
, rxsize
, error
, -1); 
8577                                 if (wverf 
!= wverf2
) { 
8578                                         /* verifier changed, so we need to restart all the writes */ 
8582                                 if (iomode 
< commit
) { 
8593         } while (!error 
&& (txsize 
|| rxsize
)); 
8597         if (!error 
&& (commit 
!= NFS_WRITE_FILESYNC
)) { 
8598                 error 
= nmp
->nm_funcs
->nf_commit_rpc(np
, f_offset
, xsize
, cred
, wverf
); 
8599                 if (error 
== NFSERR_STALEWRITEVERF
) { 
8607                 /* cancel any outstanding requests */ 
8608                 while (req
[nextwait
]) { 
8609                         nfs_request_async_cancel(req
[nextwait
]); 
8610                         req
[nextwait
] = NULL
; 
8611                         nextwait 
= (nextwait 
+ 1) % MAXPAGINGREQS
; 
8612                         vnode_writedone(vp
); 
8613                         nfs_node_lock_force(np
); 
8615                         nfs_node_unlock(np
); 
8617                 if (np
->n_flag 
& NREVOKE
) { 
8621                                 if (++vrestarts 
<= 100) { /* guard against no progress */ 
8624                                 NP(np
, "nfs_pageout: too many restarts, aborting"); 
8625                                 FSDBG(323, f_offset
, xsize
, ERESTART
, -1); 
8628                                 if (restarts 
<= nfs_mount_state_max_restarts(nmp
)) { /* guard against no progress */ 
8629                                         if (error 
== NFSERR_GRACE
) { 
8630                                                 tsleep(&nmp
->nm_state
, (PZERO 
- 1), "nfsgrace", 2 * hz
); 
8632                                         if (!(error 
= nfs_mount_state_wait_for_recovery(nmp
))) { 
8636                                         NP(np
, "nfs_pageout: too many restarts, aborting"); 
8637                                         FSDBG(323, f_offset
, xsize
, ERESTART
, -1); 
8646          * We've had several different solutions on what to do when the pageout 
8647          * gets an error. If we don't handle it, and return an error to the 
8648          * caller, vm, it will retry . This can end in endless looping 
8649          * between vm and here doing retries of the same page. Doing a dump 
8650          * back to vm, will get it out of vm's knowledge and we lose whatever 
8651          * data existed. This is risky, but in some cases necessary. For 
8652          * example, the initial fix here was to do that for ESTALE. In that case 
8653          * the server is telling us that the file is no longer the same. We 
8654          * would not want to keep paging out to that. We also saw some 151 
8655          * errors from Auspex server and NFSv3 can return errors higher than 
8656          * ELAST. Those along with NFS known server errors we will "dump" from 
8657          * vm.  Errors we don't expect to occur, we dump and log for further 
8658          * analysis. Errors that could be transient, networking ones, 
8659          * we let vm "retry". Lastly, errors that we retry, but may have potential 
8660          * to storm the network, we "retrywithsleep". "sever" will be used in 
8661          * in the future to dump all pages of object for cases like ESTALE. 
8662          * All this is the basis for the states returned and first guesses on 
8663          * error handling. Tweaking expected as more statistics are gathered. 
8664          * Note, in the long run we may need another more robust solution to 
8665          * have some kind of persistant store when the vm cannot dump nor keep 
8666          * retrying as a solution, but this would be a file architectural change 
8668         if (!nofreeupl
) { /* otherwise stacked file system has to handle this */ 
8671                         char action 
= nfs_pageouterrorhandler(error
); 
8675                                 abortflags 
= UPL_ABORT_DUMP_PAGES 
| UPL_ABORT_FREE_ON_EMPTY
; 
8678                                 abortflags 
= UPL_ABORT_DUMP_PAGES 
| UPL_ABORT_FREE_ON_EMPTY
; 
8679                                 if (error 
<= NFS_ELAST
) { 
8680                                         if ((errorcount
[error
] % 100) == 0) { 
8681                                                 NP(np
, "nfs_pageout: unexpected error %d. dumping vm page", error
); 
8683                                         errorcount
[error
]++; 
8687                                 abortflags 
= UPL_ABORT_FREE_ON_EMPTY
; 
8689                         case SEVER
:         /* not implemented */ 
8691                                 NP(np
, "nfs_pageout: action %d not expected", action
); 
8695                         ubc_upl_abort_range(pl
, pl_offset
, pgsize
, abortflags
); 
8696                         /* return error in all cases above */ 
8698                         ubc_upl_commit_range(pl
, pl_offset
, pgsize
, 
8699                             UPL_COMMIT_CLEAR_DIRTY 
| 
8700                             UPL_COMMIT_FREE_ON_EMPTY
); 
8706 /* Blktooff derives file offset given a logical block number */ 
/*
 * nfs_vnop_blktooff (fragment): VNOP converting a logical block number to a
 * byte offset: *a_offset = a_lblkno * biosize, where biosize is the mount's
 * nm_biosize (fixed-size logical I/O blocks).
 * NOTE(review): extraction fragment — the function signature line, the body
 * of the nfs_mount_gone() early-return, and the final return are missing
 * from this view. Comments only; code bytes untouched.
 */
8709         struct vnop_blktooff_args 
/* { 
8710                                    *  struct vnodeop_desc *a_desc; 
8712                                    *  daddr64_t a_lblkno; 
8717         vnode_t vp 
= ap
->a_vp
; 
/* Resolve the NFS mount for this vnode; presumably NULL when the mount is gone — see check below. */
8718         struct nfsmount 
*nmp 
= VTONMP(vp
); 
8720         if (nfs_mount_gone(nmp
)) { 
/* Mount's logical block size; used as the blktooff scale factor. */
8723         biosize 
= nmp
->nm_biosize
; 
/* File offset = logical block number times the mount's block size. */
8725         *ap
->a_offset 
= (off_t
)(ap
->a_lblkno 
* biosize
); 
/*
 * nfs_vnop_offtoblk (fragment): inverse of blktooff — converts a byte offset
 * to a logical block number: *a_lblkno = a_offset / biosize, with biosize
 * taken from the mount's nm_biosize.
 * NOTE(review): extraction fragment — the function signature line, the body
 * of the nfs_mount_gone() early-return, and the final return are missing
 * from this view. Comments only; code bytes untouched.
 */
8732         struct vnop_offtoblk_args 
/* { 
8733                                    *  struct vnodeop_desc *a_desc; 
8736                                    *  daddr64_t *a_lblkno; 
8740         vnode_t vp 
= ap
->a_vp
; 
/* Resolve the NFS mount for this vnode before touching nm_biosize. */
8741         struct nfsmount 
*nmp 
= VTONMP(vp
); 
8743         if (nfs_mount_gone(nmp
)) { 
8746         biosize 
= nmp
->nm_biosize
; 
/* Logical block number = byte offset divided by the mount's block size. */
8748         *ap
->a_lblkno 
= (daddr64_t
)(ap
->a_offset 
/ biosize
); 
8754  * vnode change monitoring 
/*
 * nfs_vnop_monitor (fragment): synchronizes a vnode's kernel monitoring
 * status with the mount's nm_monlist under nm_lock. If the vnode is
 * monitored and not yet on the list, it is inserted and the socket thread is
 * woken; otherwise the code waits out any in-progress monitor scan
 * (NMMONSCANINPROG) and removes the node from the list.
 * NOTE(review): extraction fragment — the signature, several closing braces,
 * the else keyword between the two branches, and the return are missing from
 * this view. Comments only; code bytes untouched.
 */
8758         struct vnop_monitor_args 
/* { 
8759                                   *  struct vnodeop_desc *a_desc; 
8761                                   *  uint32_t a_events; 
8764                                   *  vfs_context_t a_context; 
8767         nfsnode_t np 
= VTONFS(ap
->a_vp
); 
8768         struct nfsmount 
*nmp 
= VTONMP(ap
->a_vp
); 
8771         if (nfs_mount_gone(nmp
)) { 
8775         /* make sure that the vnode's monitoring status is up to date */ 
/* nm_lock guards nm_monlist membership and the n_mflag scan flags below. */
8776         lck_mtx_lock(&nmp
->nm_lock
); 
8777         if (vnode_ismonitored(ap
->a_vp
)) { 
8778                 /* This vnode is currently being monitored, make sure we're tracking it. */ 
/* le_next == NFSNOLIST marks "not on any list" — only then insert. */
8779                 if (np
->n_monlink
.le_next 
== NFSNOLIST
) { 
8780                         LIST_INSERT_HEAD(&nmp
->nm_monlist
, np
, n_monlink
); 
8781                         nfs_mount_sock_thread_wake(nmp
); 
8784                 /* This vnode is no longer being monitored, make sure we're not tracking it. */ 
8785                 /* Wait for any in-progress getattr to complete first. */ 
8786                 while (np
->n_mflag 
& NMMONSCANINPROG
) { 
/* 1-second timeout bounds each wait for the monitor scan to finish. */
8787                         struct timespec ts 
= { .tv_sec 
= 1, .tv_nsec 
= 0 }; 
8788                         np
->n_mflag 
|= NMMONSCANWANT
; 
/* Sleeps on n_mflag, dropping nm_lock while blocked (msleep semantics). */
8789                         msleep(&np
->n_mflag
, &nmp
->nm_lock
, PZERO 
- 1, "nfswaitmonscan", &ts
); 
/* Unlink and restore the NFSNOLIST sentinel so a later insert is valid. */
8791                 if (np
->n_monlink
.le_next 
!= NFSNOLIST
) { 
8792                         LIST_REMOVE(np
, n_monlink
); 
8793                         np
->n_monlink
.le_next 
= NFSNOLIST
; 
8796         lck_mtx_unlock(&nmp
->nm_lock
); 
8802  * Send a vnode notification for the given events. 
/*
 * nfs_vnode_notify (fragment): delivers a vnode_notify() for the given event
 * mask, rate-limited to one notification per second per node (n_evtstamp).
 * Events arriving inside the same second are accumulated in n_events and
 * folded into the next delivery. If cached attributes are available
 * (nfs_getattrcache), selected fields are attached via VATTR_RETURN.
 * NOTE(review): extraction fragment — the return type line, the `now`
 * declaration and its microuptime-style initialization, the early return in
 * the delay branch, and the closing brace are missing from this view.
 * Comments only; code bytes untouched.
 */
8805 nfs_vnode_notify(nfsnode_t np
, uint32_t events
) 
8807         struct nfsmount 
*nmp 
= NFSTONMP(np
); 
8808         struct nfs_vattr 
*nvattr
; 
/* vap stays NULL unless cached attributes are found below. */
8809         struct vnode_attr vattr
, *vap 
= NULL
; 
/* Rate limit: same-second repeat (or gone mount) just accumulates events. */
8813         if ((np
->n_evtstamp 
== now
.tv_sec
) || !nmp
) { 
8814                 /* delay sending this notify */ 
8815                 np
->n_events 
|= events
; 
/* Merge in any events deferred by the rate limiter above. */
8818         events 
|= np
->n_events
; 
8820         np
->n_evtstamp 
= now
.tv_sec
; 
/* M_WAITOK: allocation sleeps rather than failing; freed below. */
8821         MALLOC(nvattr
, struct nfs_vattr 
*, sizeof(*nvattr
), M_TEMP
, M_WAITOK
); 
8823         vfs_get_notify_attributes(&vattr
); 
/* Only attach attributes when the cache lookup succeeds (returns 0). */
8824         if (!nfs_getattrcache(np
, nvattr
, 0)) { 
8828                 VATTR_RETURN(vap
, va_fsid
, vfs_statfs(nmp
->nm_mountp
)->f_fsid
.val
[0]); 
8829                 VATTR_RETURN(vap
, va_fileid
, nvattr
->nva_fileid
); 
8830                 VATTR_RETURN(vap
, va_mode
, nvattr
->nva_mode
); 
8831                 VATTR_RETURN(vap
, va_uid
, nvattr
->nva_uid
); 
8832                 VATTR_RETURN(vap
, va_gid
, nvattr
->nva_gid
); 
8833                 VATTR_RETURN(vap
, va_nlink
, nvattr
->nva_nlink
); 
/* Deliver the accumulated events; vap may be NULL if no cached attrs. */
8835         vnode_notify(NFSTOV(np
), events
, vap
); 
8836         FREE(nvattr
, M_TEMP
); 
8839 #endif /* CONFIG_NFS_CLIENT */