+ return error;
+}
+
+/*
+ * NFSv4 Named Attributes
+ *
+ * Both the extended attributes interface and the named streams interface
+ * are backed by NFSv4 named attributes. The implementations for both use
+ * a common set of routines in an attempt to reduce code duplication, to
+ * increase efficiency, to increase caching of both names and data, and to
+ * confine the complexity.
+ *
+ * Each NFS node caches its named attribute directory's file handle.
+ * The directory nodes for the named attribute directories are handled
+ * exactly like regular directories (with a couple minor exceptions).
+ * Named attribute nodes are also treated as much like regular files as
+ * possible.
+ *
+ * Most of the heavy lifting is done by nfs4_named_attr_get().
+ */
+
+/*
+ * Get the given node's attribute directory node.
+ * If !fetch, then only return a cached node.
+ * Otherwise, we will attempt to fetch the node from the server.
+ * (Note: the node should be marked busy.)
+ */
+nfsnode_t
+nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx)
+{
+ nfsnode_t adnp = NULL;
+ struct nfsmount *nmp;
+ int error = 0, status, numops;
+ struct nfsm_chain nmreq, nmrep;
+ u_int64_t xid;
+ uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
+ fhandle_t fh;
+ struct nfs_vattr nvattr;
+ struct componentname cn;
+ struct nfsreq rq, *req = &rq;
+ struct nfsreq_secinfo_args si;
+
+ nmp = NFSTONMP(np);
+ if (nfs_mount_gone(nmp)) {
+ return NULL;
+ }
+ if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) {
+ return NULL;
+ }
+
+ NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
+ NVATTR_INIT(&nvattr);
+ nfsm_chain_null(&nmreq);
+ nfsm_chain_null(&nmrep);
+
+ bzero(&cn, sizeof(cn));
+ cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
+ cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
+ cn.cn_nameiop = LOOKUP;
+
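+ /* If we already have a cached attrdir file handle, try the node cache first. */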
+ if (np->n_attrdirfh) {
+ // XXX can't set parent correctly (to np) yet
+ error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh,
+ NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp);
+ if (adnp) {
+ goto nfsmout;
+ }
+ }
+ if (!fetch) {
+ error = ENOENT;
+ goto nfsmout;
+ }
+
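+ /* Not cached: build and send a compound to open the attribute directory and fetch its file handle. */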
+ // PUTFH, OPENATTR, GETATTR
+ numops = 3;
+ nfsm_chain_build_alloc_init(error, &nmreq, 22 * NFSX_UNSIGNED);
+ nfsm_chain_add_compound_header(error, &nmreq, "openattr", nmp->nm_minor_vers, numops);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
+ nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
+ nfsm_chain_add_32(error, &nmreq, 0);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
+ NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
+ NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
+ nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
+ NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
+ nfsm_chain_build_done(error, &nmreq);
+ nfsm_assert(error, (numops == 0), EPROTO);
+ nfsmout_if(error);
+ error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND,
+ vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req);
+ if (!error) {
+ error = nfs_request_async_finish(req, &nmrep, &xid, &status);
+ }
+
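+ /* Parse the reply and pull the attrdir's file handle out of the GETATTR results. */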
+ nfsm_chain_skip_tag(error, &nmrep);
+ nfsm_chain_get_32(error, &nmrep, numops);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
+ nfsmout_if(error);
+ error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
+ nfsmout_if(error);
+ if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
+ error = ENOENT;
+ goto nfsmout;
+ }
+ if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
+ /* (re)allocate attrdir fh buffer */
+ if (np->n_attrdirfh) {
+ FREE(np->n_attrdirfh, M_TEMP);
+ }
+ MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
+ }
+ if (!np->n_attrdirfh) {
+ error = ENOMEM;
+ goto nfsmout;
+ }
+ /* cache the attrdir fh in the node */
+ *np->n_attrdirfh = fh.fh_len;
+ bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
+ /* create node for attrdir */
+ // XXX can't set parent correctly (to np) yet
+ error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
+nfsmout:
+ NVATTR_CLEANUP(&nvattr);
+ nfsm_chain_cleanup(&nmreq);
+ nfsm_chain_cleanup(&nmrep);
+
+ if (adnp) {
+ /* sanity check that this node is an attribute directory */
+ if (adnp->n_vattr.nva_type != VDIR) {
+ error = EINVAL;
+ }
+ if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) {
+ error = EINVAL;
+ }
+ nfs_node_unlock(adnp);
+ if (error) {
+ vnode_put(NFSTOV(adnp));
+ }
+ }
+ return error ? NULL : adnp;
+}
+
+/*
+ * Get the given node's named attribute node for the name given.
+ *
+ * In an effort to increase the performance of named attribute access, we try
+ * to reduce server requests by doing the following:
+ *
+ * - cache the node's named attribute directory file handle in the node
+ * - maintain a directory vnode for the attribute directory
+ * - use name cache entries (positive and negative) to speed up lookups
+ * - optionally open the named attribute (with the given accessMode) in the same RPC
+ * - combine attribute directory retrieval with the lookup/open RPC
+ * - optionally prefetch the named attribute's first block of data in the same RPC
+ *
+ * Also, in an attempt to reduce the number of copies/variations of this code,
+ * parts of the RPC building/processing code are conditionalized on what is
+ * needed for any particular request (openattr, lookup vs. open, read).
+ *
+ * Note that because we may not have the attribute directory node when we start
+ * the lookup/open, we lock both the node and the attribute directory node.
+ */
+
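+/*
+ * Flags for nfs4_named_attr_get():  create the attribute if it doesn't exist,
+ * fail the create if it does exist (guarded), truncate any existing data on
+ * open, and/or prefetch the first block of the attribute's data.
+ */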
+#define NFS_GET_NAMED_ATTR_CREATE 0x1
+#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2
+#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4
+#define NFS_GET_NAMED_ATTR_PREFETCH 0x8
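+
+/*
+ * For example, the xattr/named-stream entry points below call this roughly as
+ * follows (the exact flags vary with the caller's options):
+ *
+ *   getxattr:  nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
+ *                  NFS_GET_NAMED_ATTR_PREFETCH, ctx, &anp, NULL);
+ *   setxattr:  nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
+ *                  NFS_GET_NAMED_ATTR_CREATE | NFS_GET_NAMED_ATTR_TRUNCATE, ctx, &anp, &nofp);
+ */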
+
+int
+nfs4_named_attr_get(
+ nfsnode_t np,
+ struct componentname *cnp,
+ uint32_t accessMode,
+ int flags,
+ vfs_context_t ctx,
+ nfsnode_t *anpp,
+ struct nfs_open_file **nofpp)
+{
+ struct nfsmount *nmp;
+ int error = 0, open_error = EIO;
+ int inuse = 0, adlockerror = ENOENT, busyerror = ENOENT, adbusyerror = ENOENT, nofpbusyerror = ENOENT;
+ int create, guarded, prefetch, truncate, noopbusy = 0;
+ int open, status, numops, hadattrdir, negnamecache;
+ struct nfs_vattr nvattr;
+ struct vnode_attr vattr;
+ nfsnode_t adnp = NULL, anp = NULL;
+ vnode_t avp = NULL;
+ u_int64_t xid, savedxid = 0;
+ struct nfsm_chain nmreq, nmrep;
+ uint32_t bitmap[NFS_ATTR_BITMAP_LEN], bmlen;
+ uint32_t denyMode, rflags, delegation, recall, eof, rlen, retlen;
+ nfs_stateid stateid, dstateid;
+ fhandle_t fh;
+ struct nfs_open_owner *noop = NULL;
+ struct nfs_open_file *newnofp = NULL, *nofp = NULL;
+ struct vnop_access_args naa;
+ thread_t thd;
+ kauth_cred_t cred;
+ struct timeval now;
+ char sbuf[64], *s;
+ uint32_t ace_type, ace_flags, ace_mask, len, slen;
+ struct kauth_ace ace;
+ struct nfsreq rq, *req = &rq;
+ struct nfsreq_secinfo_args si;
+
+ *anpp = NULL;
+ fh.fh_len = 0;
+ rflags = delegation = recall = eof = rlen = retlen = 0;
+ ace.ace_flags = 0;
+ s = sbuf;
+ slen = sizeof(sbuf);
+
+ nmp = NFSTONMP(np);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+ NVATTR_INIT(&nvattr);
+ negnamecache = !NMFLAG(nmp, NONEGNAMECACHE);
+ thd = vfs_context_thread(ctx);
+ cred = vfs_context_ucred(ctx);
+ create = (flags & NFS_GET_NAMED_ATTR_CREATE) ? NFS_OPEN_CREATE : NFS_OPEN_NOCREATE;
+ guarded = (flags & NFS_GET_NAMED_ATTR_CREATE_GUARDED) ? NFS_CREATE_GUARDED : NFS_CREATE_UNCHECKED;
+ truncate = (flags & NFS_GET_NAMED_ATTR_TRUNCATE);
+ prefetch = (flags & NFS_GET_NAMED_ATTR_PREFETCH);
+
+ if (!create) {
+ error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
+ if (error) {
+ return error;
+ }
+ if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
+ !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
+ return ENOATTR;
+ }
+ } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) {
+ /* shouldn't happen... but just be safe */
+ printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr);
+ accessMode = NFS_OPEN_SHARE_ACCESS_READ;
+ }
+ open = (accessMode != NFS_OPEN_SHARE_ACCESS_NONE);
+ if (open) {
+ /*
+ * We're trying to open the file.
+ * We'll create/open it with the given access mode,
+ * and set NFS_OPEN_FILE_CREATE.
+ */
+ denyMode = NFS_OPEN_SHARE_DENY_NONE;
+ if (prefetch && guarded) {
+ prefetch = 0; /* no sense prefetching data that can't be there */
+ }
+ noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1);
+ if (!noop) {
+ return ENOMEM;
+ }
+ }
+
+ if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
+ return error;
+ }
+
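+ /* See if we already have the attribute directory node cached (don't fetch it from the server yet). */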
+ adnp = nfs4_named_attr_dir_get(np, 0, ctx);
+ hadattrdir = (adnp != NULL);
+ if (prefetch) {
+ microuptime(&now);
+ /* use the special state ID because we don't have a real one to send */
+ stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0;
+ rlen = MIN(nmp->nm_rsize, nmp->nm_biosize);
+ }
+ NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0);
+ nfsm_chain_null(&nmreq);
+ nfsm_chain_null(&nmrep);
+
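+ /* If we have the attrdir, try the name cache and the directory buffer cache before going to the server. */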
+ if (hadattrdir) {
+ if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) {
+ goto nfsmout;
+ }
+ /* nfs_getattr() will check changed and purge caches */
+ error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
+ nfsmout_if(error);
+ error = cache_lookup(NFSTOV(adnp), &avp, cnp);
+ switch (error) {
+ case ENOENT:
+ /* negative cache entry */
+ goto nfsmout;
+ case 0:
+ /* cache miss */
+ /* try dir buf cache lookup */
+ error = nfs_dir_buf_cache_lookup(adnp, &anp, cnp, ctx, 0);
+ if (!error && anp) {
+ /* dir buf cache hit */
+ *anpp = anp;
+ error = -1;
+ }
+ if (error != -1) { /* cache miss */
+ break;
+ }
+ /* FALLTHROUGH */
+ case -1:
+ /* cache hit, not really an error */
+ OSAddAtomic64(1, &nfsstats.lookupcache_hits);
+ if (!anp && avp) {
+ *anpp = anp = VTONFS(avp);
+ }
+
+ nfs_node_clear_busy(adnp);
+ adbusyerror = ENOENT;
+
+ /* check for directory access */
+ naa.a_desc = &vnop_access_desc;
+ naa.a_vp = NFSTOV(adnp);
+ naa.a_action = KAUTH_VNODE_SEARCH;
+ naa.a_context = ctx;
+
+ /* compute actual success/failure based on accessibility */
+ error = nfs_vnop_access(&naa);
+ /* FALLTHROUGH */
+ default:
+ /* we either found it, or hit an error */
+ if (!error && guarded) {
+ /* found cached entry but told not to use it */
+ error = EEXIST;
+ vnode_put(NFSTOV(anp));
+ *anpp = anp = NULL;
+ }
+ /* we're done if error or we don't need to open */
+ if (error || !open) {
+ goto nfsmout;
+ }
+ /* no error and we need to open... */
+ }
+ }
+
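+ /*
+ * If we're opening: start mount-state use, set up a (possibly provisional)
+ * open file, and if we already have the node just open it directly.
+ */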
+ if (open) {
+restart:
+ error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx));
+ if (error) {
+ nfs_open_owner_rele(noop);
+ noop = NULL;
+ goto nfsmout;
+ }
+ inuse = 1;
+
+ /* grab an open file - possibly provisional/nodeless if cache_lookup() failed */
+ error = nfs_open_file_find(anp, noop, &newnofp, 0, 0, 1);
+ if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_LOST)) {
+ printf("nfs4_named_attr_get: LOST %d %s\n", kauth_cred_getuid(noop->noo_cred), cnp->cn_nameptr);
+ error = EIO;
+ }
+ if (!error && (newnofp->nof_flags & NFS_OPEN_FILE_REOPEN)) {
+ nfs_mount_state_in_use_end(nmp, 0);
+ error = nfs4_reopen(newnofp, vfs_context_thread(ctx));
+ nfs_open_file_destroy(newnofp);
+ newnofp = NULL;
+ if (!error) {
+ goto restart;
+ }
+ }
+ if (!error) {
+ error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx));
+ }
+ if (error) {
+ if (newnofp) {
+ nfs_open_file_destroy(newnofp);
+ }
+ newnofp = NULL;
+ goto nfsmout;
+ }
+ if (anp) {
+ /*
+ * We already have the node. So we just need to open
+ * it - which we may be able to do with a delegation.
+ */
+ open_error = error = nfs4_open(anp, newnofp, accessMode, denyMode, ctx);
+ if (!error) {
+ /* open succeeded, so our open file is no longer temporary */
+ nofp = newnofp;
+ nofpbusyerror = 0;
+ newnofp = NULL;
+ if (nofpp) {
+ *nofpp = nofp;
+ }
+ }
+ goto nfsmout;
+ }
+ }
+
+ /*
+ * We either don't have the attrdir or we didn't find the attribute
+ * in the name cache, so we need to talk to the server.
+ *
+ * If we don't have the attrdir, we'll need to ask the server for that too.
+ * If the caller is requesting that the attribute be created, we need to
+ * make sure the attrdir is created.
+ * The caller may also request that the first block of an existing attribute
+ * be retrieved at the same time.
+ */
+
+ if (open) {
+ /* need to mark the open owner busy during the RPC */
+ if ((error = nfs_open_owner_set_busy(noop, thd))) {
+ goto nfsmout;
+ }
+ noopbusy = 1;
+ }
+
+ /*
+ * We'd like to get updated post-open/lookup attributes for the
+ * directory and we may also want to prefetch some data via READ.
+ * We'd like the READ results to be last so that we can leave the
+ * data in the mbufs until the end.
+ *
+ * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR
+ */
+ numops = 5;
+ if (!hadattrdir) {
+ numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR
+ }
+ if (prefetch) {
+ numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ
+ }
+ nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen);
+ nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops);
+ if (hadattrdir) {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
+ nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
+ } else {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
+ nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
+ nfsm_chain_add_32(error, &nmreq, create ? 1 : 0);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
+ NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
+ NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
+ nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
+ NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
+ }
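+ /* Either OPEN (possibly creating) or a plain LOOKUP of the named attribute. */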
+ if (open) {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_OPEN);
+ nfsm_chain_add_32(error, &nmreq, noop->noo_seqid);
+ nfsm_chain_add_32(error, &nmreq, accessMode);
+ nfsm_chain_add_32(error, &nmreq, denyMode);
+ nfsm_chain_add_64(error, &nmreq, nmp->nm_clientid);
+ nfsm_chain_add_32(error, &nmreq, NFSX_UNSIGNED);
+ nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(noop->noo_cred));
+ nfsm_chain_add_32(error, &nmreq, create);
+ if (create) {
+ nfsm_chain_add_32(error, &nmreq, guarded);
+ VATTR_INIT(&vattr);
+ if (truncate) {
+ VATTR_SET(&vattr, va_data_size, 0);
+ }
+ nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
+ }
+ nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL);
+ nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
+ } else {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP);
+ nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp);
+ }
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
+ NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap);
+ NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE);
+ nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
+ NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
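+ /* If prefetching, SAVEFH the named attribute so we can RESTOREFH it for the READ after the directory GETATTR. */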
+ if (prefetch) {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH);
+ }
+ if (hadattrdir) {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
+ nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, adnp->n_fhp, adnp->n_fhsize);
+ } else {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
+ nfsm_chain_add_fh(error, &nmreq, nmp->nm_vers, np->n_fhp, np->n_fhsize);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_OPENATTR);
+ nfsm_chain_add_32(error, &nmreq, 0);
+ }
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
+ nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap,
+ NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
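+ /*
+ * RESTOREFH back to the attribute, NVERIFY that its size isn't zero
+ * (so we skip the READ when the attribute is empty), then READ the
+ * first block of data.
+ */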
+ if (prefetch) {
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_NVERIFY);
+ VATTR_INIT(&vattr);
+ VATTR_SET(&vattr, va_data_size, 0);
+ nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_READ);
+ nfsm_chain_add_stateid(error, &nmreq, &stateid);
+ nfsm_chain_add_64(error, &nmreq, 0);
+ nfsm_chain_add_32(error, &nmreq, rlen);
+ }
+ nfsm_chain_build_done(error, &nmreq);
+ nfsm_assert(error, (numops == 0), EPROTO);
+ nfsmout_if(error);
+ error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND,
+ vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req);
+ if (!error) {
+ error = nfs_request_async_finish(req, &nmrep, &xid, &status);
+ }
+
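+ /* Parse the reply, holding the attrdir node lock while we update its cached state. */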
+ if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) {
+ error = adlockerror;
+ }
+ savedxid = xid;
+ nfsm_chain_skip_tag(error, &nmrep);
+ nfsm_chain_get_32(error, &nmrep, numops);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
+ if (!hadattrdir) {
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
+ nfsmout_if(error);
+ error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
+ nfsmout_if(error);
+ if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) {
+ if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) {
+ /* (re)allocate attrdir fh buffer */
+ if (np->n_attrdirfh) {
+ FREE(np->n_attrdirfh, M_TEMP);
+ }
+ MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK);
+ }
+ if (np->n_attrdirfh) {
+ /* remember the attrdir fh in the node */
+ *np->n_attrdirfh = fh.fh_len;
+ bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len);
+ /* create busied node for attrdir */
+ struct componentname cn;
+ bzero(&cn, sizeof(cn));
+ cn.cn_nameptr = __CAST_AWAY_QUALIFIER(_PATH_FORKSPECIFIER, const, char *); /* "/..namedfork/" */
+ cn.cn_namelen = strlen(_PATH_FORKSPECIFIER);
+ cn.cn_nameiop = LOOKUP;
+ // XXX can't set parent correctly (to np) yet
+ error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp);
+ if (!error) {
+ adlockerror = 0;
+ /* set the node busy */
+ SET(adnp->n_flag, NBUSY);
+ adbusyerror = 0;
+ }
+ /* if no adnp, oh well... */
+ error = 0;
+ }
+ }
+ NVATTR_CLEANUP(&nvattr);
+ fh.fh_len = 0;
+ }
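+ /* Parse the OPEN result (state ID, change info, result flags, delegation) or just the LOOKUP status. */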
+ if (open) {
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_OPEN);
+ nfs_owner_seqid_increment(noop, NULL, error);
+ nfsm_chain_get_stateid(error, &nmrep, &newnofp->nof_stateid);
+ nfsm_chain_check_change_info(error, &nmrep, adnp);
+ nfsm_chain_get_32(error, &nmrep, rflags);
+ bmlen = NFS_ATTR_BITMAP_LEN;
+ nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen);
+ nfsm_chain_get_32(error, &nmrep, delegation);
+ if (!error) {
+ switch (delegation) {
+ case NFS_OPEN_DELEGATE_NONE:
+ break;
+ case NFS_OPEN_DELEGATE_READ:
+ case NFS_OPEN_DELEGATE_WRITE:
+ nfsm_chain_get_stateid(error, &nmrep, &dstateid);
+ nfsm_chain_get_32(error, &nmrep, recall);
+ if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX
+ nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED);
+ }
+ /* if we have any trouble accepting the ACE, just invalidate it */
+ ace_type = ace_flags = ace_mask = len = 0;
+ nfsm_chain_get_32(error, &nmrep, ace_type);
+ nfsm_chain_get_32(error, &nmrep, ace_flags);
+ nfsm_chain_get_32(error, &nmrep, ace_mask);
+ nfsm_chain_get_32(error, &nmrep, len);
+ ace.ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error);
+ ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags);
+ ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask);
+ if (!error && (len >= slen)) {
+ MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK);
+ if (s) {
+ slen = len + 1;
+ } else {
+ ace.ace_flags = 0;
+ }
+ }
+ if (s) {
+ nfsm_chain_get_opaque(error, &nmrep, len, s);
+ } else {
+ nfsm_chain_adv(error, &nmrep, nfsm_rndup(len));
+ }
+ if (!error && s) {
+ s[len] = '\0';
+ if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) {
+ ace.ace_flags = 0;
+ }
+ }
+ if (error || !s) {
+ ace.ace_flags = 0;
+ }
+ if (s && (s != sbuf)) {
+ FREE(s, M_TEMP);
+ }
+ break;
+ default:
+ error = EBADRPC;
+ break;
+ }
+ }
+ /* At this point if we have no error, the object was created/opened. */
+ open_error = error;
+ } else {
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_LOOKUP);
+ }
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
+ nfsmout_if(error);
+ error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL);
+ nfsmout_if(error);
+ if (!NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) || !fh.fh_len) {
+ error = EIO;
+ goto nfsmout;
+ }
+ if (prefetch) {
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH);
+ }
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
+ if (!hadattrdir) {
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR);
+ }
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
+ nfsmout_if(error);
+ xid = savedxid;
+ nfsm_chain_loadattr(error, &nmrep, adnp, nmp->nm_vers, &xid);
+ nfsmout_if(error);
+
+ if (open) {
+ if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) {
+ newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
+ }
+ if (rflags & NFS_OPEN_RESULT_CONFIRM) {
+ if (adnp) {
+ nfs_node_unlock(adnp);
+ adlockerror = ENOENT;
+ }
+ NVATTR_CLEANUP(&nvattr);
+ error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid);
+ nfsmout_if(error);
+ savedxid = xid;
+ if ((adlockerror = nfs_node_lock(adnp))) {
+ error = adlockerror;
+ }
+ }
+ }
+
+nfsmout:
+ if (open && adnp && !adlockerror) {
+ if (!open_error && (adnp->n_flag & NNEGNCENTRIES)) {
+ adnp->n_flag &= ~NNEGNCENTRIES;
+ cache_purge_negatives(NFSTOV(adnp));
+ }
+ adnp->n_flag |= NMODIFIED;
+ nfs_node_unlock(adnp);
+ adlockerror = ENOENT;
+ nfs_getattr(adnp, NULL, ctx, NGA_CACHED);
+ }
+ if (adnp && !adlockerror && (error == ENOENT) &&
+ (cnp->cn_flags & MAKEENTRY) && (cnp->cn_nameiop != CREATE) && negnamecache) {
+ /* add a negative entry in the name cache */
+ cache_enter(NFSTOV(adnp), NULL, cnp);
+ adnp->n_flag |= NNEGNCENTRIES;
+ }
+ if (adnp && !adlockerror) {
+ nfs_node_unlock(adnp);
+ adlockerror = ENOENT;
+ }
+ if (!error && !anp && fh.fh_len) {
+ /* create the vnode with the filehandle and attributes */
+ xid = savedxid;
+ error = nfs_nget(NFSTOMP(np), adnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &anp);
+ if (!error) {
+ *anpp = anp;
+ nfs_node_unlock(anp);
+ }
+ if (!error && open) {
+ nfs_open_file_add_open(newnofp, accessMode, denyMode, 0);
+ /* After we have a node, add our open file struct to the node */
+ nofp = newnofp;
+ error = nfs_open_file_find_internal(anp, noop, &nofp, 0, 0, 0);
+ if (error) {
+ /* This shouldn't happen, because we passed in a new nofp to use. */
+ printf("nfs_open_file_find_internal failed! %d\n", error);
+ nofp = NULL;
+ } else if (nofp != newnofp) {
+ /*
+ * Hmm... an open file struct already exists.
+ * Mark the existing one busy and merge our open into it.
+ * Then destroy the one we created.
+ * Note: there's no chance of an open conflict because the
+ * open has already been granted.
+ */
+ nofpbusyerror = nfs_open_file_set_busy(nofp, NULL);
+ nfs_open_file_add_open(nofp, accessMode, denyMode, 0);
+ nofp->nof_stateid = newnofp->nof_stateid;
+ if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) {
+ nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK;
+ }
+ nfs_open_file_clear_busy(newnofp);
+ nfs_open_file_destroy(newnofp);
+ newnofp = NULL;
+ }
+ if (!error) {
+ newnofp = NULL;
+ nofpbusyerror = 0;
+ /* mark the node as holding a create-initiated open */
+ nofp->nof_flags |= NFS_OPEN_FILE_CREATE;
+ nofp->nof_creator = current_thread();
+ if (nofpp) {
+ *nofpp = nofp;
+ }
+ }
+ }
+ }
+ NVATTR_CLEANUP(&nvattr);
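+ /* If the server granted a delegation, either stash it in the node or return it. */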
+ if (open && ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE))) {
+ if (!error && anp && !recall) {
+ /* stuff the delegation state in the node */
+ lck_mtx_lock(&anp->n_openlock);
+ anp->n_openflags &= ~N_DELEG_MASK;
+ anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
+ anp->n_dstateid = dstateid;
+ anp->n_dace = ace;
+ if (anp->n_dlink.tqe_next == NFSNOLIST) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (anp->n_dlink.tqe_next == NFSNOLIST) {
+ TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+ lck_mtx_unlock(&anp->n_openlock);
+ } else {
+ /* give the delegation back */
+ if (anp) {
+ if (NFS_CMPFH(anp, fh.fh_data, fh.fh_len)) {
+ /* update delegation state and return it */
+ lck_mtx_lock(&anp->n_openlock);
+ anp->n_openflags &= ~N_DELEG_MASK;
+ anp->n_openflags |= ((delegation == NFS_OPEN_DELEGATE_READ) ? N_DELEG_READ : N_DELEG_WRITE);
+ anp->n_dstateid = dstateid;
+ anp->n_dace = ace;
+ if (anp->n_dlink.tqe_next == NFSNOLIST) {
+ lck_mtx_lock(&nmp->nm_lock);
+ if (anp->n_dlink.tqe_next == NFSNOLIST) {
+ TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink);
+ }
+ lck_mtx_unlock(&nmp->nm_lock);
+ }
+ lck_mtx_unlock(&anp->n_openlock);
+ /* don't need to send a separate delegreturn for fh */
+ fh.fh_len = 0;
+ }
+ /* return anp's current delegation */
+ nfs4_delegation_return(anp, 0, thd, cred);
+ }
+ if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */
+ nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred);
+ }
+ }
+ }
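+ /*
+ * Clean up open state: destroy any leftover provisional open file, end
+ * mount-state use (restarting if recovery is needed), and release the
+ * open owner.
+ */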
+ if (open) {
+ if (newnofp) {
+ /* need to cleanup our temporary nofp */
+ nfs_open_file_clear_busy(newnofp);
+ nfs_open_file_destroy(newnofp);
+ newnofp = NULL;
+ } else if (nofp && !nofpbusyerror) {
+ nfs_open_file_clear_busy(nofp);
+ nofpbusyerror = ENOENT;
+ }
+ if (inuse && nfs_mount_state_in_use_end(nmp, error)) {
+ inuse = 0;
+ nofp = newnofp = NULL;
+ rflags = delegation = recall = eof = rlen = retlen = 0;
+ ace.ace_flags = 0;
+ s = sbuf;
+ slen = sizeof(sbuf);
+ nfsm_chain_cleanup(&nmreq);
+ nfsm_chain_cleanup(&nmrep);
+ if (anp) {
+ vnode_put(NFSTOV(anp));
+ *anpp = anp = NULL;
+ }
+ hadattrdir = (adnp != NULL);
+ if (noopbusy) {
+ nfs_open_owner_clear_busy(noop);
+ noopbusy = 0;
+ }
+ goto restart;
+ }
+ if (noop) {
+ if (noopbusy) {
+ nfs_open_owner_clear_busy(noop);
+ noopbusy = 0;
+ }
+ nfs_open_owner_rele(noop);
+ }
+ }
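+ /* If we asked for a prefetch, process the READ reply and try to save the data into a buffer. */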
+ if (!error && prefetch && nmrep.nmc_mhead) {
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_NVERIFY);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_READ);
+ nfsm_chain_get_32(error, &nmrep, eof);
+ nfsm_chain_get_32(error, &nmrep, retlen);
+ if (!error && anp) {
+ /*
+ * There can be one problem with doing the prefetch.
+ * Because we don't have the node before we start the RPC, we
+ * can't have the buffer busy while the READ is performed.
+ * So there is a chance that other I/O occurred on the same
+ * range of data while we were performing this RPC. If that
+ * happens, then it's possible the data we have in the READ
+ * response is no longer up to date.
+ * Once we have the node and the buffer, we need to make sure
+ * that there's no chance we could be putting stale data in
+ * the buffer.
+ * So, we check if the range read is dirty or if any I/O may
+ * have occurred on it while we were performing our RPC.
+ */
+ struct nfsbuf *bp = NULL;
+ int lastpg;
+ uint32_t pagemask;
+
+ retlen = MIN(retlen, rlen);
+
+ /* check if node needs size update or invalidation */
+ if (ISSET(anp->n_flag, NUPDATESIZE)) {
+ nfs_data_update_size(anp, 0);
+ }
+ if (!(error = nfs_node_lock(anp))) {
+ if (anp->n_flag & NNEEDINVALIDATE) {
+ anp->n_flag &= ~NNEEDINVALIDATE;
+ nfs_node_unlock(anp);
+ error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1);
+ if (!error) { /* let's play it safe and just drop the data */
+ error = EIO;
+ }
+ } else {
+ nfs_node_unlock(anp);
+ }
+ }
+
+ /* calculate page mask for the range of data read */
+ lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE;
+ pagemask = ((1 << (lastpg + 1)) - 1);
+
+ if (!error) {
+ error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp);
+ }
+ /* don't save the data if dirty or potential I/O conflict */
+ if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) &&
+ timevalcmp(&anp->n_lastio, &now, <)) {
+ OSAddAtomic64(1, &nfsstats.read_bios);
+ CLR(bp->nb_flags, (NB_DONE | NB_ASYNC));
+ SET(bp->nb_flags, NB_READ);
+ NFS_BUF_MAP(bp);
+ nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data);
+ if (error) {
+ bp->nb_error = error;
+ SET(bp->nb_flags, NB_ERROR);
+ } else {
+ bp->nb_offio = 0;
+ bp->nb_endio = rlen;
+ if ((retlen > 0) && (bp->nb_endio < (int)retlen)) {
+ bp->nb_endio = retlen;
+ }
+ if (eof || (retlen == 0)) {
+ /* zero out the remaining data (up to EOF) */
+ off_t rpcrem, eofrem, rem;
+ rpcrem = (rlen - retlen);
+ eofrem = anp->n_size - (NBOFF(bp) + retlen);
+ rem = (rpcrem < eofrem) ? rpcrem : eofrem;
+ if (rem > 0) {
+ bzero(bp->nb_data + retlen, rem);
+ }
+ } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) {
+ /* ugh... short read ... just invalidate for now... */
+ SET(bp->nb_flags, NB_INVAL);
+ }
+ }
+ nfs_buf_read_finish(bp);
+ microuptime(&anp->n_lastio);
+ }
+ if (bp) {
+ nfs_buf_release(bp, 1);
+ }
+ }
+ error = 0; /* ignore any transient error in processing the prefetch */
+ }
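+ /* Final cleanup: clear busy state, drop references, and release the request chains. */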
+ if (adnp && !adbusyerror) {
+ nfs_node_clear_busy(adnp);
+ adbusyerror = ENOENT;
+ }
+ if (!busyerror) {
+ nfs_node_clear_busy(np);
+ busyerror = ENOENT;
+ }
+ if (adnp) {
+ vnode_put(NFSTOV(adnp));
+ }
+ if (error && *anpp) {
+ vnode_put(NFSTOV(*anpp));
+ *anpp = NULL;
+ }
+ nfsm_chain_cleanup(&nmreq);
+ nfsm_chain_cleanup(&nmrep);
+ return error;
+}
+
+/*
+ * Remove a named attribute.
+ */
+int
+nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_context_t ctx)
+{
+ nfsnode_t adnp = NULL;
+ struct nfsmount *nmp;
+ struct componentname cn;
+ struct vnop_remove_args vra;
+ int error, putanp = 0;
+
+ nmp = NFSTONMP(np);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+
+ bzero(&cn, sizeof(cn));
+ cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
+ cn.cn_namelen = strlen(name);
+ cn.cn_nameiop = DELETE;
+ cn.cn_flags = 0;
+
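+ /* If the caller didn't hand us the attribute node, look it up (without opening it). */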
+ if (!anp) {
+ error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE,
+ 0, ctx, &anp, NULL);
+ if ((!error && !anp) || (error == ENOATTR)) {
+ error = ENOENT;
+ }
+ if (error) {
+ if (anp) {
+ vnode_put(NFSTOV(anp));
+ anp = NULL;
+ }
+ goto out;
+ }
+ putanp = 1;
+ }
+
+ if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
+ goto out;
+ }
+ adnp = nfs4_named_attr_dir_get(np, 1, ctx);
+ nfs_node_clear_busy(np);
+ if (!adnp) {
+ error = ENOENT;
+ goto out;
+ }
+
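+ /* Remove the attribute with an ordinary VNOP_REMOVE in the attribute directory. */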
+ vra.a_desc = &vnop_remove_desc;
+ vra.a_dvp = NFSTOV(adnp);
+ vra.a_vp = NFSTOV(anp);
+ vra.a_cnp = &cn;
+ vra.a_flags = 0;
+ vra.a_context = ctx;
+ error = nfs_vnop_remove(&vra);
+out:
+ if (adnp) {
+ vnode_put(NFSTOV(adnp));
+ }
+ if (putanp) {
+ vnode_put(NFSTOV(anp));
+ }
+ return error;
+}
+
+int
+nfs4_vnop_getxattr(
+ struct vnop_getxattr_args /* {
+ * struct vnodeop_desc *a_desc;
+ * vnode_t a_vp;
+ * const char * a_name;
+ * uio_t a_uio;
+ * size_t *a_size;
+ * int a_options;
+ * vfs_context_t a_context;
+ * } */*ap)
+{
+ vfs_context_t ctx = ap->a_context;
+ struct nfsmount *nmp;
+ struct nfs_vattr nvattr;
+ struct componentname cn;
+ nfsnode_t anp;
+ int error = 0, isrsrcfork;
+
+ nmp = VTONMP(ap->a_vp);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+
+ if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
+ return ENOTSUP;
+ }
+ error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
+ if (error) {
+ return error;
+ }
+ if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
+ !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
+ return ENOATTR;
+ }
+
+ bzero(&cn, sizeof(cn));
+ cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
+ cn.cn_namelen = strlen(ap->a_name);
+ cn.cn_nameiop = LOOKUP;
+ cn.cn_flags = MAKEENTRY;
+
+ /* we'll normally try to prefetch data for xattrs... the resource fork is really a stream */
+ isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
+
+ error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
+ !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL);
+ if ((!error && !anp) || (error == ENOENT)) {
+ error = ENOATTR;
+ }
+ if (!error) {
+ if (ap->a_uio) {
+ error = nfs_bioread(anp, ap->a_uio, 0, ctx);
+ } else {
+ *ap->a_size = anp->n_size;
+ }
+ }
+ if (anp) {
+ vnode_put(NFSTOV(anp));
+ }
+ return error;
+}
+
+int
+nfs4_vnop_setxattr(
+ struct vnop_setxattr_args /* {
+ * struct vnodeop_desc *a_desc;
+ * vnode_t a_vp;
+ * const char * a_name;
+ * uio_t a_uio;
+ * int a_options;
+ * vfs_context_t a_context;
+ * } */*ap)
+{
+ vfs_context_t ctx = ap->a_context;
+ int options = ap->a_options;
+ uio_t uio = ap->a_uio;
+ const char *name = ap->a_name;
+ struct nfsmount *nmp;
+ struct componentname cn;
+ nfsnode_t anp = NULL;
+ int error = 0, closeerror = 0, flags, isrsrcfork, isfinderinfo, empty = 0, i;
+#define FINDERINFOSIZE 32
+ uint8_t finfo[FINDERINFOSIZE];
+ uint32_t *finfop;
+ struct nfs_open_file *nofp = NULL;
+ char uio_buf[UIO_SIZEOF(1)];
+ uio_t auio;
+ struct vnop_write_args vwa;
+
+ nmp = VTONMP(ap->a_vp);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+
+ if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
+ return ENOTSUP;
+ }
+
+ if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) {
+ return EINVAL;
+ }
+
+ /* XXX limitation based on need to back up uio on short write */
+ if (uio_iovcnt(uio) > 1) {
+ printf("nfs4_vnop_setxattr: iovcnt > 1\n");
+ return EINVAL;
+ }
+
+ bzero(&cn, sizeof(cn));
+ cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *);
+ cn.cn_namelen = strlen(name);
+ cn.cn_nameiop = CREATE;
+ cn.cn_flags = MAKEENTRY;
+
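+ /* FinderInfo is a fixed 32 bytes; the resource fork keeps the caller's offset and isn't truncated on open. */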
+ isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0);
+ isrsrcfork = isfinderinfo ? 0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0);
+ if (!isrsrcfork) {
+ uio_setoffset(uio, 0);
+ }
+ if (isfinderinfo) {
+ if (uio_resid(uio) != sizeof(finfo)) {
+ return ERANGE;
+ }
+ error = uiomove((char*)&finfo, sizeof(finfo), uio);
+ if (error) {
+ return error;
+ }
+ /* setting a FinderInfo of all zeroes means remove the FinderInfo */
+ empty = 1;
+ for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) {
+ if (finfop[i]) {
+ empty = 0;
+ break;
+ }
+ }
+ if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) {
+ error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
+ if (error == ENOENT) {
+ error = 0;
+ }
+ return error;
+ }
+ /* first, let's see if we get a create/replace error */
+ }
+
+ /*
+ * create/open the xattr
+ *
+ * We need to make sure not to create it if XATTR_REPLACE.
+ * For all xattrs except the resource fork, we also want to
+ * truncate the xattr to remove any current data. We'll do
+ * that by setting the size to 0 on create/open.
+ */
+ flags = 0;
+ if (!(options & XATTR_REPLACE)) {
+ flags |= NFS_GET_NAMED_ATTR_CREATE;
+ }
+ if (options & XATTR_CREATE) {
+ flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED;
+ }
+ if (!isrsrcfork) {
+ flags |= NFS_GET_NAMED_ATTR_TRUNCATE;
+ }
+
+ error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
+ flags, ctx, &anp, &nofp);
+ if (!error && !anp) {
+ error = ENOATTR;
+ }
+ if (error) {
+ goto out;
+ }
+ /* grab the open state from the get/create/open */
+ if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) {
+ nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE;
+ nofp->nof_creator = NULL;
+ nfs_open_file_clear_busy(nofp);
+ }
+
+ /* Setting an empty FinderInfo really means remove it, skip to the close/remove */
+ if (isfinderinfo && empty) {
+ goto doclose;
+ }
+
+ /*
+ * Write the data out and flush.
+ *
+ * For FinderInfo, we've already copied the data to finfo, so do I/O from there.
+ */
+ vwa.a_desc = &vnop_write_desc;
+ vwa.a_vp = NFSTOV(anp);
+ vwa.a_uio = NULL;
+ vwa.a_ioflag = 0;
+ vwa.a_context = ctx;
+ if (isfinderinfo) {
+ auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf));
+ uio_addiov(auio, (uintptr_t)&finfo, sizeof(finfo));
+ vwa.a_uio = auio;
+ } else if (uio_resid(uio) > 0) {
+ vwa.a_uio = uio;
+ }
+ if (vwa.a_uio) {
+ error = nfs_vnop_write(&vwa);
+ if (!error) {
+ error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0);
+ }
+ }
+doclose:
+ /* Close the xattr. */
+ if (nofp) {
+ int busyerror = nfs_open_file_set_busy(nofp, NULL);
+ closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx);
+ if (!busyerror) {
+ nfs_open_file_clear_busy(nofp);
+ }
+ }
+ if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */
+ error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx);
+ if (error == ENOENT) {
+ error = 0;
+ }
+ }
+ if (!error) {
+ error = closeerror;
+ }
+out:
+ if (anp) {
+ vnode_put(NFSTOV(anp));
+ }
+ if (error == ENOENT) {
+ error = ENOATTR;
+ }
+ return error;
+}
+
+int
+nfs4_vnop_removexattr(
+ struct vnop_removexattr_args /* {
+ * struct vnodeop_desc *a_desc;
+ * vnode_t a_vp;
+ * const char * a_name;
+ * int a_options;
+ * vfs_context_t a_context;
+ * } */*ap)
+{
+ struct nfsmount *nmp = VTONMP(ap->a_vp);
+ int error;
+
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+ if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
+ return ENOTSUP;
+ }
+
+ error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context);
+ if (error == ENOENT) {
+ error = ENOATTR;
+ }
+ return error;
+}
+
+int
+nfs4_vnop_listxattr(
+ struct vnop_listxattr_args /* {
+ * struct vnodeop_desc *a_desc;
+ * vnode_t a_vp;
+ * uio_t a_uio;
+ * size_t *a_size;
+ * int a_options;
+ * vfs_context_t a_context;
+ * } */*ap)
+{
+ vfs_context_t ctx = ap->a_context;
+ nfsnode_t np = VTONFS(ap->a_vp);
+ uio_t uio = ap->a_uio;
+ nfsnode_t adnp = NULL;
+ struct nfsmount *nmp;
+ int error, done, i;
+ struct nfs_vattr nvattr;
+ uint64_t cookie, nextcookie, lbn = 0;
+ struct nfsbuf *bp = NULL;
+ struct nfs_dir_buf_header *ndbhp;
+ struct direntry *dp;
+
+ nmp = VTONMP(ap->a_vp);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+
+ if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
+ return ENOTSUP;
+ }
+
+ error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED);
+ if (error) {
+ return error;
+ }
+ if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
+ !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
+ return 0;
+ }
+
+ if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) {
+ return error;
+ }
+ adnp = nfs4_named_attr_dir_get(np, 1, ctx);
+ nfs_node_clear_busy(np);
+ if (!adnp) {
+ goto out;
+ }
+
+ if ((error = nfs_node_lock(adnp))) {
+ goto out;
+ }
+
+ if (adnp->n_flag & NNEEDINVALIDATE) {
+ adnp->n_flag &= ~NNEEDINVALIDATE;
+ nfs_invaldir(adnp);
+ nfs_node_unlock(adnp);
+ error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
+ if (!error) {
+ error = nfs_node_lock(adnp);
+ }
+ if (error) {
+ goto out;
+ }
+ }
+
+ /*
+ * check for need to invalidate when (re)starting at beginning
+ */
+ if (adnp->n_flag & NMODIFIED) {
+ nfs_invaldir(adnp);
+ nfs_node_unlock(adnp);
+ if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) {
+ goto out;
+ }
+ } else {
+ nfs_node_unlock(adnp);
+ }
+ /* nfs_getattr() will check changed and purge caches */
+ if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) {
+ goto out;
+ }
+
+ if (uio && (uio_resid(uio) == 0)) {
+ goto out;
+ }
+
+ done = 0;
+ nextcookie = lbn = 0;
+
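+ /* Walk the attrdir's readdir buffers, copying out each name as a nul-terminated string (or just summing lengths if no uio). */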
+ while (!error && !done) {
+ OSAddAtomic64(1, &nfsstats.biocache_readdirs);
+ cookie = nextcookie;
+getbuffer:
+ error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp);
+ if (error) {
+ goto out;
+ }
+ ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
+ if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) {
+ if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */
+ ndbhp->ndbh_flags = 0;
+ ndbhp->ndbh_count = 0;
+ ndbhp->ndbh_entry_end = sizeof(*ndbhp);
+ ndbhp->ndbh_ncgen = adnp->n_ncgen;
+ }
+ error = nfs_buf_readdir(bp, ctx);
+ if (error == NFSERR_DIRBUFDROPPED) {
+ goto getbuffer;
+ }
+ if (error) {
+ nfs_buf_release(bp, 1);
+ }
+ if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) {
+ if (!nfs_node_lock(adnp)) {
+ nfs_invaldir(adnp);
+ nfs_node_unlock(adnp);
+ }
+ nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1);
+ if (error == NFSERR_BAD_COOKIE) {
+ error = ENOENT;
+ }
+ }
+ if (error) {
+ goto out;
+ }
+ }
+
+ /* go through all the entries copying/counting */
+ dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp);
+ for (i = 0; i < ndbhp->ndbh_count; i++) {
+ if (!xattr_protected(dp->d_name)) {
+ if (uio == NULL) {
+ *ap->a_size += dp->d_namlen + 1;
+ } else if (uio_resid(uio) < (dp->d_namlen + 1)) {
+ error = ERANGE;
+ } else {
+ error = uiomove(dp->d_name, dp->d_namlen + 1, uio);
+ if (error && (error != EFAULT)) {
+ error = ERANGE;
+ }
+ }
+ }
+ nextcookie = dp->d_seekoff;
+ dp = NFS_DIRENTRY_NEXT(dp);
+ }
+
+ if (i == ndbhp->ndbh_count) {
+ /* hit end of buffer, move to next buffer */
+ lbn = nextcookie;
+ /* if we also hit EOF, we're done */
+ if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) {
+ done = 1;
+ }
+ }
+ if (!error && !done && (nextcookie == cookie)) {
+ printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count);
+ error = EIO;
+ }
+ nfs_buf_release(bp, 1);
+ }
+out:
+ if (adnp) {
+ vnode_put(NFSTOV(adnp));
+ }
+ return error;
+}
+
+#if NAMEDSTREAMS
+int
+nfs4_vnop_getnamedstream(
+ struct vnop_getnamedstream_args /* {
+ * struct vnodeop_desc *a_desc;
+ * vnode_t a_vp;
+ * vnode_t *a_svpp;
+ * const char *a_name;
+ * enum nsoperation a_operation;
+ * int a_flags;
+ * vfs_context_t a_context;
+ * } */*ap)
+{
+ vfs_context_t ctx = ap->a_context;
+ struct nfsmount *nmp;
+ struct nfs_vattr nvattr;
+ struct componentname cn;
+ nfsnode_t anp;
+ int error = 0;
+
+ nmp = VTONMP(ap->a_vp);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+
+ if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
+ return ENOTSUP;
+ }
+ error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED);
+ if (error) {
+ return error;
+ }
+ if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) &&
+ !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) {
+ return ENOATTR;
+ }
+
+ bzero(&cn, sizeof(cn));
+ cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
+ cn.cn_namelen = strlen(ap->a_name);
+ cn.cn_nameiop = LOOKUP;
+ cn.cn_flags = MAKEENTRY;
+
+ error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE,
+ 0, ctx, &anp, NULL);
+ if ((!error && !anp) || (error == ENOENT)) {
+ error = ENOATTR;
+ }
+ if (!error && anp) {
+ *ap->a_svpp = NFSTOV(anp);
+ } else if (anp) {
+ vnode_put(NFSTOV(anp));
+ }
+ return error;
+}
+
+int
+nfs4_vnop_makenamedstream(
+ struct vnop_makenamedstream_args /* {
+ * struct vnodeop_desc *a_desc;
+ * vnode_t *a_svpp;
+ * vnode_t a_vp;
+ * const char *a_name;
+ * int a_flags;
+ * vfs_context_t a_context;
+ * } */*ap)
+{
+ vfs_context_t ctx = ap->a_context;
+ struct nfsmount *nmp;
+ struct componentname cn;
+ nfsnode_t anp;
+ int error = 0;
+
+ nmp = VTONMP(ap->a_vp);
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+
+ if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
+ return ENOTSUP;
+ }
+
+ bzero(&cn, sizeof(cn));
+ cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *);
+ cn.cn_namelen = strlen(ap->a_name);
+ cn.cn_nameiop = CREATE;
+ cn.cn_flags = MAKEENTRY;
+
+ error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH,
+ NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL);
+ if ((!error && !anp) || (error == ENOENT)) {
+ error = ENOATTR;
+ }
+ if (!error && anp) {
+ *ap->a_svpp = NFSTOV(anp);
+ } else if (anp) {
+ vnode_put(NFSTOV(anp));
+ }
+ return error;
+}
+
+int
+nfs4_vnop_removenamedstream(
+ struct vnop_removenamedstream_args /* {
+ * struct vnodeop_desc *a_desc;
+ * vnode_t a_vp;
+ * vnode_t a_svp;
+ * const char *a_name;
+ * int a_flags;
+ * vfs_context_t a_context;
+ * } */*ap)
+{
+ struct nfsmount *nmp = VTONMP(ap->a_vp);
+ nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL;
+ nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL;
+
+ if (nfs_mount_gone(nmp)) {
+ return ENXIO;
+ }
+
+ /*
+ * Given that a_svp is a named stream, checking for
+ * named attribute support is kinda pointless.
+ */
+ if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) {
+ return ENOTSUP;
+ }
+
+ return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context);