+/*
+ * Do operations associated with quotas
+ */
+#if !QUOTA
+/*
+ * Stub used when quota support is compiled out (!QUOTA):
+ * all quota operations fail with ENOTSUP.
+ */
+int
+nfs_vfs_quotactl(
+ __unused mount_t mp,
+ __unused int cmds,
+ __unused uid_t uid,
+ __unused caddr_t datap,
+ __unused vfs_context_t context)
+{
+ return (ENOTSUP);
+}
+#else
+
+/*
+ * nfs3_getquota
+ *
+ * Fetch quota values for the given id/type from an NFSv2/v3 server's
+ * rquota daemon and fill in *dqb.  The rquota port is discovered via
+ * the server's portmapper and cached on the mount for reuse.
+ *
+ * Returns 0 on success; ENOENT if the server reports no quota for the
+ * id, EPERM if the server refuses, EIO on other rquota errors, or an
+ * RPC/transport error.
+ */
+int
+nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
+{
+ int error = 0, auth_len, slen, timeo;
+ /* group quotas require the extended rquota protocol version */
+ int rqvers = (type == GRPQUOTA) ? RPCRQUOTA_EXT_VER : RPCRQUOTA_VER;
+ thread_t thd = vfs_context_thread(ctx);
+ kauth_cred_t cred = vfs_context_ucred(ctx);
+ char *path;
+ uint64_t xid = 0;
+ struct nfsm_chain nmreq, nmrep;
+ mbuf_t mreq;
+ uint32_t val = 0, bsize = 0;
+ struct sockaddr *nam = mbuf_data(nmp->nm_nam);
+ struct sockaddr_in saddr;
+ struct timeval now;
+
+ /* start from the server's address; the port is overwritten below */
+ bcopy(nam, &saddr, min(sizeof(saddr), nam->sa_len));
+ /*
+ * AUTH_SYS credential size: one word per supplementary group
+ * (capped at nm_numgrps) plus five fixed words.
+ */
+ auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
+ nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
+ 5 * NFSX_UNSIGNED;
+ /* soft mounts give up after 10 seconds, hard mounts after 60 */
+ timeo = (nmp->nm_flag & NFSMNT_SOFT) ? 10 : 60;
+ nfsm_chain_null(&nmreq);
+ nfsm_chain_null(&nmrep);
+
+ /* check if we have a recently cached rquota port */
+ if (nmp->nm_rqport) {
+ microuptime(&now);
+ /* cached port is considered valid for 60 seconds */
+ if ((nmp->nm_rqportstamp + 60) >= (uint32_t)now.tv_sec)
+ goto got_rqport;
+ }
+
+ /* send portmap request to get rquota port */
+ saddr.sin_port = htons(PMAPPORT);
+ nfsm_chain_build_alloc_init(error, &nmreq, 4*NFSX_UNSIGNED);
+ nfsm_chain_add_32(error, &nmreq, RPCPROG_RQUOTA);
+ nfsm_chain_add_32(error, &nmreq, rqvers);
+ nfsm_chain_add_32(error, &nmreq, IPPROTO_UDP);
+ nfsm_chain_add_32(error, &nmreq, 0);
+ nfsm_chain_build_done(error, &nmreq);
+ nfsmout_if(error);
+ error = nfsm_rpchead2(SOCK_DGRAM, PMAPPROG, PMAPVERS, PMAPPROC_GETPORT,
+ RPCAUTH_SYS, auth_len, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
+ nfsmout_if(error);
+ /* mreq now owns the chain; clear so cleanup doesn't double-free */
+ nmreq.nmc_mhead = NULL;
+ error = nfs_aux_request(nmp, thd, &saddr, mreq, R_XID32(xid), 0, timeo, &nmrep);
+ nfsmout_if(error);
+
+ /* grab rquota port from portmap response */
+ nfsm_chain_get_32(error, &nmrep, val);
+ nfsmout_if(error);
+ /* NOTE(review): val==0 (program not registered) is not rejected here;
+ * a zero port would be cached and used below — confirm intended */
+ nmp->nm_rqport = val;
+ microuptime(&now);
+ nmp->nm_rqportstamp = now.tv_sec;
+ nfsm_chain_cleanup(&nmreq);
+ nfsm_chain_cleanup(&nmrep);
+ xid = 0;
+
+got_rqport:
+ /* rquota request */
+ saddr.sin_port = htons(nmp->nm_rqport);
+ /* f_mntfromname is "host:path"; skip ahead to the server-side path */
+ path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0];
+ while (*path && (*path != '/'))
+ path++;
+ slen = strlen(path);
+ nfsm_chain_build_alloc_init(error, &nmreq, 3 * NFSX_UNSIGNED + nfsm_rndup(slen));
+ nfsm_chain_add_string(error, &nmreq, path, slen);
+ /* the extended (group-capable) version carries an explicit type word */
+ if (type == GRPQUOTA)
+ nfsm_chain_add_32(error, &nmreq, type);
+ nfsm_chain_add_32(error, &nmreq, id);
+ nfsm_chain_build_done(error, &nmreq);
+ nfsmout_if(error);
+ error = nfsm_rpchead2(SOCK_DGRAM, RPCPROG_RQUOTA, rqvers, RPCRQUOTA_GET,
+ RPCAUTH_SYS, auth_len, cred, NULL, nmreq.nmc_mhead, &xid, &mreq);
+ nfsmout_if(error);
+ nmreq.nmc_mhead = NULL;
+ error = nfs_aux_request(nmp, thd, &saddr, mreq, R_XID32(xid), 0, timeo, &nmrep);
+ nfsmout_if(error);
+
+ /* parse rquota response */
+ nfsm_chain_get_32(error, &nmrep, val);
+ /* map rquota status to errno */
+ if (!error && (val != RQUOTA_STAT_OK)) {
+ if (val == RQUOTA_STAT_NOQUOTA)
+ error = ENOENT;
+ else if (val == RQUOTA_STAT_EPERM)
+ error = EPERM;
+ else
+ error = EIO;
+ }
+ /* server's block size; used to scale block counts to bytes below */
+ nfsm_chain_get_32(error, &nmrep, bsize);
+ /* skip one word (presumably the rq_active flag — confirm vs rquota.x) */
+ nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED);
+ nfsm_chain_get_32(error, &nmrep, val);
+ nfsmout_if(error);
+ dqb->dqb_bhardlimit = (uint64_t)val * bsize;
+ nfsm_chain_get_32(error, &nmrep, val);
+ nfsmout_if(error);
+ dqb->dqb_bsoftlimit = (uint64_t)val * bsize;
+ nfsm_chain_get_32(error, &nmrep, val);
+ nfsmout_if(error);
+ dqb->dqb_curbytes = (uint64_t)val * bsize;
+ nfsm_chain_get_32(error, &nmrep, dqb->dqb_ihardlimit);
+ nfsm_chain_get_32(error, &nmrep, dqb->dqb_isoftlimit);
+ nfsm_chain_get_32(error, &nmrep, dqb->dqb_curinodes);
+ nfsm_chain_get_32(error, &nmrep, dqb->dqb_btime);
+ nfsm_chain_get_32(error, &nmrep, dqb->dqb_itime);
+ nfsmout_if(error);
+ dqb->dqb_id = id;
+nfsmout:
+ nfsm_chain_cleanup(&nmreq);
+ nfsm_chain_cleanup(&nmrep);
+ return (error);
+}
+
+/*
+ * nfs4_getquota
+ *
+ * Fetch quota values for the given user id via NFSv4 quota attributes
+ * (a PUTFH+GETATTR compound on the mount's root node) and fill in *dqb.
+ *
+ * Returns ENOTSUP for non-user quota types or servers that advertise
+ * none of the quota attributes, ENOMEM if an impersonation credential
+ * cannot be created, ENXIO if the mount's root node is gone, or an
+ * RPC error from the request.
+ */
+int
+nfs4_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struct dqblk *dqb)
+{
+ nfsnode_t np;
+ int error = 0, status, nfsvers, numops;
+ u_int64_t xid;
+ struct nfsm_chain nmreq, nmrep;
+ uint32_t bitmap[NFS_ATTR_BITMAP_LEN];
+ thread_t thd = vfs_context_thread(ctx);
+ kauth_cred_t cred = vfs_context_ucred(ctx);
+
+ if (type != USRQUOTA) /* NFSv4 only supports user quotas */
+ return (ENOTSUP);
+
+ /* first check that the server supports any of the quota attributes */
+ if (!NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) &&
+ !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) &&
+ !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED))
+ return (ENOTSUP);
+
+ /*
+ * The credential passed to the server needs to have
+ * an effective uid that matches the given uid.
+ */
+ if (id != kauth_cred_getuid(cred)) {
+ /* impersonate: copy the caller's groups but substitute the uid */
+ struct ucred temp_cred;
+ bzero(&temp_cred, sizeof(temp_cred));
+ temp_cred.cr_uid = id;
+ temp_cred.cr_ngroups = cred->cr_ngroups;
+ bcopy(cred->cr_groups, temp_cred.cr_groups, sizeof(temp_cred.cr_groups));
+ cred = kauth_cred_create(&temp_cred);
+ if (!IS_VALID_CRED(cred))
+ return (ENOMEM);
+ } else {
+ /* take a reference so the unref below is always balanced */
+ kauth_cred_ref(cred);
+ }
+
+ nfsvers = nmp->nm_vers;
+ /* query against the mount's root node */
+ np = nmp->nm_dnp;
+ if (!np)
+ error = ENXIO;
+ if (error || ((error = vnode_get(NFSTOV(np))))) {
+ kauth_cred_unref(&cred);
+ return(error);
+ }
+
+ nfsm_chain_null(&nmreq);
+ nfsm_chain_null(&nmrep);
+
+ // PUTFH + GETATTR
+ numops = 2;
+ nfsm_chain_build_alloc_init(error, &nmreq, 15 * NFSX_UNSIGNED);
+ nfsm_chain_add_compound_header(error, &nmreq, "quota", numops);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_PUTFH);
+ nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize);
+ numops--;
+ nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR);
+ /* request only the quota attributes, masked by what the server supports */
+ NFS_CLEAR_ATTRIBUTES(bitmap);
+ NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_HARD);
+ NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_AVAIL_SOFT);
+ NFS_BITMAP_SET(bitmap, NFS_FATTR_QUOTA_USED);
+ nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap,
+ NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr);
+ nfsm_chain_build_done(error, &nmreq);
+ nfsm_assert(error, (numops == 0), EPROTO);
+ nfsmout_if(error);
+ error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, 0, &nmrep, &xid, &status);
+ nfsm_chain_skip_tag(error, &nmrep);
+ nfsm_chain_get_32(error, &nmrep, numops);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH);
+ nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
+ nfsm_assert(error, NFSTONMP(np), ENXIO);
+ nfsmout_if(error);
+ /* parse the GETATTR reply directly into the dqblk */
+ error = nfs4_parsefattr(&nmrep, NULL, NULL, NULL, dqb);
+ nfsmout_if(error);
+ nfsm_assert(error, NFSTONMP(np), ENXIO);
+nfsmout:
+ nfsm_chain_cleanup(&nmreq);
+ nfsm_chain_cleanup(&nmrep);
+ vnode_put(NFSTOV(np));
+ kauth_cred_unref(&cred);
+ return (error);
+}
+
+/*
+ * nfs_vfs_quotactl
+ *
+ * Handle a quota control request on an NFS mount.  Only Q_GETQUOTA is
+ * supported; the actual fetch is dispatched to the version-specific
+ * handler via nmp->nm_funcs->nf_getquota.
+ *
+ * Returns ENXIO if the mount is gone, ENOTSUP/EINVAL for unsupported
+ * or malformed commands, a permission error for cross-user queries by
+ * non-superusers, or the handler's error.  Returns 0 without doing
+ * anything if the mount is busy.
+ */
+int
+nfs_vfs_quotactl(mount_t mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t ctx)
+{
+	struct nfsmount *nmp;
+	int cmd, type, error;
+	uid_t ruid = vfs_context_ucred(ctx)->cr_ruid;
+	struct dqblk *dqb = (struct dqblk*)datap;
+
+	if (!(nmp = VFSTONFS(mp)))
+		return (ENXIO);
+
+	/* uid of ~0 means "the calling user" */
+	if (uid == ~0U)
+		uid = ruid;
+
+	/* we can only support Q_GETQUOTA */
+	cmd = cmds >> SUBCMDSHIFT;
+	switch (cmd) {
+	case Q_GETQUOTA:
+		break;
+	case Q_QUOTAON:
+	case Q_QUOTAOFF:
+	case Q_SETQUOTA:
+	case Q_SETUSE:
+	case Q_SYNC:
+	case Q_QUOTASTAT:
+		return (ENOTSUP);
+	default:
+		return (EINVAL);
+	}
+
+	type = cmds & SUBCMDMASK;
+	if ((u_int)type >= MAXQUOTAS)
+		return (EINVAL);
+	/* only the superuser may query quotas for other users */
+	if ((uid != ruid) && ((error = vfs_context_suser(ctx))))
+		return (error);
+
+	if (vfs_busy(mp, LK_NOWAIT))
+		return (0);
+	bzero(dqb, sizeof(*dqb));
+	error = nmp->nm_funcs->nf_getquota(nmp, ctx, uid, type, dqb);
+	vfs_unbusy(mp);
+	return (error);
+}
+#endif
+