[apple/xnu.git] / bsd / hfs / hfs_vnops.c (xnu-1228.5.20)
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/param.h>
32 #include <sys/file_internal.h>
33 #include <sys/dirent.h>
34 #include <sys/stat.h>
35 #include <sys/buf.h>
36 #include <sys/mount.h>
37 #include <sys/vnode_if.h>
38 #include <sys/vnode_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/ubc.h>
41 #include <sys/ubc_internal.h>
42 #include <sys/paths.h>
43 #include <sys/quota.h>
44 #include <sys/time.h>
45 #include <sys/disk.h>
46 #include <sys/kauth.h>
47 #include <sys/uio_internal.h>
48
49 #include <miscfs/specfs/specdev.h>
50 #include <miscfs/fifofs/fifo.h>
51 #include <vfs/vfs_support.h>
52 #include <machine/spl.h>
53
54 #include <sys/kdebug.h>
55 #include <sys/sysctl.h>
56
57 #include "hfs.h"
58 #include "hfs_catalog.h"
59 #include "hfs_cnode.h"
60 #include "hfs_dbg.h"
61 #include "hfs_mount.h"
62 #include "hfs_quota.h"
63 #include "hfs_endian.h"
64
65 #include "hfscommon/headers/BTreesInternal.h"
66 #include "hfscommon/headers/FileMgrInternal.h"
67
68
69 #define KNDETACH_VNLOCKED 0x00000001
70
71 #define CARBON_TEMP_DIR_NAME "Cleanup At Startup"
72
73
74 /* Global vfs data structures for hfs */
75
76 /* Always do F_FULLFSYNC when fsync is called? 1=yes, 0=no (for "various" reasons the default is 'no') */
77 int always_do_fullfsync = 0;
78 SYSCTL_INT (_kern, OID_AUTO, always_do_fullfsync, CTLFLAG_RW, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
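/*
 * Annotation -- a usage sketch, not part of the original source: with the
 * OID declared above, this knob would normally be toggled from user space
 * with the standard sysctl(8) tool, e.g.:
 *
 *     sysctl -w kern.always_do_fullfsync=1    # force F_FULLFSYNC on every fsync
 *     sysctl kern.always_do_fullfsync         # read back the current setting
 */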
79
80 static int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
81 struct componentname *cnp, struct vnode_attr *vap,
82 vfs_context_t ctx);
83
84 static int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
85 static int hfs_metasync_all(struct hfsmount *hfsmp);
86
87 static int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
88 int);
89
90 static int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
91 int, int, int);
92
93 #if FIFO
94 static int hfsfifo_read(struct vnop_read_args *);
95 static int hfsfifo_write(struct vnop_write_args *);
96 static int hfsfifo_close(struct vnop_close_args *);
97 static int hfsfifo_kqfilt_add(struct vnop_kqfilt_add_args *);
98 static int hfsfifo_kqfilt_remove(struct vnop_kqfilt_remove_args *);
99
100 extern int (**fifo_vnodeop_p)(void *);
101 #endif /* FIFO */
102
103 static int hfs_vnop_close(struct vnop_close_args*);
104 static int hfs_vnop_create(struct vnop_create_args*);
105 static int hfs_vnop_exchange(struct vnop_exchange_args*);
106 static int hfs_vnop_fsync(struct vnop_fsync_args*);
107 static int hfs_vnop_mkdir(struct vnop_mkdir_args*);
108 static int hfs_vnop_mknod(struct vnop_mknod_args*);
109 static int hfs_vnop_getattr(struct vnop_getattr_args*);
110 static int hfs_vnop_open(struct vnop_open_args*);
111 static int hfs_vnop_readdir(struct vnop_readdir_args*);
112 static int hfs_vnop_remove(struct vnop_remove_args*);
113 static int hfs_vnop_rename(struct vnop_rename_args*);
114 static int hfs_vnop_rmdir(struct vnop_rmdir_args*);
115 static int hfs_vnop_symlink(struct vnop_symlink_args*);
116 static int hfs_vnop_setattr(struct vnop_setattr_args*);
117 static int hfs_vnop_readlink(struct vnop_readlink_args *);
118 static int hfs_vnop_pathconf(struct vnop_pathconf_args *);
119 static int hfs_vnop_kqfiltremove(struct vnop_kqfilt_remove_args *);
120 static int hfs_vnop_whiteout(struct vnop_whiteout_args *);
121 static int hfsspec_read(struct vnop_read_args *);
122 static int hfsspec_write(struct vnop_write_args *);
123 static int hfsspec_close(struct vnop_close_args *);
124
125 /* Options for hfs_removedir and hfs_removefile */
126 #define HFSRM_SKIP_RESERVE 0x01
127
128
129
130
131 /*****************************************************************************
132 *
133 * Common Operations on vnodes
134 *
135 *****************************************************************************/
136
137 /*
138 * Create a regular file.
139 */
140 static int
141 hfs_vnop_create(struct vnop_create_args *ap)
142 {
143 int error;
144
145 again:
146 error = hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
147
148 /*
149 * We speculatively skipped the original lookup of the leaf
150 * for CREATE. Since it exists, go get it as long as they
151 * didn't want an exclusive create.
152 */
153 if ((error == EEXIST) && !(ap->a_vap->va_vaflags & VA_EXCLUSIVE)) {
154 struct vnop_lookup_args args;
155
156 args.a_desc = &vnop_lookup_desc;
157 args.a_dvp = ap->a_dvp;
158 args.a_vpp = ap->a_vpp;
159 args.a_cnp = ap->a_cnp;
160 args.a_context = ap->a_context;
161 args.a_cnp->cn_nameiop = LOOKUP;
162 error = hfs_vnop_lookup(&args);
163 /*
164 * We can also race with remove for this file.
165 */
166 if (error == ENOENT) {
167 goto again;
168 }
169
170                 /* Make sure it was a regular file. */
171 if ((error == 0) && !vnode_isreg(*args.a_vpp)) {
172 vnode_put(*args.a_vpp);
173 error = EEXIST;
174 }
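                /*
                 * Annotation: restore the caller's cn_nameiop to CREATE before
                 * returning; the componentname belongs to the caller and was
                 * only borrowed for the LOOKUP retry above.
                 */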
175 args.a_cnp->cn_nameiop = CREATE;
176 }
177 return (error);
178 }
179
180 /*
181 * Make device special file.
182 */
183 static int
184 hfs_vnop_mknod(struct vnop_mknod_args *ap)
185 {
186 struct vnode_attr *vap = ap->a_vap;
187 struct vnode *dvp = ap->a_dvp;
188 struct vnode **vpp = ap->a_vpp;
189 struct cnode *cp;
190 int error;
191
192 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
193 return (ENOTSUP);
194 }
195
196 /* Create the vnode */
197 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
198 if (error)
199 return (error);
200
201 cp = VTOC(*vpp);
202 cp->c_touch_acctime = TRUE;
203 cp->c_touch_chgtime = TRUE;
204 cp->c_touch_modtime = TRUE;
205
206 if ((vap->va_rdev != VNOVAL) &&
207 (vap->va_type == VBLK || vap->va_type == VCHR))
208 cp->c_rdev = vap->va_rdev;
209
210 return (0);
211 }
212
213 /*
214 * Open a file/directory.
215 */
216 static int
217 hfs_vnop_open(struct vnop_open_args *ap)
218 {
219 struct vnode *vp = ap->a_vp;
220 struct filefork *fp;
221 struct timeval tv;
222 int error;
223
224 /*
225 * Files marked append-only must be opened for appending.
226 */
227 if ((VTOC(vp)->c_flags & APPEND) && !vnode_isdir(vp) &&
228 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
229 return (EPERM);
230
231 if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
232 return (EBUSY); /* file is in use by the kernel */
233
234 /* Don't allow journal file to be opened externally. */
235 if (VTOC(vp)->c_fileid == VTOHFS(vp)->hfs_jnlfileid)
236 return (EPERM);
237 /*
238 * On the first (non-busy) open of a fragmented
239  * file, attempt to de-frag it (if it's less than 20MB).
240 */
241 if ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) ||
242 (VTOHFS(vp)->jnl == NULL) ||
243 #if NAMEDSTREAMS
244 !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
245 #else
246 !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
247 #endif
248 return (0);
249 }
250
251 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
252 return (error);
253 fp = VTOF(vp);
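        /*
         * Annotation: an HFS+ catalog record holds eight extent descriptors per
         * fork, so a non-zero blockCount in ff_extents[7] means the fork already
         * uses all eight and any further extents spill into the extents overflow
         * file -- i.e. the file is fragmented enough to be worth relocating.
         */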
254 if (fp->ff_blocks &&
255 fp->ff_extents[7].blockCount != 0 &&
256 fp->ff_size <= (20 * 1024 * 1024)) {
257 struct timeval now;
258 struct cnode *cp = VTOC(vp);
259 /*
260 * Wait until system bootup is done (3 min).
261 * And don't relocate a file that's been modified
262 * within the past minute -- this can lead to
263 * system thrashing.
264 */
265 microuptime(&tv);
266 microtime(&now);
267 if (tv.tv_sec > (60 * 3) &&
268 ((now.tv_sec - cp->c_mtime) > 60)) {
269 (void) hfs_relocate(vp, VTOVCB(vp)->nextAllocation + 4096,
270 vfs_context_ucred(ap->a_context),
271 vfs_context_proc(ap->a_context));
272 }
273 }
274 hfs_unlock(VTOC(vp));
275
276 return (0);
277 }
278
279
280 /*
281 * Close a file/directory.
282 */
283 static int
284 hfs_vnop_close(ap)
285 struct vnop_close_args /* {
286 struct vnode *a_vp;
287 int a_fflag;
288 vfs_context_t a_context;
289 } */ *ap;
290 {
291 register struct vnode *vp = ap->a_vp;
292 register struct cnode *cp;
293 struct proc *p = vfs_context_proc(ap->a_context);
294 struct hfsmount *hfsmp;
295 int busy;
296 int knownrefs = 0;
297 int tooktrunclock = 0;
298
299 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0)
300 return (0);
301 cp = VTOC(vp);
302 hfsmp = VTOHFS(vp);
303
304 /*
305 * If the rsrc fork is a named stream, it holds a usecount on
306 * the data fork, which prevents the data fork from getting recycled, which
307 * then prevents the de-allocation of its extra blocks.
308 * Do checks for truncation on close. Purge extra extents if they
309 * exist. Make sure the vp is not a directory, that it has a resource
310      * fork, and that the rsrc fork is a named stream.
311 */
312
313 if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
314 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
315 uint32_t blks;
316
317 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
318 /*
319 * If there are any extra blocks and there are only 2 refs on
320 * this vp (ourselves + rsrc fork holding ref on us), go ahead
321 * and try to truncate the extra blocks away.
322 */
323 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
324                     // release cnode lock; must acquire truncate lock BEFORE cnode lock
325 hfs_unlock (cp);
326
327 hfs_lock_truncate(cp, TRUE);
328 tooktrunclock = 1;
329
330 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
331 hfs_unlock_truncate(cp, TRUE);
332 return (0);
333 }
334
335                     // now re-test to make sure it's still valid.
336 if (cp->c_rsrc_vp) {
337 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
338 if (!vnode_isinuse(vp, knownrefs)) {
339 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
340 if (blks < VTOF(vp)->ff_blocks) {
341 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, ap->a_context);
342 }
343 }
344 }
345 }
346 }
347
348 // if we froze the fs and we're exiting, then "thaw" the fs
349 if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
350 hfsmp->hfs_freezing_proc = NULL;
351 hfs_global_exclusive_lock_release(hfsmp);
352 lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
353 }
354
355 busy = vnode_isinuse(vp, 1);
356
357 if (busy) {
358 hfs_touchtimes(VTOHFS(vp), cp);
359 }
360 if (vnode_isdir(vp)) {
361 hfs_reldirhints(cp, busy);
362 } else if (vnode_issystem(vp) && !busy) {
363 vnode_recycle(vp);
364 }
365 if (tooktrunclock) {
366 hfs_unlock_truncate(cp, TRUE);
367 }
368
369 hfs_unlock(cp);
370 return (0);
371 }
372
373 /*
374 * Get basic attributes.
375 */
376 static int
377 hfs_vnop_getattr(struct vnop_getattr_args *ap)
378 {
379 #define VNODE_ATTR_TIMES \
380 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
381 #define VNODE_ATTR_AUTH \
382 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
383 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
384
385 struct vnode *vp = ap->a_vp;
386 struct vnode_attr *vap = ap->a_vap;
387 struct vnode *rvp = NULL;
388 struct hfsmount *hfsmp;
389 struct cnode *cp;
390 uint64_t data_size;
391 enum vtype v_type;
392 int error = 0;
393
394 cp = VTOC(vp);
395
396 /*
397 * Shortcut for vnode_authorize path. Each of the attributes
398 * in this set is updated atomically so we don't need to take
399 * the cnode lock to access them.
400 */
401 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
402 /* Make sure file still exists. */
403 if (cp->c_flag & C_NOEXISTS)
404 return (ENOENT);
405
406 vap->va_uid = cp->c_uid;
407 vap->va_gid = cp->c_gid;
408 vap->va_mode = cp->c_mode;
409 vap->va_flags = cp->c_flags;
410 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
411
412 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
413 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
414 VATTR_SET_SUPPORTED(vap, va_acl);
415 }
416 return (0);
417 }
418 hfsmp = VTOHFS(vp);
419 v_type = vnode_vtype(vp);
420
421 /*
422 * If time attributes are requested and we have cnode times
423 * that require updating, then acquire an exclusive lock on
424 * the cnode before updating the times. Otherwise we can
425 * just acquire a shared lock.
426 */
427 if ((vap->va_active & VNODE_ATTR_TIMES) &&
428 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
429 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
430 return (error);
431 hfs_touchtimes(hfsmp, cp);
432 } else {
433 if ((error = hfs_lock(cp, HFS_SHARED_LOCK)))
434 return (error);
435 }
436
437 if (v_type == VDIR) {
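        /*
         * Annotation: directories have no data fork on HFS+, so a nominal size
         * is synthesized from the entry count (the entries plus "." and "..").
         */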
438 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
439
440 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
441 int nlink;
442
443 /*
444                      * For directories, the va_nlink is essentially a count
445 * of the ".." references to a directory plus the "."
446 * reference and the directory itself. So for HFS+ this
447 * becomes the sub-directory count plus two.
448 *
449 * In the absence of a sub-directory count we use the
450 * directory's item count. This will be too high in
451 * most cases since it also includes files.
452 */
453 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
454 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
455 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
456 else
457 nlink = cp->c_entries;
458
459 /* Account for ourself and our "." entry */
460 nlink += 2;
461 /* Hide our private directories. */
462 if (cp->c_cnid == kHFSRootFolderID) {
463 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
464 --nlink;
465 }
466 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
467 --nlink;
468 }
469 }
470 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
471 }
472 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
473 int entries;
474
475 entries = cp->c_entries;
476 /* Hide our private files and directories. */
477 if (cp->c_cnid == kHFSRootFolderID) {
478 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
479 --entries;
480 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
481 --entries;
482 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
483 entries -= 2; /* hide the journal files */
484 }
485 VATTR_RETURN(vap, va_nchildren, entries);
486 }
487 /*
488 * The va_dirlinkcount is the count of real directory hard links.
489              * (i.e. it's not the sum of the implied "." and ".." references)
490 */
491 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
492 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
493 }
494 } else /* !VDIR */ {
495 data_size = VCTOF(vp, cp)->ff_size;
496
497 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
498 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
499 u_int64_t blocks;
500
501 blocks = VCTOF(vp, cp)->ff_blocks;
502 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
503 }
504 }
505
506 /* conditional because 64-bit arithmetic can be expensive */
507 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
508 if (v_type == VDIR) {
509 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
510 } else {
511 u_int64_t total_size = 0;
512 struct cnode *rcp;
513
514 if (cp->c_datafork) {
515 total_size = cp->c_datafork->ff_size;
516 }
517
518 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
519 error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE);
520 if (error) {
521 goto out;
522 }
523
524 rcp = VTOC(rvp);
525 if (rcp && rcp->c_rsrcfork) {
526 total_size += rcp->c_rsrcfork->ff_size;
527 }
528 }
529
530 VATTR_RETURN(vap, va_total_size, total_size);
531 }
532 }
533 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
534 if (v_type == VDIR) {
535 VATTR_RETURN(vap, va_total_alloc, 0);
536 } else {
537 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
538 }
539 }
540
541 /*
542 * If the VFS wants extended security data, and we know that we
543 * don't have any (because it never told us it was setting any)
544 * then we can return the supported bit and no data. If we do
545 * have extended security, we can just leave the bit alone and
546 * the VFS will use the fallback path to fetch it.
547 */
548 if (VATTR_IS_ACTIVE(vap, va_acl)) {
549 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
550 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
551 VATTR_SET_SUPPORTED(vap, va_acl);
552 }
553 }
554 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
555          /* Access times are lazily updated; get the current time if needed */
556 if (cp->c_touch_acctime) {
557 struct timeval tv;
558
559 microtime(&tv);
560 vap->va_access_time.tv_sec = tv.tv_sec;
561 } else {
562 vap->va_access_time.tv_sec = cp->c_atime;
563 }
564 vap->va_access_time.tv_nsec = 0;
565 VATTR_SET_SUPPORTED(vap, va_access_time);
566 }
567 vap->va_create_time.tv_sec = cp->c_itime;
568 vap->va_create_time.tv_nsec = 0;
569 vap->va_modify_time.tv_sec = cp->c_mtime;
570 vap->va_modify_time.tv_nsec = 0;
571 vap->va_change_time.tv_sec = cp->c_ctime;
572 vap->va_change_time.tv_nsec = 0;
573 vap->va_backup_time.tv_sec = cp->c_btime;
574 vap->va_backup_time.tv_nsec = 0;
575
576 /* XXX is this really a good 'optimal I/O size'? */
577 vap->va_iosize = hfsmp->hfs_logBlockSize;
578 vap->va_uid = cp->c_uid;
579 vap->va_gid = cp->c_gid;
580 vap->va_mode = cp->c_mode;
581 vap->va_flags = cp->c_flags;
582
583 /*
584 * Exporting file IDs from HFS Plus:
585 *
586 * For "normal" files the c_fileid is the same value as the
587 * c_cnid. But for hard link files, they are different - the
588 * c_cnid belongs to the active directory entry (ie the link)
589 * and the c_fileid is for the actual inode (ie the data file).
590 *
591 * The stat call (getattr) uses va_fileid and the Carbon APIs,
592 * which are hardlink-ignorant, will ask for va_linkid.
593 */
594 vap->va_fileid = (u_int64_t)cp->c_fileid;
595 /* Hardlinked directories have multiple cnids and parents (one per link). */
596 if ((v_type == VDIR) && (cp->c_flag & C_HARDLINK)) {
597 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
598 vap->va_parentid = (u_int64_t)hfs_currentparent(cp);
599 } else {
600 vap->va_linkid = (u_int64_t)cp->c_cnid;
601 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
602 }
603 vap->va_fsid = cp->c_dev;
604 vap->va_filerev = 0;
605 vap->va_encoding = cp->c_encoding;
606 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
607 vap->va_data_size = data_size;
608
609 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
610 vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
611 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
612 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
613 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
614 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
615 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
616 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
617 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev |
618 VNODE_ATTR_va_data_size;
619
620      /* if this is the root, let VFS find out the mount name, which may be different from the real name */
621 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
622 /* Return the name for ATTR_CMN_NAME */
623 if (cp->c_desc.cd_namelen == 0) {
624 if ((cp->c_flag & C_HARDLINK) && ((cp->c_flag & C_DELETED) == 0 || (cp->c_linkcount > 1))) {
625 cnid_t nextlinkid;
626 cnid_t prevlinkid;
627 struct vnode *file_vp;
628
629 if ((error = hfs_lookuplink(hfsmp, cp->c_fileid, &prevlinkid, &nextlinkid))) {
630 goto out;
631 }
632
633 //
634 // don't bother trying to get a linkid that's the same
635 // as the current cnid
636 //
637 if (nextlinkid == VTOC(vp)->c_cnid) {
638 if (prevlinkid == VTOC(vp)->c_cnid) {
639 hfs_unlock(cp);
640 goto out2;
641 } else {
642 nextlinkid = prevlinkid;
643 }
644 }
645
646 hfs_unlock(cp);
647
648 if (nextlinkid == 0 || (error = hfs_vget(hfsmp, nextlinkid, &file_vp, 1))) {
649 if (prevlinkid == 0 || (error = hfs_vget(hfsmp, prevlinkid, &file_vp, 1))) {
650 goto out2;
651 }
652 }
653
654 cp = VTOC(file_vp);
655 if (hfs_lock(cp, HFS_SHARED_LOCK) == 0) {
656 if (cp->c_desc.cd_namelen) {
657 strlcpy(vap->va_name, (const char *)cp->c_desc.cd_nameptr, MAXPATHLEN);
658 }
659 hfs_unlock(cp);
660 vnode_put(file_vp);
661 goto out2;
662 }
663
664 if (vnode_name(file_vp)) {
665 strlcpy(vap->va_name, vnode_name(file_vp), MAXPATHLEN);
666 } else {
667 error = ENOENT;
668 }
669 vnode_put(file_vp);
670 goto out2;
671 } else {
672 error = ENOENT;
673 goto out;
674 }
675 } else {
676 strlcpy(vap->va_name, (const char *)cp->c_desc.cd_nameptr, MAXPATHLEN);
677 VATTR_SET_SUPPORTED(vap, va_name);
678 }
679 }
680
681 out:
682 hfs_unlock(cp);
683 out2:
684 if (rvp) {
685 vnode_put(rvp);
686 }
687 return (error);
688 }
689
690 static int
691 hfs_vnop_setattr(ap)
692 struct vnop_setattr_args /* {
693 struct vnode *a_vp;
694 struct vnode_attr *a_vap;
695 vfs_context_t a_context;
696 } */ *ap;
697 {
698 struct vnode_attr *vap = ap->a_vap;
699 struct vnode *vp = ap->a_vp;
700 struct cnode *cp = NULL;
701 struct hfsmount *hfsmp;
702 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
703 struct proc *p = vfs_context_proc(ap->a_context);
704 int error = 0;
705 uid_t nuid;
706 gid_t ngid;
707
708 hfsmp = VTOHFS(vp);
709
710 /* Don't allow modification of the journal file. */
711 if (hfsmp->hfs_jnlfileid == VTOC(vp)->c_fileid) {
712 return (EPERM);
713 }
714
715 /*
716 * File size change request.
717 * We are guaranteed that this is not a directory, and that
718 * the filesystem object is writeable.
719 */
720 VATTR_SET_SUPPORTED(vap, va_data_size);
721 if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {
722
723 /* Take truncate lock before taking cnode lock. */
724 hfs_lock_truncate(VTOC(vp), TRUE);
725
726 /* Perform the ubc_setsize before taking the cnode lock. */
727 ubc_setsize(vp, vap->va_data_size);
728
729 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
730 hfs_unlock_truncate(VTOC(vp), TRUE);
731 return (error);
732 }
733 cp = VTOC(vp);
734
735 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, ap->a_context);
736
737 hfs_unlock_truncate(cp, TRUE);
738 if (error)
739 goto out;
740 }
741 if (cp == NULL) {
742 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
743 return (error);
744 cp = VTOC(vp);
745 }
746
747 /*
748      * If it is just an access time update request by itself,
749      * we know the request is from kernel-level code, and we
750      * can delay it without being as worried about consistency.
751 * This change speeds up mmaps, in the rare case that they
752 * get caught behind a sync.
753 */
754
755 if (vap->va_active == VNODE_ATTR_va_access_time) {
756 cp->c_touch_acctime=TRUE;
757 goto out;
758 }
759
760
761
762 /*
763 * Owner/group change request.
764 * We are guaranteed that the new owner/group is valid and legal.
765 */
766 VATTR_SET_SUPPORTED(vap, va_uid);
767 VATTR_SET_SUPPORTED(vap, va_gid);
768 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
769 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
770 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
771 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
772 goto out;
773
774 /*
775 * Mode change request.
776 * We are guaranteed that the mode value is valid and that in
777 * conjunction with the owner and group, this change is legal.
778 */
779 VATTR_SET_SUPPORTED(vap, va_mode);
780 if (VATTR_IS_ACTIVE(vap, va_mode) &&
781 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
782 goto out;
783
784 /*
785 * File flags change.
786 * We are guaranteed that only flags allowed to change given the
787 * current securelevel are being changed.
788 */
789 VATTR_SET_SUPPORTED(vap, va_flags);
790 if (VATTR_IS_ACTIVE(vap, va_flags)) {
791 u_int16_t *fdFlags;
792
793 cp->c_flags = vap->va_flags;
794 cp->c_touch_chgtime = TRUE;
795
796 /*
797 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
798 *
799 * The fdFlags for files and frFlags for folders are both 8 bytes
800 * into the userInfo (the first 16 bytes of the Finder Info). They
801 * are both 16-bit fields.
802 */
803 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
804 if (vap->va_flags & UF_HIDDEN)
805 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
806 else
807 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
808 }
809
810 /*
811 * Timestamp updates.
812 */
813 VATTR_SET_SUPPORTED(vap, va_create_time);
814 VATTR_SET_SUPPORTED(vap, va_access_time);
815 VATTR_SET_SUPPORTED(vap, va_modify_time);
816 VATTR_SET_SUPPORTED(vap, va_backup_time);
817 VATTR_SET_SUPPORTED(vap, va_change_time);
818 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
819 VATTR_IS_ACTIVE(vap, va_access_time) ||
820 VATTR_IS_ACTIVE(vap, va_modify_time) ||
821 VATTR_IS_ACTIVE(vap, va_backup_time)) {
822 if (VATTR_IS_ACTIVE(vap, va_create_time))
823 cp->c_itime = vap->va_create_time.tv_sec;
824 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
825 cp->c_atime = vap->va_access_time.tv_sec;
826 cp->c_touch_acctime = FALSE;
827 }
828 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
829 cp->c_mtime = vap->va_modify_time.tv_sec;
830 cp->c_touch_modtime = FALSE;
831 cp->c_touch_chgtime = TRUE;
832
833 /*
834 * The utimes system call can reset the modification
835 * time but it doesn't know about HFS create times.
836 * So we need to ensure that the creation time is
837 * always at least as old as the modification time.
838 */
839 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
840 (cp->c_cnid != kHFSRootFolderID) &&
841 (cp->c_mtime < cp->c_itime)) {
842 cp->c_itime = cp->c_mtime;
843 }
844 }
845 if (VATTR_IS_ACTIVE(vap, va_backup_time))
846 cp->c_btime = vap->va_backup_time.tv_sec;
847 cp->c_flag |= C_MODIFIED;
848 }
849
850 /*
851 * Set name encoding.
852 */
853 VATTR_SET_SUPPORTED(vap, va_encoding);
854 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
855 cp->c_encoding = vap->va_encoding;
856 hfs_setencodingbits(hfsmp, cp->c_encoding);
857 }
858
859 if ((error = hfs_update(vp, TRUE)) != 0)
860 goto out;
861 HFS_KNOTE(vp, NOTE_ATTRIB);
862 out:
863 if (cp)
864 hfs_unlock(cp);
865 return (error);
866 }
867
868
869 /*
870 * Change the mode on a file.
871 * cnode must be locked before calling.
872 */
873 __private_extern__
874 int
875 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
876 {
877 register struct cnode *cp = VTOC(vp);
878
879 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
880 return (0);
881
882 // XXXdbg - don't allow modification of the journal or journal_info_block
883 if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
884 struct HFSPlusExtentDescriptor *extd;
885
886 extd = &cp->c_datafork->ff_extents[0];
887 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
888 return EPERM;
889 }
890 }
891
892 #if OVERRIDE_UNKNOWN_PERMISSIONS
893 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
894 return (0);
895 };
896 #endif
897 cp->c_mode &= ~ALLPERMS;
898 cp->c_mode |= (mode & ALLPERMS);
899 cp->c_touch_chgtime = TRUE;
900 return (0);
901 }
902
903
904 __private_extern__
905 int
906 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
907 {
908 struct cnode *cp = VTOC(vp);
909 int retval = 0;
910 int is_member;
911
912 /*
913 * Disallow write attempts on read-only file systems;
914 * unless the file is a socket, fifo, or a block or
915 * character device resident on the file system.
916 */
917 switch (vnode_vtype(vp)) {
918 case VDIR:
919 case VLNK:
920 case VREG:
921 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
922 return (EROFS);
923 break;
924 default:
925 break;
926 }
927
928 /* If immutable bit set, nobody gets to write it. */
929 if (considerFlags && (cp->c_flags & IMMUTABLE))
930 return (EPERM);
931
932 /* Otherwise, user id 0 always gets access. */
933 if (!suser(cred, NULL))
934 return (0);
935
936 /* Otherwise, check the owner. */
937 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
938 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
939
940 /* Otherwise, check the groups. */
941 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
942 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
943 }
944
945 /* Otherwise, check everyone else. */
946 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
947 }
948
949
950 /*
951 * Perform chown operation on cnode cp;
952  * cnode must be locked prior to call.
953 */
954 __private_extern__
955 int
956 #if !QUOTA
957 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
958 __unused struct proc *p)
959 #else
960 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
961 __unused struct proc *p)
962 #endif
963 {
964 register struct cnode *cp = VTOC(vp);
965 uid_t ouid;
966 gid_t ogid;
967 #if QUOTA
968 int error = 0;
969 register int i;
970 int64_t change;
971 #endif /* QUOTA */
972
973 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
974 return (ENOTSUP);
975
976 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
977 return (0);
978
979 if (uid == (uid_t)VNOVAL)
980 uid = cp->c_uid;
981 if (gid == (gid_t)VNOVAL)
982 gid = cp->c_gid;
983
984 #if 0 /* we are guaranteed that this is already the case */
985 /*
986 * If we don't own the file, are trying to change the owner
987 * of the file, or are not a member of the target group,
988 * the caller must be superuser or the call fails.
989 */
990 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
991 (gid != cp->c_gid &&
992 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
993 (error = suser(cred, 0)))
994 return (error);
995 #endif
996
997 ogid = cp->c_gid;
998 ouid = cp->c_uid;
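        /*
         * Annotation on the quota handling below: this follows the classic BSD
         * chown pattern -- release the dquot references held under the old ids,
         * switch c_uid/c_gid, re-fetch dquots for the new ids and charge the
         * blocks and inode against them, rolling everything back if the new
         * owner would exceed a limit.
         */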
999 #if QUOTA
1000 if ((error = hfs_getinoquota(cp)))
1001 return (error);
1002 if (ouid == uid) {
1003 dqrele(cp->c_dquot[USRQUOTA]);
1004 cp->c_dquot[USRQUOTA] = NODQUOT;
1005 }
1006 if (ogid == gid) {
1007 dqrele(cp->c_dquot[GRPQUOTA]);
1008 cp->c_dquot[GRPQUOTA] = NODQUOT;
1009 }
1010
1011 /*
1012 * Eventually need to account for (fake) a block per directory
1013 * if (vnode_isdir(vp))
1014 * change = VTOHFS(vp)->blockSize;
1015 * else
1016 */
1017
1018 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1019 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1020 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1021 for (i = 0; i < MAXQUOTAS; i++) {
1022 dqrele(cp->c_dquot[i]);
1023 cp->c_dquot[i] = NODQUOT;
1024 }
1025 #endif /* QUOTA */
1026 cp->c_gid = gid;
1027 cp->c_uid = uid;
1028 #if QUOTA
1029 if ((error = hfs_getinoquota(cp)) == 0) {
1030 if (ouid == uid) {
1031 dqrele(cp->c_dquot[USRQUOTA]);
1032 cp->c_dquot[USRQUOTA] = NODQUOT;
1033 }
1034 if (ogid == gid) {
1035 dqrele(cp->c_dquot[GRPQUOTA]);
1036 cp->c_dquot[GRPQUOTA] = NODQUOT;
1037 }
1038 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1039 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1040 goto good;
1041 else
1042 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1043 }
1044 for (i = 0; i < MAXQUOTAS; i++) {
1045 dqrele(cp->c_dquot[i]);
1046 cp->c_dquot[i] = NODQUOT;
1047 }
1048 }
1049 cp->c_gid = ogid;
1050 cp->c_uid = ouid;
1051 if (hfs_getinoquota(cp) == 0) {
1052 if (ouid == uid) {
1053 dqrele(cp->c_dquot[USRQUOTA]);
1054 cp->c_dquot[USRQUOTA] = NODQUOT;
1055 }
1056 if (ogid == gid) {
1057 dqrele(cp->c_dquot[GRPQUOTA]);
1058 cp->c_dquot[GRPQUOTA] = NODQUOT;
1059 }
1060 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1061 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1062 (void) hfs_getinoquota(cp);
1063 }
1064 return (error);
1065 good:
1066 if (hfs_getinoquota(cp))
1067 panic("hfs_chown: lost quota");
1068 #endif /* QUOTA */
1069
1070
1071 /*
1072 According to the SUSv3 Standard, chown() shall mark
1073 for update the st_ctime field of the file.
1074 (No exceptions mentioned)
1075 */
1076 cp->c_touch_chgtime = TRUE;
1077 return (0);
1078 }
1079
1080
1081 /*
1082 * The hfs_exchange routine swaps the fork data in two files by
1083 * exchanging some of the information in the cnode. It is used
1084 * to preserve the file ID when updating an existing file, in
1085 * case the file is being tracked through its file ID. Typically
1086 * its used after creating a new file during a safe-save.
1087 */
1088 static int
1089 hfs_vnop_exchange(ap)
1090 struct vnop_exchange_args /* {
1091 struct vnode *a_fvp;
1092 struct vnode *a_tvp;
1093 int a_options;
1094 vfs_context_t a_context;
1095 } */ *ap;
1096 {
1097 struct vnode *from_vp = ap->a_fvp;
1098 struct vnode *to_vp = ap->a_tvp;
1099 struct cnode *from_cp;
1100 struct cnode *to_cp;
1101 struct hfsmount *hfsmp;
1102 struct cat_desc tempdesc;
1103 struct cat_attr tempattr;
1104 const unsigned char *from_nameptr;
1105 const unsigned char *to_nameptr;
1106 char from_iname[32];
1107 char to_iname[32];
1108 u_int32_t tempflag;
1109 cnid_t from_parid;
1110 cnid_t to_parid;
1111 int lockflags;
1112 int error = 0, started_tr = 0, got_cookie = 0;
1113 cat_cookie_t cookie;
1114
1115 /* The files must be on the same volume. */
1116 if (vnode_mount(from_vp) != vnode_mount(to_vp))
1117 return (EXDEV);
1118
1119 if (from_vp == to_vp)
1120 return (EINVAL);
1121
1122 if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
1123 return (error);
1124
1125 from_cp = VTOC(from_vp);
1126 to_cp = VTOC(to_vp);
1127 hfsmp = VTOHFS(from_vp);
1128
1129 /* Only normal files can be exchanged. */
1130 if (!vnode_isreg(from_vp) || !vnode_isreg(to_vp) ||
1131 VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
1132 error = EINVAL;
1133 goto exit;
1134 }
1135
1136 // XXXdbg - don't allow modification of the journal or journal_info_block
1137 if (hfsmp->jnl) {
1138 struct HFSPlusExtentDescriptor *extd;
1139
1140 if (from_cp->c_datafork) {
1141 extd = &from_cp->c_datafork->ff_extents[0];
1142 if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
1143 error = EPERM;
1144 goto exit;
1145 }
1146 }
1147
1148 if (to_cp->c_datafork) {
1149 extd = &to_cp->c_datafork->ff_extents[0];
1150 if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
1151 error = EPERM;
1152 goto exit;
1153 }
1154 }
1155 }
1156
1157 if ((error = hfs_start_transaction(hfsmp)) != 0) {
1158 goto exit;
1159 }
1160 started_tr = 1;
1161
1162 /*
1163 * Reserve some space in the Catalog file.
1164 */
1165 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
1166 goto exit;
1167 }
1168 got_cookie = 1;
1169
1170 /* The backend code always tries to delete the virtual
1171 * extent id for exchanging files so we need to lock
1172 * the extents b-tree.
1173 */
1174 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
1175
1176 /* Account for the location of the catalog objects. */
1177 if (from_cp->c_flag & C_HARDLINK) {
1178 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
1179 from_cp->c_attr.ca_linkref);
1180 from_nameptr = (unsigned char *)from_iname;
1181 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
1182 from_cp->c_hint = 0;
1183 } else {
1184 from_nameptr = from_cp->c_desc.cd_nameptr;
1185 from_parid = from_cp->c_parentcnid;
1186 }
1187 if (to_cp->c_flag & C_HARDLINK) {
1188 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
1189 to_cp->c_attr.ca_linkref);
1190 to_nameptr = (unsigned char *)to_iname;
1191 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
1192 to_cp->c_hint = 0;
1193 } else {
1194 to_nameptr = to_cp->c_desc.cd_nameptr;
1195 to_parid = to_cp->c_parentcnid;
1196 }
1197
1198 /* Do the exchange */
1199 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
1200 to_parid, from_cp->c_hint, to_cp->c_hint);
1201 hfs_systemfile_unlock(hfsmp, lockflags);
1202
1203 /*
1204 * Note that we don't need to exchange any extended attributes
1205 * since the attributes are keyed by file ID.
1206 */
1207
1208 if (error != E_NONE) {
1209 error = MacToVFSError(error);
1210 goto exit;
1211 }
1212
1213 /* Purge the vnodes from the name cache */
1214 if (from_vp)
1215 cache_purge(from_vp);
1216 if (to_vp)
1217 cache_purge(to_vp);
1218
1219 /* Save a copy of from attributes before swapping. */
1220 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
1221 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
1222 tempflag = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
1223
1224 /*
1225 * Swap the descriptors and all non-fork related attributes.
1226 * (except the modify date)
1227 */
1228 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
1229
1230 from_cp->c_hint = 0;
1231 from_cp->c_fileid = from_cp->c_cnid;
1232 from_cp->c_itime = to_cp->c_itime;
1233 from_cp->c_btime = to_cp->c_btime;
1234 from_cp->c_atime = to_cp->c_atime;
1235 from_cp->c_ctime = to_cp->c_ctime;
1236 from_cp->c_gid = to_cp->c_gid;
1237 from_cp->c_uid = to_cp->c_uid;
1238 from_cp->c_flags = to_cp->c_flags;
1239 from_cp->c_mode = to_cp->c_mode;
1240 from_cp->c_linkcount = to_cp->c_linkcount;
1241 from_cp->c_flag = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
1242 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
1243 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
1244
1245 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
1246 to_cp->c_hint = 0;
1247 to_cp->c_fileid = to_cp->c_cnid;
1248 to_cp->c_itime = tempattr.ca_itime;
1249 to_cp->c_btime = tempattr.ca_btime;
1250 to_cp->c_atime = tempattr.ca_atime;
1251 to_cp->c_ctime = tempattr.ca_ctime;
1252 to_cp->c_gid = tempattr.ca_gid;
1253 to_cp->c_uid = tempattr.ca_uid;
1254 to_cp->c_flags = tempattr.ca_flags;
1255 to_cp->c_mode = tempattr.ca_mode;
1256 to_cp->c_linkcount = tempattr.ca_linkcount;
1257 to_cp->c_flag = tempflag;
1258 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
1259 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
1260
1261 /* Rehash the cnodes using their new file IDs */
1262 hfs_chash_rehash(from_cp, to_cp);
1263
1264 /*
1265 * When a file moves out of "Cleanup At Startup"
1266 * we can drop its NODUMP status.
1267 */
1268 if ((from_cp->c_flags & UF_NODUMP) &&
1269 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
1270 from_cp->c_flags &= ~UF_NODUMP;
1271 from_cp->c_touch_chgtime = TRUE;
1272 }
1273 if ((to_cp->c_flags & UF_NODUMP) &&
1274 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
1275 to_cp->c_flags &= ~UF_NODUMP;
1276 to_cp->c_touch_chgtime = TRUE;
1277 }
1278
1279 HFS_KNOTE(from_vp, NOTE_ATTRIB);
1280 HFS_KNOTE(to_vp, NOTE_ATTRIB);
1281
1282 exit:
1283 if (got_cookie) {
1284 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
1285 }
1286 if (started_tr) {
1287 hfs_end_transaction(hfsmp);
1288 }
1289
1290 hfs_unlockpair(from_cp, to_cp);
1291 return (error);
1292 }
1293
1294
1295 /*
1296 * cnode must be locked
1297 */
1298 __private_extern__
1299 int
1300 hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p)
1301 {
1302 struct cnode *cp = VTOC(vp);
1303 struct filefork *fp = NULL;
1304 int retval = 0;
1305 struct hfsmount *hfsmp = VTOHFS(vp);
1306 struct timeval tv;
1307 int wait;
1308 int lockflag;
1309 int took_trunc_lock = 0;
1310
1311 wait = (waitfor == MNT_WAIT);
1312 if (always_do_fullfsync)
1313 fullsync = 1;
1314
1315 /* HFS directories don't have any data blocks. */
1316 if (vnode_isdir(vp))
1317 goto metasync;
1318
1319 /*
1320 * For system files flush the B-tree header and
1321 * for regular files write out any clusters
1322 */
1323 if (vnode_issystem(vp)) {
1324 if (VTOF(vp)->fcbBTCBPtr != NULL) {
1325 // XXXdbg
1326 if (hfsmp->jnl == NULL) {
1327 BTFlushPath(VTOF(vp));
1328 }
1329 }
1330 } else if (UBCINFOEXISTS(vp)) {
1331 hfs_unlock(cp);
1332 hfs_lock_truncate(cp, TRUE);
1333 took_trunc_lock = 1;
1334
1335 /* Don't hold cnode lock when calling into cluster layer. */
1336 (void) cluster_push(vp, wait ? IO_SYNC : 0);
1337
1338 hfs_lock(cp, HFS_FORCE_LOCK);
1339 }
1340 /*
1341 * When MNT_WAIT is requested and the zero fill timeout
1342 * has expired then we must explicitly zero out any areas
1343 * that are currently marked invalid (holes).
1344 *
1345 * Files with NODUMP can bypass zero filling here.
1346 */
1347 if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
1348 ((cp->c_flags & UF_NODUMP) == 0) &&
1349 UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) && (fp = VTOF(vp)) &&
1350 cp->c_zftimeout != 0) {
1351 microuptime(&tv);
1352 if (!fullsync && tv.tv_sec < (long)cp->c_zftimeout) {
1353 /* Remember that a force sync was requested. */
1354 cp->c_flag |= C_ZFWANTSYNC;
1355 goto datasync;
1356 }
1357 if (!took_trunc_lock) {
1358 hfs_unlock(cp);
1359 hfs_lock_truncate(cp, TRUE);
1360 hfs_lock(cp, HFS_FORCE_LOCK);
1361 took_trunc_lock = 1;
1362 }
1363
1364 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
1365 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
1366 off_t start = invalid_range->rl_start;
1367 off_t end = invalid_range->rl_end;
1368
1369 /* The range about to be written must be validated
1370 * first, so that VNOP_BLOCKMAP() will return the
1371 * appropriate mapping for the cluster code:
1372 */
1373 rl_remove(start, end, &fp->ff_invalidranges);
1374
1375 /* Don't hold cnode lock when calling into cluster layer. */
1376 hfs_unlock(cp);
1377 (void) cluster_write(vp, (struct uio *) 0,
1378 fp->ff_size, end + 1, start, (off_t)0,
1379 IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
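                        /*
                         * Annotation: with a NULL uio and IO_HEADZEROFILL, the
                         * cluster_write() call above transfers no user data; it
                         * only zero-fills the formerly-invalid range so those
                         * pages reach the disk as zeroes.
                         */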
1380 hfs_lock(cp, HFS_FORCE_LOCK);
1381 cp->c_flag |= C_MODIFIED;
1382 }
1383 hfs_unlock(cp);
1384 (void) cluster_push(vp, wait ? IO_SYNC : 0);
1385 hfs_lock(cp, HFS_FORCE_LOCK);
1386
1387 cp->c_flag &= ~C_ZFWANTSYNC;
1388 cp->c_zftimeout = 0;
1389 }
1390 datasync:
1391 if (took_trunc_lock)
1392 hfs_unlock_truncate(cp, TRUE);
1393
1394 /*
1395      * if we have a journal and if journal_active() returns != 0 then
1396 * we shouldn't do anything to a locked block (because it is part
1397 * of a transaction). otherwise we'll just go through the normal
1398 * code path and flush the buffer. note journal_active() can return
1399 * -1 if the journal is invalid -- however we still need to skip any
1400 * locked blocks as they get cleaned up when we finish the transaction
1401 * or close the journal.
1402 */
1403 // if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
1404 if (hfsmp->jnl)
1405 lockflag = BUF_SKIP_LOCKED;
1406 else
1407 lockflag = 0;
1408
1409 /*
1410 * Flush all dirty buffers associated with a vnode.
1411 */
1412 buf_flushdirtyblks(vp, wait, lockflag, "hfs_fsync");
1413
1414 metasync:
1415 if (vnode_isreg(vp) && vnode_issystem(vp)) {
1416 if (VTOF(vp)->fcbBTCBPtr != NULL) {
1417 microuptime(&tv);
1418 BTSetLastSync(VTOF(vp), tv.tv_sec);
1419 }
1420 cp->c_touch_acctime = FALSE;
1421 cp->c_touch_chgtime = FALSE;
1422 cp->c_touch_modtime = FALSE;
1423 } else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
1424 retval = hfs_update(vp, wait);
1425
1426 /*
1427 * When MNT_WAIT is requested push out the catalog record for
1428 * this file. If they asked for a full fsync, we can skip this
1429 * because the journal_flush or hfs_metasync_all will push out
1430 * all of the metadata changes.
1431 */
1432 if ((retval == 0) && wait && !fullsync && cp->c_hint &&
1433 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
1434 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
1435 }
1436
1437 /*
1438 * If this was a full fsync, make sure all metadata
1439 * changes get to stable storage.
1440 */
1441 if (fullsync) {
1442 if (hfsmp->jnl) {
1443 journal_flush(hfsmp->jnl);
1444 } else {
1445 retval = hfs_metasync_all(hfsmp);
1446 /* XXX need to pass context! */
1447 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
1448 }
1449 }
1450 }
1451
1452 return (retval);
1453 }
1454
1455
1456 /* Sync an hfs catalog b-tree node */
1457 static int
1458 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
1459 {
1460 vnode_t vp;
1461 buf_t bp;
1462 int lockflags;
1463
1464 vp = HFSTOVCB(hfsmp)->catalogRefNum;
1465
1466 // XXXdbg - don't need to do this on a journaled volume
1467 if (hfsmp->jnl) {
1468 return 0;
1469 }
1470
1471 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
1472 /*
1473 * Look for a matching node that has been delayed
1474 * but is not part of a set (B_LOCKED).
1475 *
1476 * BLK_ONLYVALID causes buf_getblk to return a
1477 * buf_t for the daddr64_t specified only if it's
1478 * currently resident in the cache... the size
1479 * parameter to buf_getblk is ignored when this flag
1480 * is set
1481 */
1482 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
1483
1484 if (bp) {
1485 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
1486 (void) VNOP_BWRITE(bp);
1487 else
1488 buf_brelse(bp);
1489 }
1490
1491 hfs_systemfile_unlock(hfsmp, lockflags);
1492
1493 return (0);
1494 }
1495
1496
1497 /*
1498 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
1499 * without a journal. Note that the volume bitmap does not get written;
1500 * we rely on fsck_hfs to fix that up (which it can do without any loss
1501 * of data).
1502 */
1503 static int
1504 hfs_metasync_all(struct hfsmount *hfsmp)
1505 {
1506 int lockflags;
1507
1508 /* Lock all of the B-trees so we get a mutually consistent state */
1509 lockflags = hfs_systemfile_lock(hfsmp,
1510 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
1511
1512 /* Sync each of the B-trees */
1513 if (hfsmp->hfs_catalog_vp)
1514 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
1515 if (hfsmp->hfs_extents_vp)
1516 hfs_btsync(hfsmp->hfs_extents_vp, 0);
1517 if (hfsmp->hfs_attribute_vp)
1518 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
1519
1520 /* Wait for all of the writes to complete */
1521 if (hfsmp->hfs_catalog_vp)
1522 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
1523 if (hfsmp->hfs_extents_vp)
1524 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
1525 if (hfsmp->hfs_attribute_vp)
1526 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
1527
1528 hfs_systemfile_unlock(hfsmp, lockflags);
1529
1530 return 0;
1531 }
1532
1533
1534 /*ARGSUSED 1*/
1535 static int
1536 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
1537 {
1538 buf_clearflags(bp, B_LOCKED);
1539 (void) buf_bawrite(bp);
1540
1541 return(BUF_CLAIMED);
1542 }
1543
1544
1545 __private_extern__
1546 int
1547 hfs_btsync(struct vnode *vp, int sync_transaction)
1548 {
1549 struct cnode *cp = VTOC(vp);
1550 struct timeval tv;
1551 int flags = 0;
1552
1553 if (sync_transaction)
1554 flags |= BUF_SKIP_NONLOCKED;
1555 /*
1556 * Flush all dirty buffers associated with b-tree.
1557 */
1558 buf_iterate(vp, hfs_btsync_callback, flags, 0);
1559
1560 microuptime(&tv);
1561 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
1562 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
1563 cp->c_touch_acctime = FALSE;
1564 cp->c_touch_chgtime = FALSE;
1565 cp->c_touch_modtime = FALSE;
1566
1567 return 0;
1568 }
1569
1570 /*
1571 * Remove a directory.
1572 */
1573 static int
1574 hfs_vnop_rmdir(ap)
1575 struct vnop_rmdir_args /* {
1576 struct vnode *a_dvp;
1577 struct vnode *a_vp;
1578 struct componentname *a_cnp;
1579 vfs_context_t a_context;
1580 } */ *ap;
1581 {
1582 struct vnode *dvp = ap->a_dvp;
1583 struct vnode *vp = ap->a_vp;
1584 struct cnode *dcp = VTOC(dvp);
1585 struct cnode *cp = VTOC(vp);
1586 int error;
1587
1588 if (!S_ISDIR(cp->c_mode)) {
1589 return (ENOTDIR);
1590 }
1591 if (dvp == vp) {
1592 return (EINVAL);
1593 }
1594 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
1595 return (error);
1596 }
1597 error = hfs_removedir(dvp, vp, ap->a_cnp, 0);
1598 hfs_unlockpair(dcp, cp);
1599
1600 return (error);
1601 }
1602
1603 /*
1604 * Remove a directory
1605 *
1606 * Both dvp and vp cnodes are locked
1607 */
1608 static int
1609 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1610 int skip_reserve)
1611 {
1612 struct cnode *cp;
1613 struct cnode *dcp;
1614 struct hfsmount * hfsmp;
1615 struct cat_desc desc;
1616 int lockflags;
1617 int error = 0, started_tr = 0;
1618
1619 cp = VTOC(vp);
1620 dcp = VTOC(dvp);
1621 hfsmp = VTOHFS(vp);
1622
1623 if (dcp == cp) {
1624 return (EINVAL); /* cannot remove "." */
1625 }
1626 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
1627 return (0);
1628 }
1629 if (cp->c_entries != 0) {
1630 return (ENOTEMPTY);
1631 }
1632
1633 /* Check if we're removing the last link to an empty directory. */
1634 if (cp->c_flag & C_HARDLINK) {
1635 /* We could also return EBUSY here */
1636 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
1637 }
1638
1639 if ((hfsmp->hfs_attribute_vp != NULL) &&
1640 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
1641
1642 return hfs_removefile(dvp, vp, cnp, 0, 0, 1);
1643 }
1644
1645 dcp->c_flag |= C_DIR_MODIFICATION;
1646
1647 #if QUOTA
1648 if (hfsmp->hfs_flags & HFS_QUOTAS)
1649 (void)hfs_getinoquota(cp);
1650 #endif
1651 if ((error = hfs_start_transaction(hfsmp)) != 0) {
1652 goto out;
1653 }
1654 started_tr = 1;
1655
1656 /*
1657 * Verify the directory is empty (and valid).
1658 * (Rmdir ".." won't be valid since
1659 * ".." will contain a reference to
1660 * the current directory and thus be
1661 * non-empty.)
1662 */
1663 if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
1664 error = EPERM;
1665 goto out;
1666 }
1667
1668 /* Remove the entry from the namei cache: */
1669 cache_purge(vp);
1670
1671 /*
1672 * Protect against a race with rename by using the component
1673 * name passed in and parent id from dvp (instead of using
1674 * the cp->c_desc which may have changed).
1675 */
1676 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
1677 desc.cd_namelen = cnp->cn_namelen;
1678 desc.cd_parentcnid = dcp->c_fileid;
1679 desc.cd_cnid = cp->c_cnid;
1680 desc.cd_flags = CD_ISDIR;
1681 desc.cd_encoding = cp->c_encoding;
1682 desc.cd_hint = 0;
1683
1684 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) {
1685 error = 0;
1686 goto out;
1687 }
1688
1689 /* Remove entry from catalog */
1690 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
1691
1692 if (!skip_reserve) {
1693 /*
1694 * Reserve some space in the Catalog file.
1695 */
1696 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
1697 hfs_systemfile_unlock(hfsmp, lockflags);
1698 goto out;
1699 }
1700 }
1701
1702 error = cat_delete(hfsmp, &desc, &cp->c_attr);
1703 if (error == 0) {
1704 /* The parent lost a child */
1705 if (dcp->c_entries > 0)
1706 dcp->c_entries--;
1707 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
1708 dcp->c_dirchangecnt++;
1709 dcp->c_touch_chgtime = TRUE;
1710 dcp->c_touch_modtime = TRUE;
1711 hfs_touchtimes(hfsmp, cp);
1712 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
1713 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
1714 }
1715
1716 hfs_systemfile_unlock(hfsmp, lockflags);
1717
1718 if (error)
1719 goto out;
1720
1721 #if QUOTA
1722 if (hfsmp->hfs_flags & HFS_QUOTAS)
1723 (void)hfs_chkiq(cp, -1, NOCRED, 0);
1724 #endif /* QUOTA */
1725
1726 HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK | NOTE_ATTRIB);
1727
1728 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
1729
1730 /*
1731 * directory open or in use (e.g. opendir() or current working
1732 * directory for some process); wait for inactive to actually
1733 * remove catalog entry
1734 */
1735 if (vnode_isinuse(vp, 0)) {
1736 cp->c_flag |= C_DELETED;
1737 } else {
1738 cp->c_mode = 0; /* Makes the vnode go away...see inactive */
1739 cp->c_flag |= C_NOEXISTS;
1740 }
1741 out:
1742 dcp->c_flag &= ~C_DIR_MODIFICATION;
1743 wakeup((caddr_t)&dcp->c_flag);
1744
1745 HFS_KNOTE(vp, NOTE_DELETE);
1746
1747 if (started_tr) {
1748 hfs_end_transaction(hfsmp);
1749 }
1750
1751 return (error);
1752 }
1753
1754
1755 /*
1756 * Remove a file or link.
1757 */
1758 static int
1759 hfs_vnop_remove(ap)
1760 struct vnop_remove_args /* {
1761 struct vnode *a_dvp;
1762 struct vnode *a_vp;
1763 struct componentname *a_cnp;
1764 int a_flags;
1765 vfs_context_t a_context;
1766 } */ *ap;
1767 {
1768 struct vnode *dvp = ap->a_dvp;
1769 struct vnode *vp = ap->a_vp;
1770 struct cnode *dcp = VTOC(dvp);
1771 struct cnode *cp = VTOC(vp);
1772 struct vnode *rvp = cp->c_rsrc_vp;
1773 int error=0, recycle_rsrc=0, rvid=0;
1774
1775 if (dvp == vp) {
1776 return (EINVAL);
1777 }
1778
1779 hfs_lock_truncate(cp, TRUE);
1780
1781 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
1782 hfs_unlock_truncate(cp, TRUE);
1783 return (error);
1784 }
1785 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0);
1786
1787 //
1788 // If the remove succeeded and it's an open-unlinked file that has
1789 // a resource fork vnode that's not in use, we will want to recycle
1790 // the rvp *after* we're done unlocking everything. Otherwise the
1791 // resource vnode will keep a v_parent reference on this vnode which
1792 // prevents it from going through inactive/reclaim which means that
1793 // the disk space associated with this file won't get free'd until
1794 // something forces the resource vnode to get recycled (and that can
1795 // take a very long time).
1796 //
1797 if (error == 0 && (cp->c_flag & C_DELETED) && rvp && !vnode_isinuse(rvp, 0)) {
1798 rvid = vnode_vid(rvp);
1799 recycle_rsrc = 1;
1800 }
1801
1802 /*
1803 * Drop the truncate lock before unlocking the cnode
1804 * (which can potentially perform a vnode_put and
1805 * recycle the vnode which in turn might require the
1806 * truncate lock)
1807 */
1808 hfs_unlock_truncate(cp, TRUE);
1809 hfs_unlockpair(dcp, cp);
1810
1811 if (recycle_rsrc && vnode_getwithvid(rvp, rvid) == 0) {
1812 vnode_ref(rvp);
1813 vnode_rele(rvp);
1814 vnode_recycle(rvp);
1815 vnode_put(rvp);
1816 }
1817
1818 return (error);
1819 }
1820
1821
1822 static int
1823 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
1824
1825 if ( !(buf_flags(bp) & B_META))
1826 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
1827 /*
1828 * it's part of the current transaction, kill it.
1829 */
1830 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
1831
1832 return (BUF_CLAIMED);
1833 }
1834
1835 /*
1836 * hfs_removefile
1837 *
1838 * Similar to hfs_vnop_remove except there are additional options.
1839 *
1840 * Requires cnode and truncate locks to be held.
1841 */
1842 static int
1843 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1844 int flags, int skip_reserve, int allow_dirs)
1845 {
1846 struct vnode *rvp = NULL;
1847 struct cnode *cp;
1848 struct cnode *dcp;
1849 struct hfsmount *hfsmp;
1850 struct cat_desc desc;
1851 struct timeval tv;
1852 vfs_context_t ctx = cnp->cn_context;
1853 int dataforkbusy = 0;
1854 int rsrcforkbusy = 0;
1855 int truncated = 0;
1856 int lockflags;
1857 int error = 0;
1858 int started_tr = 0;
1859 int isbigfile = 0, defer_remove=0, isdir=0;
1860
1861 cp = VTOC(vp);
1862 dcp = VTOC(dvp);
1863 hfsmp = VTOHFS(vp);
1864
1865 /* Check if we lost a race post lookup. */
1866 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
1867 return (0);
1868 }
1869
1870 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) {
1871 return 0;
1872 }
1873
1874 /* Make sure a remove is permitted */
1875 if (VNODE_IS_RSRC(vp)) {
1876 return (EPERM);
1877 }
1878 /* Don't allow deleting the journal or journal_info_block. */
1879 if (hfsmp->jnl &&
1880 (cp->c_fileid == hfsmp->hfs_jnlfileid || cp->c_fileid == hfsmp->hfs_jnlinfoblkid)) {
1881 return (EPERM);
1882 }
1883 /*
1884 * Hard links require special handling.
1885 */
1886 if (cp->c_flag & C_HARDLINK) {
1887 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
1888 return (EBUSY);
1889 } else {
1890 /* A directory hard link with a link count of one is
1891 * treated as a regular directory. Therefore it should
1892 * only be removed using rmdir().
1893 */
1894 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
1895 (allow_dirs == 0)) {
1896 return (EPERM);
1897 }
1898 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
1899 }
1900 }
1901 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
1902 if (vnode_isdir(vp)) {
1903 if (allow_dirs == 0)
1904 return (EPERM); /* POSIX */
1905 isdir = 1;
1906 }
1907 /* Sanity check the parent ids. */
1908 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
1909 (cp->c_parentcnid != dcp->c_fileid)) {
1910 return (EINVAL);
1911 }
1912
1913 dcp->c_flag |= C_DIR_MODIFICATION;
1914
1915 // this guy is going away so mark him as such
1916 cp->c_flag |= C_DELETED;
1917
1918
1919 /* Remove our entry from the namei cache. */
1920 cache_purge(vp);
1921
1922 /*
1923 * Acquire a vnode for a non-empty resource fork.
1924 * (needed for hfs_truncate)
1925 */
1926 if (isdir == 0 && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
1927 /*
1928 * We must avoid calling hfs_vgetrsrc() when we have
1929 * an active resource fork vnode to avoid deadlocks
1930 * when that vnode is in the VL_TERMINATE state. We
1931 * can defer removing the file and its resource fork
1932 * until the call to hfs_vnop_inactive() occurs.
1933 */
1934 if (cp->c_rsrc_vp) {
1935 defer_remove = 1;
1936 } else {
1937 error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
1938 if (error)
1939 goto out;
1940 /* Defer the vnode_put on rvp until the hfs_unlock(). */
1941 cp->c_flag |= C_NEED_RVNODE_PUT;
1942 }
1943 }
1944 /* Check if this file is being used. */
1945 if (isdir == 0) {
1946 dataforkbusy = vnode_isinuse(vp, 0);
1947 rsrcforkbusy = rvp ? vnode_isinuse(rvp, 0) : 0;
1948 }
1949
1950 /* Check if we have to break the deletion into multiple pieces. */
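/*
 * Files at or above HFS_BIGFILE_SIZE that also have overflow extents are
 * not deleted inline; like busy files, they are moved into the private
 * directory below and their space is reclaimed later, presumably to keep
 * any single transaction from growing too large.
 */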
1951 if (isdir == 0) {
1952 isbigfile = ((cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE) && overflow_extents(VTOF(vp)));
1953 }
1954
1955 /* Check if the file has xattrs. If it does, we'll have to delete them in
1956 individual transactions in case there are too many. */
1957 if ((hfsmp->hfs_attribute_vp != NULL) &&
1958 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
1959 defer_remove = 1;
1960 }
1961
1962 /*
1963 * Carbon semantics prohibit deleting busy files.
1964 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
1965 */
1966 if (dataforkbusy || rsrcforkbusy) {
1967 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
1968 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
1969 error = EBUSY;
1970 goto out;
1971 }
1972 }
1973
1974 #if QUOTA
1975 if (hfsmp->hfs_flags & HFS_QUOTAS)
1976 (void)hfs_getinoquota(cp);
1977 #endif /* QUOTA */
1978
1979 /* Check if we need a ubc_setsize. */
1980 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy)) {
1981 /*
1982 * A ubc_setsize can cause a pagein so defer it
1983 * until after the cnode lock is dropped. The
1984 * cnode lock cannot be dropped/reacquired here
1985 * since we might already hold the journal lock.
1986 */
1987 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
1988 cp->c_flag |= C_NEED_DATA_SETSIZE;
1989 }
1990 if (!rsrcforkbusy && rvp) {
1991 cp->c_flag |= C_NEED_RSRC_SETSIZE;
1992 }
1993 }
1994
1995 if ((error = hfs_start_transaction(hfsmp)) != 0) {
1996 goto out;
1997 }
1998 started_tr = 1;
1999
2000 // XXXdbg - if we're journaled, kill any dirty symlink buffers
2001 if (hfsmp->jnl && vnode_islnk(vp))
2002 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
2003
2004 /*
2005 * Truncate any non-busy forks. Busy forks will
2006 * get truncated when their vnode goes inactive.
2007 *
2008 * Since we're already inside a transaction,
2009 * tell hfs_truncate to skip the ubc_setsize.
2010 */
2011 if (isdir == 0) {
2012 int mode = cp->c_mode;
2013
2014 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
2015 cp->c_mode = 0; /* Suppress hfs_update */
2016 error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ctx);
2017 cp->c_mode = mode;
2018 if (error)
2019 goto out;
2020 truncated = 1;
2021 }
2022 if (!rsrcforkbusy && rvp) {
2023 cp->c_mode = 0; /* Suppress hfs_update */
2024 error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, ctx);
2025 cp->c_mode = mode;
2026 if (error)
2027 goto out;
2028 truncated = 1;
2029 }
2030 }
2031
2032 /*
2033 * Protect against a race with rename by using the component
2034 * name passed in and parent id from dvp (instead of using
2035 * the cp->c_desc which may have changed).
2036 */
2037 desc.cd_flags = 0;
2038 desc.cd_encoding = cp->c_desc.cd_encoding;
2039 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
2040 desc.cd_namelen = cnp->cn_namelen;
2041 desc.cd_parentcnid = dcp->c_fileid;
2042 desc.cd_hint = cp->c_desc.cd_hint;
2043 desc.cd_cnid = cp->c_cnid;
2044 microtime(&tv);
2045
2046 /*
2047 * There are two cases to consider:
2048 * 1. File is busy/big/defer_remove ==> move/rename the file
2049 * 2. File is not in use ==> remove the file
2050 */
2051 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
2052 char delname[32];
2053 struct cat_desc to_desc;
2054 struct cat_desc todir_desc;
2055
2056 /*
2057 * Orphan this file (move to hidden directory).
2058 */
2059 bzero(&todir_desc, sizeof(todir_desc));
2060 todir_desc.cd_parentcnid = 2;
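/* note: 2 == kHFSRootFolderID */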
2061
2062 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
2063 bzero(&to_desc, sizeof(to_desc));
2064 to_desc.cd_nameptr = (const u_int8_t *)delname;
2065 to_desc.cd_namelen = strlen(delname);
2066 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2067 to_desc.cd_flags = 0;
2068 to_desc.cd_cnid = cp->c_cnid;
2069
2070 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2071 if (!skip_reserve) {
2072 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
2073 hfs_systemfile_unlock(hfsmp, lockflags);
2074 goto out;
2075 }
2076 }
2077
2078 error = cat_rename(hfsmp, &desc, &todir_desc,
2079 &to_desc, (struct cat_desc *)NULL);
2080
2081 if (error == 0) {
2082 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
2083 if (isdir == 1) {
2084 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
2085 }
2086 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
2087 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
2088
2089 /* Update the parent directory */
2090 if (dcp->c_entries > 0)
2091 dcp->c_entries--;
2092 if (isdir == 1) {
2093 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
2094 }
2095 dcp->c_dirchangecnt++;
2096 dcp->c_ctime = tv.tv_sec;
2097 dcp->c_mtime = tv.tv_sec;
2098 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2099
2100 /* Update the file's state */
2101 cp->c_flag |= C_DELETED;
2102 cp->c_ctime = tv.tv_sec;
2103 --cp->c_linkcount;
2104 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
2105 }
2106 hfs_systemfile_unlock(hfsmp, lockflags);
2107 if (error)
2108 goto out;
2109
2110 } else /* Not busy */ {
2111
2112 if (cp->c_blocks > 0) {
2113 printf("hfs_remove: attempting to delete a non-empty file %s\n",
2114 cp->c_desc.cd_nameptr);
2115 error = EBUSY;
2116 goto out;
2117 }
2118
2119 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2120 if (!skip_reserve) {
2121 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
2122 hfs_systemfile_unlock(hfsmp, lockflags);
2123 goto out;
2124 }
2125 }
2126
2127 error = cat_delete(hfsmp, &desc, &cp->c_attr);
2128
2129 if (error && error != ENXIO && error != ENOENT && truncated) {
2130 if ((cp->c_datafork && cp->c_datafork->ff_size != 0) ||
2131 (cp->c_rsrcfork && cp->c_rsrcfork->ff_size != 0)) {
2132 panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
2133 error, cp->c_datafork->ff_size, cp->c_rsrcfork->ff_size);
2134 } else {
2135 printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
2136 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
2137 }
2138 }
2139 if (error == 0) {
2140 /* Update the parent directory */
2141 if (dcp->c_entries > 0)
2142 dcp->c_entries--;
2143 dcp->c_dirchangecnt++;
2144 dcp->c_ctime = tv.tv_sec;
2145 dcp->c_mtime = tv.tv_sec;
2146 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2147 }
2148 hfs_systemfile_unlock(hfsmp, lockflags);
2149 if (error)
2150 goto out;
2151
2152 #if QUOTA
2153 if (hfsmp->hfs_flags & HFS_QUOTAS)
2154 (void)hfs_chkiq(cp, -1, NOCRED, 0);
2155 #endif /* QUOTA */
2156
2157 cp->c_mode = 0;
2158 truncated = 0; // because the catalog entry is gone
2159 cp->c_flag |= C_NOEXISTS;
2160 cp->c_flag &= ~C_DELETED;
2161 cp->c_touch_chgtime = TRUE; /* XXX needed ? */
2162 --cp->c_linkcount;
2163
2164 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
2165 }
2166
2167 /*
2168 * All done with this cnode's descriptor...
2169 *
2170 * Note: all future catalog calls for this cnode must be by
2171 * fileid only. This is OK for HFS (which doesn't have file
2172 * thread records) since HFS doesn't support the removal of
2173 * busy files.
2174 */
2175 cat_releasedesc(&cp->c_desc);
2176
2177 HFS_KNOTE(dvp, NOTE_WRITE);
2178
2179 out:
2180 if (error) {
2181 cp->c_flag &= ~C_DELETED;
2182 }
2183
2184 /* Commit the truncation to the catalog record */
2185 if (truncated) {
2186 cp->c_flag |= C_FORCEUPDATE;
2187 cp->c_touch_chgtime = TRUE;
2188 cp->c_touch_modtime = TRUE;
2189 (void) hfs_update(vp, 0);
2190 }
2191
2192 if (started_tr) {
2193 hfs_end_transaction(hfsmp);
2194 }
2195
2196 dcp->c_flag &= ~C_DIR_MODIFICATION;
2197 wakeup((caddr_t)&dcp->c_flag);
2198
2199 HFS_KNOTE(vp, NOTE_DELETE);
2200 if (rvp) {
2201 HFS_KNOTE(rvp, NOTE_DELETE);
2202 }
2203
2204 return (error);
2205 }
2206
2207
2208 __private_extern__ void
2209 replace_desc(struct cnode *cp, struct cat_desc *cdp)
2210 {
2211 // fixes 4348457 and 4463138
2212 if (&cp->c_desc == cdp) {
2213 return;
2214 }
2215
2216 /* First release allocated name buffer */
2217 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
2218 const u_int8_t *name = cp->c_desc.cd_nameptr;
2219
2220 cp->c_desc.cd_nameptr = 0;
2221 cp->c_desc.cd_namelen = 0;
2222 cp->c_desc.cd_flags &= ~CD_HASBUF;
2223 vfs_removename((const char *)name);
2224 }
2225 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
2226
2227 /* Cnode now owns the name buffer */
2228 cdp->cd_nameptr = 0;
2229 cdp->cd_namelen = 0;
2230 cdp->cd_flags &= ~CD_HASBUF;
2231 }
2232
2233
2234 /*
2235 * Rename a cnode.
2236 *
2237 * The VFS layer guarantees that:
2238 * - source and destination will either both be directories, or
2239 * both not be directories.
2240 * - all the vnodes are from the same file system
2241 *
2242 * When the target is a directory, HFS must ensure that it's empty.
2243 */
2244 static int
2245 hfs_vnop_rename(ap)
2246 struct vnop_rename_args /* {
2247 struct vnode *a_fdvp;
2248 struct vnode *a_fvp;
2249 struct componentname *a_fcnp;
2250 struct vnode *a_tdvp;
2251 struct vnode *a_tvp;
2252 struct componentname *a_tcnp;
2253 vfs_context_t a_context;
2254 } */ *ap;
2255 {
2256 struct vnode *tvp = ap->a_tvp;
2257 struct vnode *tdvp = ap->a_tdvp;
2258 struct vnode *fvp = ap->a_fvp;
2259 struct vnode *fdvp = ap->a_fdvp;
2260 struct vnode *rvp = NULLVP;
2261 struct componentname *tcnp = ap->a_tcnp;
2262 struct componentname *fcnp = ap->a_fcnp;
2263 struct proc *p = vfs_context_proc(ap->a_context);
2264 struct cnode *fcp;
2265 struct cnode *fdcp;
2266 struct cnode *tdcp;
2267 struct cnode *tcp;
2268 struct cat_desc from_desc;
2269 struct cat_desc to_desc;
2270 struct cat_desc out_desc;
2271 struct hfsmount *hfsmp;
2272 cat_cookie_t cookie;
2273 int tvp_deleted = 0;
2274 int started_tr = 0, got_cookie = 0;
2275 int took_trunc_lock = 0;
2276 int lockflags;
2277 int error;
2278 int rsrc_vid = 0;
2279 int recycle_rsrc = 0;
2280
2281 /* When tvp exists, take the truncate lock for the hfs_removefile(). */
2282 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
2283 hfs_lock_truncate(VTOC(tvp), TRUE);
2284 took_trunc_lock = 1;
2285 }
2286
2287 retry:
2288 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
2289 HFS_EXCLUSIVE_LOCK);
2290 if (error) {
2291 if (took_trunc_lock)
2292 hfs_unlock_truncate(VTOC(tvp), TRUE);
2293 return (error);
2294 }
2295
2296 fdcp = VTOC(fdvp);
2297 fcp = VTOC(fvp);
2298 tdcp = VTOC(tdvp);
2299 tcp = tvp ? VTOC(tvp) : NULL;
2300 hfsmp = VTOHFS(tdvp);
2301
2302 /* Check for a race against unlink. */
2303 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid)) {
2304 error = ENOENT;
2305 goto out;
2306 }
2307
2308 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid))) {
2309 //
2310 // hmm, the destination vnode isn't valid any more.
2311 // in this case we can just drop him and pretend he
2312 // never existed in the first place.
2313 //
2314 if (took_trunc_lock) {
2315 hfs_unlock_truncate(VTOC(tvp), TRUE);
2316 took_trunc_lock = 0;
2317 }
2318
2319 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
2320
2321 tcp = NULL;
2322 tvp = NULL;
2323
2324 // retry the locking with tvp null'ed out
2325 goto retry;
2326 }
2327
2328 fdcp->c_flag |= C_DIR_MODIFICATION;
2329 if (fdvp != tdvp) {
2330 tdcp->c_flag |= C_DIR_MODIFICATION;
2331 }
2332
2333 /*
2334 * Disallow renaming of a directory hard link if the source and
2335 * destination parent directories are different, or renaming a directory
2336 * that has a directory hard link descendant when one of the ancestors
2337 * of the destination directory is a directory hard link.
2338 */
2339 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
2340 if (fcp->c_flag & C_HARDLINK) {
2341 error = EPERM;
2342 goto out;
2343 }
2344 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
2345 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2346 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
2347 error = EPERM;
2348 hfs_systemfile_unlock(hfsmp, lockflags);
2349 goto out;
2350 }
2351 hfs_systemfile_unlock(hfsmp, lockflags);
2352 }
2353 }
2354
2355 /*
2356 * The following edge case is caught here:
2357 * (to cannot be a descendant of from)
2358 *
2359 * o fdvp
2360 * /
2361 * /
2362 * o fvp
2363 * \
2364 * \
2365 * o tdvp
2366 * /
2367 * /
2368 * o tvp
2369 */
2370 if (tdcp->c_parentcnid == fcp->c_fileid) {
2371 error = EINVAL;
2372 goto out;
2373 }
2374
2375 /*
2376 * The following two edge cases are caught here:
2377 * (note tvp is not empty)
2378 *
2379 * o tdvp o tdvp
2380 * / /
2381 * / /
2382 * o tvp tvp o fdvp
2383 * \ \
2384 * \ \
2385 * o fdvp o fvp
2386 * /
2387 * /
2388 * o fvp
2389 */
2390 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
2391 error = ENOTEMPTY;
2392 goto out;
2393 }
2394
2395 /*
2396 * The following edge case is caught here:
2397 * (the from child and parent are the same)
2398 *
2399 * o tdvp
2400 * /
2401 * /
2402 * fdvp o fvp
2403 */
2404 if (fdvp == fvp) {
2405 error = EINVAL;
2406 goto out;
2407 }
2408
2409 /*
2410 * Make sure "from" vnode and its parent are changeable.
2411 */
2412 if ((fcp->c_flags & (IMMUTABLE | APPEND)) || (fdcp->c_flags & APPEND)) {
2413 error = EPERM;
2414 goto out;
2415 }
2416
2417 /*
2418 * If the destination parent directory is "sticky", then the
2419 * user must own the parent directory, or the destination of
2420 * the rename, otherwise the destination may not be changed
2421 * (except by root). This implements append-only directories.
2422 *
2423 * Note that checks for immutable and write access are done
2424 * by the call to hfs_removefile.
2425 */
2426 if (tvp && (tdcp->c_mode & S_ISTXT) &&
2427 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
2428 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
2429 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
2430 error = EPERM;
2431 goto out;
2432 }
2433
2434 #if QUOTA
2435 if (tvp)
2436 (void)hfs_getinoquota(tcp);
2437 #endif
2438 /* Preflighting done, take fvp out of the name space. */
2439 cache_purge(fvp);
2440
2441 /*
2442 * When a file moves out of "Cleanup At Startup"
2443 * we can drop its NODUMP status.
2444 */
2445 if ((fcp->c_flags & UF_NODUMP) &&
2446 vnode_isreg(fvp) &&
2447 (fdvp != tdvp) &&
2448 (fdcp->c_desc.cd_nameptr != NULL) &&
2449 (strncmp((const char *)fdcp->c_desc.cd_nameptr,
2450 CARBON_TEMP_DIR_NAME,
2451 sizeof(CARBON_TEMP_DIR_NAME)) == 0)) {
2452 fcp->c_flags &= ~UF_NODUMP;
2453 fcp->c_touch_chgtime = TRUE;
2454 (void) hfs_update(fvp, 0);
2455 }
2456
2457 bzero(&from_desc, sizeof(from_desc));
2458 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
2459 from_desc.cd_namelen = fcnp->cn_namelen;
2460 from_desc.cd_parentcnid = fdcp->c_fileid;
2461 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2462 from_desc.cd_cnid = fcp->c_cnid;
2463
2464 bzero(&to_desc, sizeof(to_desc));
2465 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
2466 to_desc.cd_namelen = tcnp->cn_namelen;
2467 to_desc.cd_parentcnid = tdcp->c_fileid;
2468 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2469 to_desc.cd_cnid = fcp->c_cnid;
2470
2471 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2472 goto out;
2473 }
2474 started_tr = 1;
2475
2476 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
2477 * inside a journal transaction and without holding a cnode lock.
2478 * As setting of this bit depends on being in journal transaction for
2479 * concurrency, check this bit again after we start journal transaction for rename
2480 * to ensure that this directory does not have any descendant that
2481 * is a directory hard link.
2482 */
2483 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
2484 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
2485 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2486 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
2487 error = EPERM;
2488 hfs_systemfile_unlock(hfsmp, lockflags);
2489 goto out;
2490 }
2491 hfs_systemfile_unlock(hfsmp, lockflags);
2492 }
2493 }
2494
2495 // if it's a hardlink then re-lookup the name so
2496 // that we get the correct cnid in from_desc (see
2497 // the comment in hfs_removefile for more details)
2498 //
2499 if (fcp->c_flag & C_HARDLINK) {
2500 struct cat_desc tmpdesc;
2501 cnid_t real_cnid;
2502
2503 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
2504 tmpdesc.cd_namelen = fcnp->cn_namelen;
2505 tmpdesc.cd_parentcnid = fdcp->c_fileid;
2506 tmpdesc.cd_hint = fdcp->c_childhint;
2507 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
2508 tmpdesc.cd_encoding = 0;
2509
2510 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2511
2512 if (cat_lookup(hfsmp, &tmpdesc, 0, NULL, NULL, NULL, &real_cnid) != 0) {
2513 hfs_systemfile_unlock(hfsmp, lockflags);
2514 goto out;
2515 }
2516
2517 // use the real cnid instead of whatever happened to be there
2518 from_desc.cd_cnid = real_cnid;
2519 hfs_systemfile_unlock(hfsmp, lockflags);
2520 }
2521
2522 /*
2523 * Reserve some space in the Catalog file.
2524 */
2525 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
2526 goto out;
2527 }
2528 got_cookie = 1;
2529
2530 /*
2531 * If the destination exists then it may need to be removed.
2532 */
2533 if (tvp) {
2534 /*
2535 * When fvp matches tvp they could be case variants
2536 * or matching hard links.
2537 */
2538 if (fvp == tvp) {
2539 if (!(fcp->c_flag & C_HARDLINK)) {
2540 goto skip_rm; /* simple case variant */
2541
2542 } else if ((fdvp != tdvp) ||
2543 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
2544 goto out; /* matching hardlinks, nothing to do */
2545
2546 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
2547 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
2548 goto skip_rm; /* case-variant hardlink in the same dir */
2549 } else {
2550 goto out; /* matching hardlink, nothing to do */
2551 }
2552 }
2553
2554 if (vnode_isdir(tvp))
2555 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE);
2556 else {
2557 if (tcp){
2558 rvp = tcp->c_rsrc_vp;
2559 }
2560 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0);
2561
2562 /* If the destination file had a resource fork vnode, we couldn't do
2563 * anything about it in hfs_removefile because we didn't have a reference on it.
2564 * We need to take action here to prevent it from leaking blocks. If removefile
2565 * succeeded, then squirrel away the vid of the resource fork vnode and force a
2566 * recycle after dropping all of the locks. The vid is guaranteed not to change
2567 * at this point because we still hold the cnode lock.
2568 */
2569 if ((error == 0) && (tcp->c_flag & C_DELETED) && rvp && !vnode_isinuse(rvp, 0)) {
2570 rsrc_vid = vnode_vid(rvp);
2571 recycle_rsrc = 1;
2572 }
2573 }
2574
2575 if (error)
2576 goto out;
2577 tvp_deleted = 1;
2578 }
2579 skip_rm:
2580 /*
2581 * All done with tvp and fvp
2582 */
2583
2584 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2585 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
2586 hfs_systemfile_unlock(hfsmp, lockflags);
2587
2588 if (error) {
2589 goto out;
2590 }
2591
2592 /* Invalidate negative cache entries in the destination directory */
2593 if (tdcp->c_flag & C_NEG_ENTRIES) {
2594 cache_purge_negatives(tdvp);
2595 tdcp->c_flag &= ~C_NEG_ENTRIES;
2596 }
2597
2598 /* Update cnode's catalog descriptor */
2599 replace_desc(fcp, &out_desc);
2600 fcp->c_parentcnid = tdcp->c_fileid;
2601 fcp->c_hint = 0;
2602
2603 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
2604 (fdcp->c_cnid == kHFSRootFolderID));
2605 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
2606 (tdcp->c_cnid == kHFSRootFolderID));
2607
2608 /* Update both parent directories. */
2609 if (fdvp != tdvp) {
2610 if (vnode_isdir(fvp)) {
2611 /* If the source directory has directory hard link
2612 * descendants, set the kHFSHasChildLinkBit in the
2613 * destination parent hierarchy
2614 */
2615 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
2616 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
2617
2618 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
2619
2620 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
2621 if (error) {
2622 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
2623 error = 0;
2624 }
2625 }
2626 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
2627 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
2628 }
2629 tdcp->c_entries++;
2630 tdcp->c_dirchangecnt++;
2631 if (fdcp->c_entries > 0)
2632 fdcp->c_entries--;
2633 fdcp->c_dirchangecnt++;
2634 fdcp->c_touch_chgtime = TRUE;
2635 fdcp->c_touch_modtime = TRUE;
2636
2637 fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
2638 (void) hfs_update(fdvp, 0);
2639 }
2640 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
2641 tdcp->c_touch_chgtime = TRUE;
2642 tdcp->c_touch_modtime = TRUE;
2643
2644 tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
2645 (void) hfs_update(tdvp, 0);
2646 out:
2647 if (got_cookie) {
2648 cat_postflight(hfsmp, &cookie, p);
2649 }
2650 if (started_tr) {
2651 hfs_end_transaction(hfsmp);
2652 }
2653
2654 /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have
2655 generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp.
2656 */
2657 if (error == 0) {
2658 HFS_KNOTE(fvp, NOTE_RENAME);
2659 HFS_KNOTE(fdvp, NOTE_WRITE);
2660 if (tdvp != fdvp) HFS_KNOTE(tdvp, NOTE_WRITE);
2661 };
2662
2663 fdcp->c_flag &= ~C_DIR_MODIFICATION;
2664 wakeup((caddr_t)&fdcp->c_flag);
2665 if (fdvp != tdvp) {
2666 tdcp->c_flag &= ~C_DIR_MODIFICATION;
2667 wakeup((caddr_t)&tdcp->c_flag);
2668 }
2669
2670 if (took_trunc_lock)
2671 hfs_unlock_truncate(VTOC(tvp), TRUE);
2672
2673 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
2674
2675 /* Now that we've dropped locks, see if we need to force recycle on the old
2676 * destination's rsrc fork, preventing a leak of the rsrc fork's blocks. Note that
2677 * doing the ref/rele is in order to set the VL_INACTIVE bit in the vnode's flags,
2678 * so that the last vnode_put on this vnode forces vnop_inactive to be triggered.
2679 */
2680 if ((recycle_rsrc) && (vnode_getwithvid(rvp, rsrc_vid) == 0)) {
2681 vnode_ref(rvp);
2682 vnode_rele(rvp);
2683 vnode_recycle(rvp);
2684 vnode_put (rvp);
2685 }
2686
2687
2688 /* After tvp is removed the only acceptable error is EIO */
2689 if (error && tvp_deleted)
2690 error = EIO;
2691
2692 return (error);
2693 }
2694
2695
2696 /*
2697 * Make a directory.
2698 */
2699 static int
2700 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
2701 {
2702 /***** HACK ALERT ********/
2703 ap->a_cnp->cn_flags |= MAKEENTRY;
2704 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
2705 }
2706
2707
2708 /*
2709 * Create a symbolic link.
2710 */
2711 static int
2712 hfs_vnop_symlink(struct vnop_symlink_args *ap)
2713 {
2714 struct vnode **vpp = ap->a_vpp;
2715 struct vnode *dvp = ap->a_dvp;
2716 struct vnode *vp = NULL;
2717 struct cnode *cp = NULL;
2718 struct hfsmount *hfsmp;
2719 struct filefork *fp;
2720 struct buf *bp = NULL;
2721 char *datap;
2722 int started_tr = 0;
2723 u_int32_t len;
2724 int error;
2725
2726 /* HFS standard disks don't support symbolic links */
2727 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
2728 return (ENOTSUP);
2729
2730 /* Check for empty target name */
2731 if (ap->a_target[0] == 0)
2732 return (EINVAL);
2733
2734 hfsmp = VTOHFS(dvp);
2735 len = strlen(ap->a_target);
2736
2737 /* Check for free space */
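/* (the symlink target is stored in the link's data fork, so at least len bytes must be available) */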
2738 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
2739 return (ENOSPC);
2740 }
2741
2742 /* Create the vnode */
2743 ap->a_vap->va_mode |= S_IFLNK;
2744 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
2745 goto out;
2746 }
2747 vp = *vpp;
2748 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
2749 goto out;
2750 }
2751 cp = VTOC(vp);
2752 fp = VTOF(vp);
2753
2754 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
2755 goto out;
2756 }
2757
2758 #if QUOTA
2759 (void)hfs_getinoquota(cp);
2760 #endif /* QUOTA */
2761
2762 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2763 goto out;
2764 }
2765 started_tr = 1;
2766
2767 /*
2768 * Allocate space for the link.
2769 *
2770 * Since we're already inside a transaction,
2771 * tell hfs_truncate to skip the ubc_setsize.
2772 *
2773 * Don't need truncate lock since a symlink is treated as a system file.
2774 */
2775 error = hfs_truncate(vp, len, IO_NOZEROFILL, 1, ap->a_context);
2776
2777 /* On errors, remove the symlink file */
2778 if (error) {
2779 /*
2780 * End the transaction so we don't re-take the cnode lock
2781 * below while inside a transaction (lock order violation).
2782 */
2783 hfs_end_transaction(hfsmp);
2784
2785 /* hfs_removefile() requires holding the truncate lock */
2786 hfs_unlock(cp);
2787 hfs_lock_truncate(cp, TRUE);
2788 hfs_lock(cp, HFS_FORCE_LOCK);
2789
2790 if (hfs_start_transaction(hfsmp) != 0) {
2791 started_tr = 0;
2792 hfs_unlock_truncate(cp, TRUE);
2793 goto out;
2794 }
2795
2796 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0);
2797 hfs_unlock_truncate(cp, TRUE);
2798 goto out;
2799 }
2800
2801 /* Write the link to disk */
2802 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),
2803 0, 0, BLK_META);
2804 if (hfsmp->jnl) {
2805 journal_modify_block_start(hfsmp->jnl, bp);
2806 }
2807 datap = (char *)buf_dataptr(bp);
2808 bzero(datap, buf_size(bp));
2809 bcopy(ap->a_target, datap, len);
2810
2811 if (hfsmp->jnl) {
2812 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
2813 } else {
2814 buf_bawrite(bp);
2815 }
2816 /*
2817 * We deferred the ubc_setsize for hfs_truncate
2818 * since we were inside a transaction.
2819 *
2820 * We don't need to drop the cnode lock here
2821 * since this is a symlink.
2822 */
2823 ubc_setsize(vp, len);
2824 out:
2825 if (started_tr)
2826 hfs_end_transaction(hfsmp);
2827 if ((cp != NULL) && (vp != NULL)) {
2828 hfs_unlock(cp);
2829 }
2830 if (error) {
2831 if (vp) {
2832 vnode_put(vp);
2833 }
2834 *vpp = NULL;
2835 }
2836 return (error);
2837 }
2838
2839
2840 /* structures to hold a "." or ".." directory entry */
2841 struct hfs_stddotentry {
2842 u_int32_t d_fileno; /* unique file number */
2843 u_int16_t d_reclen; /* length of this structure */
2844 u_int8_t d_type; /* dirent file type */
2845 u_int8_t d_namlen; /* len of filename */
2846 char d_name[4]; /* "." or ".." */
2847 };
2848
2849 struct hfs_extdotentry {
2850 u_int64_t d_fileno; /* unique file number */
2851 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
2852 u_int16_t d_reclen; /* length of this structure */
2853 u_int16_t d_namlen; /* len of filename */
2854 u_int8_t d_type; /* dirent file type */
2855 u_char d_name[3]; /* "." or ".." */
2856 };
2857
2858 typedef union {
2859 struct hfs_stddotentry std;
2860 struct hfs_extdotentry ext;
2861 } hfs_dotentry_t;
2862
2863 /*
2864 * hfs_vnop_readdir reads directory entries into the buffer pointed
2865 * to by uio, in a filesystem independent format. Up to uio_resid
2866 * bytes of data can be transferred. The data in the buffer is a
2867 * series of packed dirent structures where each one contains the
2868 * following entries:
2869 *
2870 * u_int32_t d_fileno; // file number of entry
2871 * u_int16_t d_reclen; // length of this record
2872 * u_int8_t d_type; // file type
2873 * u_int8_t d_namlen; // length of string in d_name
2874 * char d_name[MAXNAMELEN+1]; // null terminated file name
2875 *
2876 * The current position (uio_offset) refers to the next block of
2877 * entries. The offset can only be set to a value previously
2878 * returned by hfs_vnop_readdir or zero. This offset does not have
2879 * to match the number of bytes returned (in uio_resid).
2880 *
2881 * In fact, the offset used by HFS is essentially an index (26 bits)
2882 * with a tag (6 bits). The tag is for associating the next request
2883 * with the current request. This enables us to have multiple threads
2884 * reading the directory while the directory is also being modified.
2885 *
2886 * Each tag/index pair is tied to a unique directory hint. The hint
2887 * contains information (filename) needed to build the catalog b-tree
2888 * key for finding the next set of entries.
2889 *
2890 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
2891 * do NOT synthesize entries for "." and "..".
2892 */
2893 static int
2894 hfs_vnop_readdir(ap)
2895 struct vnop_readdir_args /* {
2896 vnode_t a_vp;
2897 uio_t a_uio;
2898 int a_flags;
2899 int *a_eofflag;
2900 int *a_numdirent;
2901 vfs_context_t a_context;
2902 } */ *ap;
2903 {
2904 struct vnode *vp = ap->a_vp;
2905 uio_t uio = ap->a_uio;
2906 struct cnode *cp;
2907 struct hfsmount *hfsmp;
2908 directoryhint_t *dirhint = NULL;
2909 directoryhint_t localhint;
2910 off_t offset;
2911 off_t startoffset;
2912 int error = 0;
2913 int eofflag = 0;
2914 user_addr_t user_start = 0;
2915 user_size_t user_len = 0;
2916 int index;
2917 unsigned int tag;
2918 int items;
2919 int lockflags;
2920 int extended;
2921 int nfs_cookies;
2922 caddr_t bufstart;
2923 cnid_t cnid_hint = 0;
2924
2925 items = 0;
2926 startoffset = offset = uio_offset(uio);
2927 bufstart = CAST_DOWN(caddr_t, uio_iov_base(uio));
2928 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
2929 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
2930
2931 /* Sanity check the uio data. */
2932 if ((uio_iovcnt(uio) > 1) ||
2933 (uio_resid(uio) < (int)sizeof(struct dirent))) {
2934 return (EINVAL);
2935 }
2936 /* Note that the dirhint calls require an exclusive lock. */
2937 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
2938 return (error);
2939 cp = VTOC(vp);
2940 hfsmp = VTOHFS(vp);
2941
2942 /* Pick up cnid hint (if any). */
2943 if (nfs_cookies) {
2944 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
2945 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
2946 if (cnid_hint == INT_MAX) { /* searching past the last item */
2947 eofflag = 1;
2948 goto out;
2949 }
2950 }
2951 /*
2952 * Synthesize entries for "." and "..", unless the directory has
2953 * been deleted, but not closed yet (lazy delete in progress).
2954 */
2955 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
2956 hfs_dotentry_t dotentry[2];
2957 size_t uiosize;
2958
2959 if (extended) {
2960 struct hfs_extdotentry *entry = &dotentry[0].ext;
2961
2962 entry->d_fileno = cp->c_cnid;
2963 entry->d_reclen = sizeof(struct hfs_extdotentry);
2964 entry->d_type = DT_DIR;
2965 entry->d_namlen = 1;
2966 entry->d_name[0] = '.';
2967 entry->d_name[1] = '\0';
2968 entry->d_name[2] = '\0';
2969 entry->d_seekoff = 1;
2970
2971 ++entry;
2972 entry->d_fileno = cp->c_parentcnid;
2973 entry->d_reclen = sizeof(struct hfs_extdotentry);
2974 entry->d_type = DT_DIR;
2975 entry->d_namlen = 2;
2976 entry->d_name[0] = '.';
2977 entry->d_name[1] = '.';
2978 entry->d_name[2] = '\0';
2979 entry->d_seekoff = 2;
2980 uiosize = 2 * sizeof(struct hfs_extdotentry);
2981 } else {
2982 struct hfs_stddotentry *entry = &dotentry[0].std;
2983
2984 entry->d_fileno = cp->c_cnid;
2985 entry->d_reclen = sizeof(struct hfs_stddotentry);
2986 entry->d_type = DT_DIR;
2987 entry->d_namlen = 1;
2988 *(int *)&entry->d_name[0] = 0;
2989 entry->d_name[0] = '.';
2990
2991 ++entry;
2992 entry->d_fileno = cp->c_parentcnid;
2993 entry->d_reclen = sizeof(struct hfs_stddotentry);
2994 entry->d_type = DT_DIR;
2995 entry->d_namlen = 2;
2996 *(int *)&entry->d_name[0] = 0;
2997 entry->d_name[0] = '.';
2998 entry->d_name[1] = '.';
2999 uiosize = 2 * sizeof(struct hfs_stddotentry);
3000 }
3001 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
3002 goto out;
3003 }
3004 offset += 2;
3005 }
3006
3007 /* If there are no real entries then we're done. */
3008 if (cp->c_entries == 0) {
3009 error = 0;
3010 eofflag = 1;
3011 uio_setoffset(uio, offset);
3012 goto seekoffcalc;
3013 }
3014
3015 //
3016 // We have to lock the user's buffer here so that we won't
3017 // fault on it after we've acquired a shared lock on the
3018 // catalog file. The issue is that you can get a 3-way
3019 // deadlock if someone else starts a transaction and then
3020 // tries to lock the catalog file but can't because we're
3021 // here and we can't service our page fault because VM is
3022 // blocked trying to start a transaction as a result of
3023 // trying to free up pages for our page fault. It's messy
3024 // but it does happen on dual-processors that are paging
3025 // heavily (see radar 3082639 for more info). By locking
3026 // the buffer up-front we prevent ourselves from faulting
3027 // while holding the shared catalog file lock.
3028 //
3029 // Fortunately this and hfs_search() are the only two places
3030 // currently (10/30/02) that can fault on user data with a
3031 // shared lock on the catalog file.
3032 //
3033 if (hfsmp->jnl && uio_isuserspace(uio)) {
3034 user_start = uio_curriovbase(uio);
3035 user_len = uio_curriovlen(uio);
3036
3037 if ((error = vslock(user_start, user_len)) != 0) {
3038 user_start = 0;
3039 goto out;
3040 }
3041 }
3042 /* Convert offset into a catalog directory index. */
3043 index = (offset & HFS_INDEX_MASK) - 2;
3044 tag = offset & ~HFS_INDEX_MASK;
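/*
 * As described in the header comment above: the low HFS_INDEX_BITS of the
 * offset hold the directory index (minus 2 for the synthesized "." and ".."
 * entries) and the remaining high bits hold a tag that associates this
 * request with the directory hint left behind by the previous one.
 */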
3045
3046 /* Lock catalog during cat_findname and cat_getdirentries. */
3047 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3048
3049 /* When called from NFS, try and resolve a cnid hint. */
3050 if (nfs_cookies && cnid_hint != 0) {
3051 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
3052 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
3053 localhint.dh_index = index - 1;
3054 localhint.dh_time = 0;
3055 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
3056 dirhint = &localhint; /* don't forget to release the descriptor */
3057 } else {
3058 cat_releasedesc(&localhint.dh_desc);
3059 }
3060 }
3061 }
3062
3063 /* Get a directory hint (cnode must be locked exclusive) */
3064 if (dirhint == NULL) {
3065 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
3066
3067 /* Hide tag from catalog layer. */
3068 dirhint->dh_index &= HFS_INDEX_MASK;
3069 if (dirhint->dh_index == HFS_INDEX_MASK) {
3070 dirhint->dh_index = -1;
3071 }
3072 }
3073
3074 if (index == 0) {
3075 dirhint->dh_threadhint = cp->c_dirthreadhint;
3076 }
3077
3078 /* Pack the buffer with dirent entries. */
3079 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items, &eofflag);
3080
3081 if (index == 0 && error == 0) {
3082 cp->c_dirthreadhint = dirhint->dh_threadhint;
3083 }
3084
3085 hfs_systemfile_unlock(hfsmp, lockflags);
3086
3087 if (error != 0) {
3088 goto out;
3089 }
3090
3091 /* Get index to the next item */
3092 index += items;
3093
3094 if (items >= (int)cp->c_entries) {
3095 eofflag = 1;
3096 }
3097
3098 /* Convert catalog directory index back into an offset. */
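/*
 * If the caller's offset carried no tag (e.g. the initial offset of 0),
 * generate a non-zero tag from the per-directory counter so the returned
 * offset can be matched to this directory hint on the next call.
 */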
3099 while (tag == 0)
3100 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
3101 uio_setoffset(uio, (index + 2) | tag);
3102 dirhint->dh_index |= tag;
3103
3104 seekoffcalc:
3105 cp->c_touch_acctime = TRUE;
3106
3107 if (ap->a_numdirent) {
3108 if (startoffset == 0)
3109 items += 2;
3110 *ap->a_numdirent = items;
3111 }
3112
3113 out:
3114 if (hfsmp->jnl && user_start) {
3115 vsunlock(user_start, user_len, TRUE);
3116 }
3117 /* If we didn't do anything then go ahead and dump the hint. */
3118 if ((dirhint != NULL) &&
3119 (dirhint != &localhint) &&
3120 (uio_offset(uio) == startoffset)) {
3121 hfs_reldirhint(cp, dirhint);
3122 eofflag = 1;
3123 }
3124 if (ap->a_eofflag) {
3125 *ap->a_eofflag = eofflag;
3126 }
3127 if (dirhint == &localhint) {
3128 cat_releasedesc(&localhint.dh_desc);
3129 }
3130 hfs_unlock(cp);
3131 return (error);
3132 }
3133
3134
3135 /*
3136 * Read contents of a symbolic link.
3137 */
3138 static int
3139 hfs_vnop_readlink(ap)
3140 struct vnop_readlink_args /* {
3141 struct vnode *a_vp;
3142 struct uio *a_uio;
3143 vfs_context_t a_context;
3144 } */ *ap;
3145 {
3146 struct vnode *vp = ap->a_vp;
3147 struct cnode *cp;
3148 struct filefork *fp;
3149 int error;
3150
3151 if (!vnode_islnk(vp))
3152 return (EINVAL);
3153
3154 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
3155 return (error);
3156 cp = VTOC(vp);
3157 fp = VTOF(vp);
3158
3159 /* Zero-length and oversized symlinks are not allowed */
3160 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
3161 printf("hfs: invalid symlink length on fileid %d\n", cp->c_fileid);
3162 error = EINVAL;
3163 goto exit;
3164 }
3165
3166 /* Cache the path so we don't waste buffer cache resources */
3167 if (fp->ff_symlinkptr == NULL) {
3168 struct buf *bp = NULL;
3169
3170 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
3171 error = (int)buf_meta_bread(vp, (daddr64_t)0,
3172 roundup((int)fp->ff_size,
3173 VTOHFS(vp)->hfs_phys_block_size),
3174 vfs_context_ucred(ap->a_context), &bp);
3175 if (error) {
3176 if (bp)
3177 buf_brelse(bp);
3178 if (fp->ff_symlinkptr) {
3179 FREE(fp->ff_symlinkptr, M_TEMP);
3180 fp->ff_symlinkptr = NULL;
3181 }
3182 goto exit;
3183 }
3184 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
3185
3186 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
3187 buf_markinvalid(bp); /* data no longer needed */
3188 }
3189 buf_brelse(bp);
3190 }
3191 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
3192
3193 /*
3194 * Keep track of blocks read
3195 */
3196 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
3197
3198 /*
3199 * If this file hasn't been seen since the start of
3200 * the current sampling period then start over.
3201 */
3202 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
3203 VTOF(vp)->ff_bytesread = fp->ff_size;
3204 else
3205 VTOF(vp)->ff_bytesread += fp->ff_size;
3206
3207 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
3208 // cp->c_touch_acctime = TRUE;
3209 }
3210
3211 exit:
3212 hfs_unlock(cp);
3213 return (error);
3214 }
3215
3216
3217 /*
3218 * Get configurable pathname variables.
3219 */
3220 static int
3221 hfs_vnop_pathconf(ap)
3222 struct vnop_pathconf_args /* {
3223 struct vnode *a_vp;
3224 int a_name;
3225 int *a_retval;
3226 vfs_context_t a_context;
3227 } */ *ap;
3228 {
3229 switch (ap->a_name) {
3230 case _PC_LINK_MAX:
3231 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
3232 *ap->a_retval = 1;
3233 else
3234 *ap->a_retval = HFS_LINK_MAX;
3235 break;
3236 case _PC_NAME_MAX:
3237 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
3238 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
3239 else
3240 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
3241 break;
3242 case _PC_PATH_MAX:
3243 *ap->a_retval = PATH_MAX; /* 1024 */
3244 break;
3245 case _PC_PIPE_BUF:
3246 *ap->a_retval = PIPE_BUF;
3247 break;
3248 case _PC_CHOWN_RESTRICTED:
3249 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
3250 break;
3251 case _PC_NO_TRUNC:
3252 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
3253 break;
3254 case _PC_NAME_CHARS_MAX:
3255 *ap->a_retval = kHFSPlusMaxFileNameChars;
3256 break;
3257 case _PC_CASE_SENSITIVE:
3258 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
3259 *ap->a_retval = 1;
3260 else
3261 *ap->a_retval = 0;
3262 break;
3263 case _PC_CASE_PRESERVING:
3264 *ap->a_retval = 1;
3265 break;
3266 case _PC_FILESIZEBITS:
3267 *ap->a_retval = 64; /* number of bits to store max file size */
3268 break;
3269 default:
3270 return (EINVAL);
3271 }
3272
3273 return (0);
3274 }
3275
3276
3277 /*
3278 * Update a cnode's on-disk metadata.
3279 *
3280 * If waitfor is set, then wait for the disk write of
3281 * the node to complete.
3282 *
3283 * The cnode must be locked exclusive
3284 */
3285 __private_extern__
3286 int
3287 hfs_update(struct vnode *vp, __unused int waitfor)
3288 {
3289 struct cnode *cp = VTOC(vp);
3290 struct proc *p;
3291 struct cat_fork *dataforkp = NULL;
3292 struct cat_fork *rsrcforkp = NULL;
3293 struct cat_fork datafork;
3294 struct cat_fork rsrcfork;
3295 struct hfsmount *hfsmp;
3296 int lockflags;
3297 int error;
3298
3299 p = current_proc();
3300 hfsmp = VTOHFS(vp);
3301
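/*
 * Skip system cnodes (below the first user catalog node ID), read-only
 * volumes, and cnodes whose c_mode has been zeroed (hfs_removefile does
 * this to suppress updates once the catalog record is gone).
 */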
3302 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
3303 hfsmp->hfs_catalog_vp == NULL){
3304 return (0);
3305 }
3306 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
3307 cp->c_flag &= ~C_MODIFIED;
3308 cp->c_touch_acctime = 0;
3309 cp->c_touch_chgtime = 0;
3310 cp->c_touch_modtime = 0;
3311 return (0);
3312 }
3313
3314 hfs_touchtimes(hfsmp, cp);
3315
3316 /* Nothing to update. */
3317 if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) {
3318 return (0);
3319 }
3320
3321 if (cp->c_datafork)
3322 dataforkp = &cp->c_datafork->ff_data;
3323 if (cp->c_rsrcfork)
3324 rsrcforkp = &cp->c_rsrcfork->ff_data;
3325
3326 /*
3327 * For delayed allocations updates are
3328 * postponed until an fsync or the file
3329 * gets written to disk.
3330 *
3331 * Deleted files can defer meta data updates until inactive.
3332 *
3333 * If we're ever called with the C_FORCEUPDATE flag though
3334 * we have to do the update.
3335 */
3336 if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
3337 (ISSET(cp->c_flag, C_DELETED) ||
3338 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
3339 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
3340 // cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
3341 cp->c_flag |= C_MODIFIED;
3342
3343 HFS_KNOTE(vp, NOTE_ATTRIB);
3344
3345 return (0);
3346 }
3347
3348 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3349 return error;
3350 }
3351
3352 /*
3353 * For files with invalid ranges (holes) the on-disk
3354 * field representing the size of the file (cf_size)
3355 * must be no larger than the start of the first hole.
3356 */
3357 if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
3358 bcopy(dataforkp, &datafork, sizeof(datafork));
3359 datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
3360 dataforkp = &datafork;
3361 } else if (dataforkp && (cp->c_datafork->ff_unallocblocks != 0)) {
3362 // always make sure the block count and the size
3363 // of the file match the number of blocks actually
3364 // allocated to the file on disk
3365 bcopy(dataforkp, &datafork, sizeof(datafork));
3366 // make sure that we don't assign a negative block count
3367 if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
3368 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
3369 cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
3370 }
3371 datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
3372 datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
3373 dataforkp = &datafork;
3374 }
3375
3376 /*
3377 * For resource forks with delayed allocations, make sure
3378 * the block count and file size match the number of blocks
3379 * actually allocated to the file on disk.
3380 */
3381 if (rsrcforkp && (cp->c_rsrcfork->ff_unallocblocks != 0)) {
3382 bcopy(rsrcforkp, &rsrcfork, sizeof(rsrcfork));
3383 rsrcfork.cf_blocks = (cp->c_rsrcfork->ff_blocks - cp->c_rsrcfork->ff_unallocblocks);
3384 rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
3385 rsrcforkp = &rsrcfork;
3386 }
3387
3388 /*
3389 * Lock the Catalog b-tree file.
3390 */
3391 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3392
3393 /* XXX - waitfor is not enforced */
3394 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
3395
3396 hfs_systemfile_unlock(hfsmp, lockflags);
3397
3398 /* After the updates are finished, clear the flags */
3399 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
3400
3401 hfs_end_transaction(hfsmp);
3402
3403 HFS_KNOTE(vp, NOTE_ATTRIB);
3404
3405 return (error);
3406 }
3407
3408 /*
3409 * Allocate a new node
3410 * Note: this function does not create and return a vnode for whiteouts.
3411 */
3412 static int
3413 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3414 struct vnode_attr *vap, vfs_context_t ctx)
3415 {
3416 struct cnode *cp = NULL;
3417 struct cnode *dcp;
3418 struct vnode *tvp;
3419 struct hfsmount *hfsmp;
3420 struct cat_desc in_desc, out_desc;
3421 struct cat_attr attr;
3422 struct timeval tv;
3423 int lockflags;
3424 int error, started_tr = 0;
3425 enum vtype vnodetype;
3426 int mode;
3427
3428 dcp = VTOC(dvp);
3429 if ((error = hfs_lock(dcp, HFS_EXCLUSIVE_LOCK)))
3430 return (error);
3431
3432 dcp->c_flag |= C_DIR_MODIFICATION;
3433
3434 hfsmp = VTOHFS(dvp);
3435 *vpp = NULL;
3436 tvp = NULL;
3437 out_desc.cd_flags = 0;
3438 out_desc.cd_nameptr = NULL;
3439
3440 vnodetype = vap->va_type;
3441 if (vnodetype == VNON)
3442 vnodetype = VREG;
3443 mode = MAKEIMODE(vnodetype, vap->va_mode);
3444
3445 /* Check if we're out of usable disk space. */
3446 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
3447 error = ENOSPC;
3448 goto exit;
3449 }
3450
3451 microtime(&tv);
3452
3453 /* Setup the default attributes */
3454 bzero(&attr, sizeof(attr));
3455 attr.ca_mode = mode;
3456 attr.ca_linkcount = 1;
3457 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
3458 attr.ca_rdev = vap->va_rdev;
3459 }
3460 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
3461 VATTR_SET_SUPPORTED(vap, va_create_time);
3462 attr.ca_itime = vap->va_create_time.tv_sec;
3463 } else {
3464 attr.ca_itime = tv.tv_sec;
3465 }
3466 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
3467 attr.ca_itime += 3600; /* Same as what hfs_update does */
3468 }
3469 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
3470 attr.ca_atimeondisk = attr.ca_atime;
3471 if (VATTR_IS_ACTIVE(vap, va_flags)) {
3472 VATTR_SET_SUPPORTED(vap, va_flags);
3473 attr.ca_flags = vap->va_flags;
3474 }
3475
3476 /*
3477 * HFS+ only: all files get ThreadExists
3478 * HFSX only: dirs get HasFolderCount
3479 */
3480 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
3481 if (vnodetype == VDIR) {
3482 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
3483 attr.ca_recflags = kHFSHasFolderCountMask;
3484 } else {
3485 attr.ca_recflags = kHFSThreadExistsMask;
3486 }
3487 }
3488
3489 attr.ca_uid = vap->va_uid;
3490 attr.ca_gid = vap->va_gid;
3491 VATTR_SET_SUPPORTED(vap, va_mode);
3492 VATTR_SET_SUPPORTED(vap, va_uid);
3493 VATTR_SET_SUPPORTED(vap, va_gid);
3494
3495 /* Tag symlinks with a type and creator. */
3496 if (vnodetype == VLNK) {
3497 struct FndrFileInfo *fip;
3498
3499 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
3500 fip->fdType = SWAP_BE32(kSymLinkFileType);
3501 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
3502 }
3503 if (cnp->cn_flags & ISWHITEOUT)
3504 attr.ca_flags |= UF_OPAQUE;
3505
3506 /* Setup the descriptor */
3507 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3508 in_desc.cd_namelen = cnp->cn_namelen;
3509 in_desc.cd_parentcnid = dcp->c_fileid;
3510 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
3511 in_desc.cd_hint = dcp->c_childhint;
3512 in_desc.cd_encoding = 0;
3513
3514 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3515 goto exit;
3516 }
3517 started_tr = 1;
3518
3519 // have to also lock the attribute file because cat_create() needs
3520 // to check that any fileID it wants to use does not have orphaned
3521 // attributes in it.
3522 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
3523
3524 /* Reserve some space in the Catalog file. */
3525 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
3526 hfs_systemfile_unlock(hfsmp, lockflags);
3527 goto exit;
3528 }
3529 error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
3530 if (error == 0) {
3531 /* Update the parent directory */
3532 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
3533 dcp->c_entries++;
3534 if (vnodetype == VDIR) {
3535 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3536 }
3537 dcp->c_dirchangecnt++;
3538 dcp->c_ctime = tv.tv_sec;
3539 dcp->c_mtime = tv.tv_sec;
3540 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3541 HFS_KNOTE(dvp, NOTE_ATTRIB);
3542 }
3543 hfs_systemfile_unlock(hfsmp, lockflags);
3544 if (error)
3545 goto exit;
3546
3547 /* Invalidate negative cache entries in the directory */
3548 if (dcp->c_flag & C_NEG_ENTRIES) {
3549 cache_purge_negatives(dvp);
3550 dcp->c_flag &= ~C_NEG_ENTRIES;
3551 }
3552
3553 if (vnodetype == VDIR) {
3554 HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
3555 } else {
3556 HFS_KNOTE(dvp, NOTE_WRITE);
3557 };
3558
3559 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
3560 (dcp->c_cnid == kHFSRootFolderID));
3561
3562 // XXXdbg
3563 // have to end the transaction here before we call hfs_getnewvnode()
3564 // because that can cause us to try and reclaim a vnode on a different
3565 // file system which could cause us to start a transaction which can
3566 // deadlock with someone on that other file system (since we could be
3567 // holding two transaction locks as well as various vnodes and we did
3568 // not obtain the locks on them in the proper order).
3569 //
3570 // NOTE: this means that if the quota check fails or we have to update
3571 // the change time on a block-special device that those changes
3572 // will happen as part of independent transactions.
3573 //
3574 if (started_tr) {
3575 hfs_end_transaction(hfsmp);
3576 started_tr = 0;
3577 }
3578
3579 /* Do not create vnode for whiteouts */
3580 if (S_ISWHT(mode)) {
3581 goto exit;
3582 }
3583
3584 /*
3585 * Create a vnode for the object just created.
3586 *
3587 * The cnode is locked on successful return.
3588 */
3589 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, GNV_CREATE, &attr, NULL, &tvp);
3590 if (error)
3591 goto exit;
3592
3593 cp = VTOC(tvp);
3594 #if QUOTA
3595 /*
3596 * We call hfs_chkiq with FORCE flag so that if we
3597 * fall through to the rmdir we actually have
3598 * accounted for the inode
3599 */
3600 if (hfsmp->hfs_flags & HFS_QUOTAS) {
3601 if ((error = hfs_getinoquota(cp)) ||
3602 (error = hfs_chkiq(cp, 1, vfs_context_ucred(ctx), FORCE))) {
3603
3604 if (vnode_isdir(tvp))
3605 (void) hfs_removedir(dvp, tvp, cnp, 0);
3606 else {
3607 hfs_unlock(cp);
3608 hfs_lock_truncate(cp, TRUE);
3609 hfs_lock(cp, HFS_FORCE_LOCK);
3610 (void) hfs_removefile(dvp, tvp, cnp, 0, 0, 0);
3611 hfs_unlock_truncate(cp, TRUE);
3612 }
3613 /*
3614 * we successfully allocated a new vnode, but
3615 * the quota check is telling us we're beyond
3616 * our limit, so we need to dump our lock + reference
3617 */
3618 hfs_unlock(cp);
3619 vnode_put(tvp);
3620
3621 goto exit;
3622 }
3623 }
3624 #endif /* QUOTA */
3625
3626 *vpp = tvp;
3627 exit:
3628 cat_releasedesc(&out_desc);
3629
3630 /*
3631 * Check if a file is located in the "Cleanup At Startup"
3632 * directory. If it is then tag it as NODUMP so that we
3633 * can be lazy about zero filling data holes.
3634 */
3635 if ((error == 0) && dvp && (vnodetype == VREG) &&
3636 (dcp->c_desc.cd_nameptr != NULL) &&
3637 (strncmp((const char *)dcp->c_desc.cd_nameptr,
3638 CARBON_TEMP_DIR_NAME,
3639 sizeof(CARBON_TEMP_DIR_NAME)) == 0)) {
3640 struct vnode *ddvp;
3641
3642 dcp->c_flag &= ~C_DIR_MODIFICATION;
3643 wakeup((caddr_t)&dcp->c_flag);
3644
3645 hfs_unlock(dcp);
3646 dvp = NULL;
3647
3648 /*
3649 * The parent of "Cleanup At Startup" should
3650 * have the ASCII name of the userid.
3651 */
3652 if (hfs_vget(hfsmp, dcp->c_parentcnid, &ddvp, 0) == 0) {
3653 if (VTOC(ddvp)->c_desc.cd_nameptr) {
3654 uid_t uid;
3655
3656 uid = strtoul((const char *)VTOC(ddvp)->c_desc.cd_nameptr, 0, 0);
3657 if ((uid == cp->c_uid) ||
3658 (uid == vfs_context_ucred(ctx)->cr_uid)) {
3659 cp->c_flags |= UF_NODUMP;
3660 cp->c_touch_chgtime = TRUE;
3661 }
3662 }
3663 hfs_unlock(VTOC(ddvp));
3664 vnode_put(ddvp);
3665 }
3666 }
3667 if (dvp) {
3668 dcp->c_flag &= ~C_DIR_MODIFICATION;
3669 wakeup((caddr_t)&dcp->c_flag);
3670
3671 hfs_unlock(dcp);
3672 }
3673 if (error == 0 && cp != NULL) {
3674 hfs_unlock(cp);
3675 }
3676 if (started_tr) {
3677 hfs_end_transaction(hfsmp);
3678 started_tr = 0;
3679 }
3680
3681 return (error);
3682 }
3683
3684
3685 /*
3686 * Return a referenced vnode for the resource fork
3687 *
3688 * cnode for vnode vp must already be locked.
3689 *
3690 * can_drop_lock is true if it's safe to temporarily drop/re-acquire the cnode lock
3691 */
3692 __private_extern__
3693 int
3694 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, int can_drop_lock)
3695 {
3696 struct vnode *rvp;
3697 struct vnode *dvp = NULLVP;
3698 struct cnode *cp = VTOC(vp);
3699 int error;
3700 int vid;
3701
3702 restart:
3703 /* Attempt to use the existing vnode */
3704 if ((rvp = cp->c_rsrc_vp)) {
3705 vid = vnode_vid(rvp);
3706
3707 /*
3708 * It is not safe to hold the cnode lock when calling vnode_getwithvid()
3709 * for the alternate fork -- vnode_getwithvid() could deadlock waiting
3710 * for a VL_WANTTERM while another thread has an iocount on the alternate
3711 * fork vnode and is attempting to acquire the common cnode lock.
3712 *
3713 * But it's also not safe to drop the cnode lock when we're holding
3714 * multiple cnode locks, like during a hfs_removefile() operation
3715 * since we could lock out of order when re-acquiring the cnode lock.
3716 *
3717 * So we can only drop the lock here if its safe to drop it -- which is
3718 * most of the time with the exception being hfs_removefile().
3719 */
3720 if (can_drop_lock)
3721 hfs_unlock(cp);
3722
3723 error = vnode_getwithvid(rvp, vid);
3724
3725 if (can_drop_lock) {
3726 (void) hfs_lock(cp, HFS_FORCE_LOCK);
3727 /*
3728 * When our lock was relinquished, the resource fork
3729 * could have been recycled. Check for this and try
3730 * again.
3731 */
3732 if (error == ENOENT)
3733 goto restart;
3734 }
3735 if (error) {
3736 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
3737
3738 if (name)
3739 printf("hfs_vgetrsrc: couldn't get resource"
3740 " fork for %s, err %d\n", name, error);
3741 return (error);
3742 }
3743 } else {
3744 struct cat_fork rsrcfork;
3745 struct componentname cn;
3746 int lockflags;
3747
3748 /*
3749 * Make sure cnode lock is exclusive, if not upgrade it.
3750 *
3751 * We assume that we were called from a read-only VNOP (getattr)
3752 * and that it's safe to have the cnode lock dropped and reacquired.
3753 */
3754 if (cp->c_lockowner != current_thread()) {
3755 if (!can_drop_lock)
3756 return (EINVAL);
3757 /*
3758 * If the upgrade fails we lose the lock and
3759 * have to take the exclusive lock on our own.
3760 */
3761 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE)
3762 lck_rw_lock_exclusive(&cp->c_rwlock);
3763 cp->c_lockowner = current_thread();
3764 }
3765
3766 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3767
3768 /* Get resource fork data */
3769 error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
3770 (struct cat_attr *)0, &rsrcfork, NULL);
3771
3772 hfs_systemfile_unlock(hfsmp, lockflags);
3773 if (error)
3774 return (error);
3775
3776 /*
3777 * Supply hfs_getnewvnode with a component name.
3778 */
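/* The name used is the file's own name with the resource fork path suffix (_PATH_RSRCFORKSPEC from <sys/paths.h>) appended. */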
3779 cn.cn_pnbuf = NULL;
3780 if (cp->c_desc.cd_nameptr) {
3781 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
3782 cn.cn_nameiop = LOOKUP;
3783 cn.cn_flags = ISLASTCN | HASBUF;
3784 cn.cn_context = NULL;
3785 cn.cn_pnlen = MAXPATHLEN;
3786 cn.cn_nameptr = cn.cn_pnbuf;
3787 cn.cn_hash = 0;
3788 cn.cn_consume = 0;
3789 cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
3790 "%s%s", cp->c_desc.cd_nameptr,
3791 _PATH_RSRCFORKSPEC);
3792 }
3793 dvp = vnode_getparent(vp);
3794 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
3795 &cp->c_desc, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
3796 &rsrcfork, &rvp);
3797 if (dvp)
3798 vnode_put(dvp);
3799 if (cn.cn_pnbuf)
3800 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
3801 if (error)
3802 return (error);
3803 }
3804
3805 *rvpp = rvp;
3806 return (0);
3807 }
3808
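/*
 * A minimal sketch (compiled out) of a hfs_vgetrsrc() caller following the
 * contract above: lock the cnode, pass can_drop_lock = 1 because only this
 * one cnode lock is held, and drop the returned iocount with vnode_put().
 * The function below is hypothetical and not referenced anywhere.
 */
#if 0	/* example only */
static int
example_get_rsrc_size(struct hfsmount *hfsmp, struct vnode *vp, off_t *sizep)
{
	struct vnode *rvp = NULLVP;
	int error;

	if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
		return (error);
	error = hfs_vgetrsrc(hfsmp, vp, &rvp, 1);	/* 1 == safe to drop the cnode lock */
	if (error == 0) {
		*sizep = VTOF(rvp)->ff_size;		/* resource fork size */
		vnode_put(rvp);				/* release the returned iocount */
	}
	hfs_unlock(VTOC(vp));
	return (error);
}
#endif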
3809
3810 static void
3811 filt_hfsdetach(struct knote *kn)
3812 {
3813 struct vnode *vp;
3814
3815 vp = (struct vnode *)kn->kn_hook;
3816 if (vnode_getwithvid(vp, kn->kn_hookid))
3817 return;
3818
3819 if (1) { /* ! KNDETACH_VNLOCKED */
3820 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
3821 (void) KNOTE_DETACH(&VTOC(vp)->c_knotes, kn);
3822 hfs_unlock(VTOC(vp));
3823 }
3824 }
3825
3826 vnode_put(vp);
3827 }
3828
3829 /*ARGSUSED*/
3830 static int
3831 filt_hfsread(struct knote *kn, long hint)
3832 {
3833 struct vnode *vp = (struct vnode *)kn->kn_hook;
3834 int dropvp = 0;
3835
3836 if (hint == 0) {
3837 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3838 hint = NOTE_REVOKE;
3839 } else
3840 dropvp = 1;
3841 }
3842 if (hint == NOTE_REVOKE) {
3843 /*
3844 * filesystem is gone, so set the EOF flag and schedule
3845 * the knote for deletion.
3846 */
3847 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3848 return (1);
3849 }
3850
3851 /* poll(2) semantics dictate always saying there is data */
3852 if (!(kn->kn_flags & EV_POLL)) {
3853 off_t amount;
3854
3855 amount = VTOF(vp)->ff_size - kn->kn_fp->f_fglob->fg_offset;
3856 if (amount > (off_t)INTPTR_MAX)
3857 kn->kn_data = INTPTR_MAX;
3858 else if (amount < (off_t)INTPTR_MIN)
3859 kn->kn_data = INTPTR_MIN;
3860 else
3861 kn->kn_data = (intptr_t)amount;
3862 } else {
3863 kn->kn_data = 1;
3864 }
3865
3866 if (dropvp)
3867 vnode_put(vp);
3868
3869 return (kn->kn_data != 0);
3870 }
3871
3872 /*ARGSUSED*/
3873 static int
3874 filt_hfswrite(struct knote *kn, long hint)
3875 {
3876 struct vnode *vp = (struct vnode *)kn->kn_hook;
3877
3878 if (hint == 0) {
3879 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3880 hint = NOTE_REVOKE;
3881 } else
3882 vnode_put(vp);
3883 }
3884 if (hint == NOTE_REVOKE) {
3885 /*
3886 * filesystem is gone, so set the EOF flag and schedule
3887 * the knote for deletion.
3888 */
3889 kn->kn_data = 0;
3890 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3891 return (1);
3892 }
3893 kn->kn_data = 0;
3894 return (1);
3895 }
3896
3897 static int
3898 filt_hfsvnode(struct knote *kn, long hint)
3899 {
3900 struct vnode *vp = (struct vnode *)kn->kn_hook;
3901
3902 if (hint == 0) {
3903 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3904 hint = NOTE_REVOKE;
3905 } else
3906 vnode_put(vp);
3907 }
3908 if (kn->kn_sfflags & hint)
3909 kn->kn_fflags |= hint;
3910 if ((hint == NOTE_REVOKE)) {
3911 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3912 return (1);
3913 }
3914
3915 return (kn->kn_fflags != 0);
3916 }
3917
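/* Initializers below follow struct filterops order: f_isfd, f_attach, f_detach, f_event. */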
3918 static struct filterops hfsread_filtops =
3919 { 1, NULL, filt_hfsdetach, filt_hfsread };
3920 static struct filterops hfswrite_filtops =
3921 { 1, NULL, filt_hfsdetach, filt_hfswrite };
3922 static struct filterops hfsvnode_filtops =
3923 { 1, NULL, filt_hfsdetach, filt_hfsvnode };
3924
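/*
 * A minimal userland sketch (compiled out) of the EVFILT_READ filter above:
 * outside the poll(2) path, event.data reports ff_size minus the open file's
 * offset, clamped to intptr_t.  The path name is hypothetical and this is an
 * ordinary user program, not kernel code.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/example.dat", O_RDONLY);	/* hypothetical file */
	int kq = kqueue();
	struct kevent change, event;

	EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &change, 1, NULL, 0, NULL) == -1)	/* attach the knote */
		return 1;
	if (kevent(kq, NULL, 0, &event, 1, NULL) == 1)		/* wait for readiness */
		printf("readable bytes: %ld\n", (long)event.data);
	close(kq);
	close(fd);
	return 0;
}
#endif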
3925 /*
3926 * Add a kqueue filter.
3927 */
3928 static int
3929 hfs_vnop_kqfiltadd(
3930 struct vnop_kqfilt_add_args /* {
3931 struct vnode *a_vp;
3932 struct knote *a_kn;
3933 struct proc *p;
3934 vfs_context_t a_context;
3935 } */ *ap)
3936 {
3937 struct vnode *vp = ap->a_vp;
3938 struct knote *kn = ap->a_kn;
3939 int error;
3940
3941 switch (kn->kn_filter) {
3942 case EVFILT_READ:
3943 if (vnode_isreg(vp)) {
3944 kn->kn_fop = &hfsread_filtops;
3945 } else {
3946 return EINVAL;
3947 };
3948 break;
3949 case EVFILT_WRITE:
3950 if (vnode_isreg(vp)) {
3951 kn->kn_fop = &hfswrite_filtops;
3952 } else {
3953 return EINVAL;
3954 };
3955 break;
3956 case EVFILT_VNODE:
3957 kn->kn_fop = &hfsvnode_filtops;
3958 break;
3959 default:
3960 return (1);
3961 }
3962
3963 kn->kn_hook = (caddr_t)vp;
3964 kn->kn_hookid = vnode_vid(vp);
3965
3966 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
3967 return (error);
3968 KNOTE_ATTACH(&VTOC(vp)->c_knotes, kn);
3969 hfs_unlock(VTOC(vp));
3970
3971 return (0);
3972 }
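
/*
 * A minimal userland sketch (compiled out) of attaching an EVFILT_VNODE
 * knote, which filt_hfsvnode() services by accumulating the NOTE_* hints
 * that intersect the requested kn_sfflags mask.  The helper name and path
 * are hypothetical; this is an ordinary user program, not kernel code.
 */
#if 0	/* example only */
#include <sys/event.h>
#include <fcntl.h>

int
watch_for_delete_or_write(const char *path)
{
	int fd = open(path, O_EVTONLY);
	int kq = kqueue();
	struct kevent change, event;

	EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_DELETE | NOTE_WRITE | NOTE_RENAME, 0, NULL);
	kevent(kq, &change, 1, NULL, 0, NULL);		/* attach */
	kevent(kq, NULL, 0, &event, 1, NULL);		/* block until a hint fires */
	return (int)event.fflags;			/* which NOTE_* bits fired */
}
#endif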
3973
3974 /*
3975 * Remove a kqueue filter
3976 */
3977 static int
3978 hfs_vnop_kqfiltremove(ap)
3979 struct vnop_kqfilt_remove_args /* {
3980 struct vnode *a_vp;
3981 uintptr_t ident;
3982 vfs_context_t a_context;
3983 } */__unused *ap;
3984 {
3985 int result;
3986
3987 result = ENOTSUP; /* XXX */
3988
3989 return (result);
3990 }
3991
3992 /*
3993 * Wrapper for special device reads
3994 */
3995 static int
3996 hfsspec_read(ap)
3997 struct vnop_read_args /* {
3998 struct vnode *a_vp;
3999 struct uio *a_uio;
4000 int a_ioflag;
4001 vfs_context_t a_context;
4002 } */ *ap;
4003 {
4004 /*
4005 * Set access flag.
4006 */
4007 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
4008 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
4009 }
4010
4011 /*
4012 * Wrapper for special device writes
4013 */
4014 static int
4015 hfsspec_write(ap)
4016 struct vnop_write_args /* {
4017 struct vnode *a_vp;
4018 struct uio *a_uio;
4019 int a_ioflag;
4020 vfs_context_t a_context;
4021 } */ *ap;
4022 {
4023 /*
4024 * Set update and change flags.
4025 */
4026 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
4027 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
4028 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
4029 }
4030
4031 /*
4032 * Wrapper for special device close
4033 *
4034 * Update the times on the cnode then do device close.
4035 */
4036 static int
4037 hfsspec_close(ap)
4038 struct vnop_close_args /* {
4039 struct vnode *a_vp;
4040 int a_fflag;
4041 vfs_context_t a_context;
4042 } */ *ap;
4043 {
4044 struct vnode *vp = ap->a_vp;
4045 struct cnode *cp;
4046
4047 if (vnode_isinuse(ap->a_vp, 1)) {
4048 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
4049 cp = VTOC(vp);
4050 hfs_touchtimes(VTOHFS(vp), cp);
4051 hfs_unlock(cp);
4052 }
4053 }
4054 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
4055 }
4056
4057 #if FIFO
4058 /*
4059 * Wrapper for fifo reads
4060 */
4061 static int
4062 hfsfifo_read(ap)
4063 struct vnop_read_args /* {
4064 struct vnode *a_vp;
4065 struct uio *a_uio;
4066 int a_ioflag;
4067 vfs_context_t a_context;
4068 } */ *ap;
4069 {
4070 /*
4071 * Set access flag.
4072 */
4073 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
4074 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
4075 }
4076
4077 /*
4078 * Wrapper for fifo writes
4079 */
4080 static int
4081 hfsfifo_write(ap)
4082 struct vnop_write_args /* {
4083 struct vnode *a_vp;
4084 struct uio *a_uio;
4085 int a_ioflag;
4086 vfs_context_t a_context;
4087 } */ *ap;
4088 {
4089 /*
4090 * Set update and change flags.
4091 */
4092 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
4093 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
4094 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
4095 }
4096
4097 /*
4098 * Wrapper for fifo close
4099 *
4100 * Update the times on the cnode then do device close.
4101 */
4102 static int
4103 hfsfifo_close(ap)
4104 struct vnop_close_args /* {
4105 struct vnode *a_vp;
4106 int a_fflag;
4107 vfs_context_t a_context;
4108 } */ *ap;
4109 {
4110 struct vnode *vp = ap->a_vp;
4111 struct cnode *cp;
4112
4113 if (vnode_isinuse(ap->a_vp, 1)) {
4114 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
4115 cp = VTOC(vp);
4116 hfs_touchtimes(VTOHFS(vp), cp);
4117 hfs_unlock(cp);
4118 }
4119 }
4120 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
4121 }
4122
4123 /*
4124 * kqfilt_add wrapper for fifos.
4125 *
4126 * Fall through to hfs kqfilt_add routines if needed
4127 */
4128 int
4129 hfsfifo_kqfilt_add(ap)
4130 struct vnop_kqfilt_add_args *ap;
4131 {
4132 int error;
4133
4134 error = VOCALL(fifo_vnodeop_p, VOFFSET(vnop_kqfilt_add), ap);
4135 if (error)
4136 error = hfs_vnop_kqfiltadd(ap);
4137 return (error);
4138 }
4139
4140 /*
4141 * kqfilt_remove wrapper for fifos.
4142 *
4143 * Fall through to hfs kqfilt_remove routines if needed
4144 */
4145 int
4146 hfsfifo_kqfilt_remove(ap)
4147 struct vnop_kqfilt_remove_args *ap;
4148 {
4149 int error;
4150
4151 error = VOCALL(fifo_vnodeop_p, VOFFSET(vnop_kqfilt_remove), ap);
4152 if (error)
4153 error = hfs_vnop_kqfiltremove(ap);
4154 return (error);
4155 }
4156
4157 #endif /* FIFO */
4158
4159 /*
4160 * Synchronize a file's in-core state with that on disk.
4161 */
4162 static int
4163 hfs_vnop_fsync(ap)
4164 struct vnop_fsync_args /* {
4165 struct vnode *a_vp;
4166 int a_waitfor;
4167 vfs_context_t a_context;
4168 } */ *ap;
4169 {
4170 struct vnode* vp = ap->a_vp;
4171 int error;
4172
4173 /*
4174 * We need to allow ENOENT lock errors since the unlink
4175 * system call can call VNOP_FSYNC during vclean.
4176 */
4177 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
4178 if (error)
4179 return (0);
4180
4181 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
4182
4183 hfs_unlock(VTOC(vp));
4184 return (error);
4185 }
4186
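/*
 * A minimal userland sketch (compiled out): fsync(2) reaches this VNOP
 * directly, while fcntl(F_FULLFSYNC) arrives via the ioctl path and also
 * asks the driver to flush its cache.  The helper and path are hypothetical;
 * this is an ordinary user program, not kernel code.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <unistd.h>

int
flush_file(const char *path, int full)
{
	int fd = open(path, O_WRONLY);
	int error;

	if (fd < 0)
		return -1;
	if (full)
		error = fcntl(fd, F_FULLFSYNC);		/* flush file data and the drive cache */
	else
		error = fsync(fd);			/* flush file data and metadata */
	close(fd);
	return error;
}
#endif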
4187
4188 static int
4189 hfs_vnop_whiteout(ap)
4190 struct vnop_whiteout_args /* {
4191 struct vnode *a_dvp;
4192 struct componentname *a_cnp;
4193 int a_flags;
4194 vfs_context_t a_context;
4195 } */ *ap;
4196 {
4197 int error = 0;
4198 struct vnode *vp = NULL;
4199 struct vnode_attr va;
4200 struct vnop_lookup_args lookup_args;
4201 struct vnop_remove_args remove_args;
4202 struct hfsmount *hfsmp;
4203
4204 hfsmp = VTOHFS(ap->a_dvp);
4205 if (hfsmp->hfs_flags & HFS_STANDARD) {
4206 error = ENOTSUP;
4207 goto exit;
4208 }
4209
4210 switch (ap->a_flags) {
4211 case LOOKUP:
4212 error = 0;
4213 break;
4214
4215 case CREATE:
4216 VATTR_INIT(&va);
4217 VATTR_SET(&va, va_type, VREG);
4218 VATTR_SET(&va, va_mode, S_IFWHT);
4219 VATTR_SET(&va, va_uid, 0);
4220 VATTR_SET(&va, va_gid, 0);
4221
4222 error = hfs_makenode(ap->a_dvp, &vp, ap->a_cnp, &va, ap->a_context);
4223 /* No need to release the vnode as no vnode is created for whiteouts */
4224 break;
4225
4226 case DELETE:
4227 lookup_args.a_dvp = ap->a_dvp;
4228 lookup_args.a_vpp = &vp;
4229 lookup_args.a_cnp = ap->a_cnp;
4230 lookup_args.a_context = ap->a_context;
4231
4232 error = hfs_vnop_lookup(&lookup_args);
4233 if (error) {
4234 break;
4235 }
4236
4237 remove_args.a_dvp = ap->a_dvp;
4238 remove_args.a_vp = vp;
4239 remove_args.a_cnp = ap->a_cnp;
4240 remove_args.a_flags = 0;
4241 remove_args.a_context = ap->a_context;
4242
4243 error = hfs_vnop_remove(&remove_args);
4244 vnode_put(vp);
4245 break;
4246
4247 default:
4248 panic("hfs_vnop_whiteout: unknown operation (flag = %x)\n", ap->a_flags);
4249 };
4250
4251 exit:
4252 return (error);
4253 }
4254
4255 int (**hfs_vnodeop_p)(void *);
4256
4257 #define VOPFUNC int (*)(void *)
4258
4259 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
4260 { &vnop_default_desc, (VOPFUNC)vn_default_error },
4261 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
4262 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
4263 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
4264 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
4265 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
4266 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
4267 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
4268 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
4269 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
4270 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
4271 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
4272 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
4273 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
4274 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
4275 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
4276 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
4277 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
4278 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
4279 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
4280 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
4281 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
4282 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
4283 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
4284 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
4285 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
4286 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
4287 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
4288 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
4289 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
4290 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
4291 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
4292 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
4293 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
4294 { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */
4295 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
4296 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
4297 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
4298 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
4299 { &vnop_kqfilt_add_desc, (VOPFUNC)hfs_vnop_kqfiltadd }, /* kqfilt_add */
4300 { &vnop_kqfilt_remove_desc, (VOPFUNC)hfs_vnop_kqfiltremove }, /* kqfilt_remove */
4301 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
4302 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
4303 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
4304 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
4305 { &vnop_whiteout_desc, (VOPFUNC)hfs_vnop_whiteout},
4306 #if NAMEDSTREAMS
4307 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
4308 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
4309 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
4310 #endif
4311 { NULL, (VOPFUNC)NULL }
4312 };
4313
4314 struct vnodeopv_desc hfs_vnodeop_opv_desc =
4315 { &hfs_vnodeop_p, hfs_vnodeop_entries };
4316
4317 int (**hfs_specop_p)(void *);
4318 struct vnodeopv_entry_desc hfs_specop_entries[] = {
4319 { &vnop_default_desc, (VOPFUNC)vn_default_error },
4320 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
4321 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
4322 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
4323 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
4324 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
4325 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
4326 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
4327 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
4328 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
4329 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
4330 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
4331 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
4332 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
4333 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
4334 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
4335 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
4336 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
4337 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
4338 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
4339 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
4340 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
4341 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
4342 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
4343 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
4344 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
4345 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
4346 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
4347 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
4348 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
4349 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
4350 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
4351 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
4352 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
4353 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
4354 };
4355 struct vnodeopv_desc hfs_specop_opv_desc =
4356 { &hfs_specop_p, hfs_specop_entries };
4357
4358 #if FIFO
4359 int (**hfs_fifoop_p)(void *);
4360 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
4361 { &vnop_default_desc, (VOPFUNC)vn_default_error },
4362 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
4363 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
4364 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
4365 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
4366 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
4367 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
4368 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
4369 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
4370 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
4371 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
4372 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
4373 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
4374 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
4375 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
4376 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
4377 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
4378 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
4379 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
4380 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
4381 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
4382 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
4383 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
4384 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
4385 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
4386 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
4387 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
4388 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
4389 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
4390 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
4391 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
4392 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
4393 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
4394 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
4395 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
4396 { &vnop_kqfilt_add_desc, (VOPFUNC)hfsfifo_kqfilt_add }, /* kqfilt_add */
4397 { &vnop_kqfilt_remove_desc, (VOPFUNC)hfsfifo_kqfilt_remove }, /* kqfilt_remove */
4398 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
4399 };
4400 struct vnodeopv_desc hfs_fifoop_opv_desc =
4401 { &hfs_fifoop_p, hfs_fifoop_entries };
4402 #endif /* FIFO */
4403
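/*
 * A minimal sketch (compiled out) of how op-vector tables like the ones
 * above would be handed to the VFS layer by a loadable file system through
 * vfs_fsadd().  HFS itself is registered statically at boot, so the names,
 * vfsops pointer, and capability flags below are purely hypothetical.
 */
#if 0	/* example only */
static struct vnodeopv_desc *example_opv_descs[] = {
	&hfs_vnodeop_opv_desc,
	&hfs_specop_opv_desc,
#if FIFO
	&hfs_fifoop_opv_desc,
#endif
};

static vfstable_t example_vfs_handle;

static int
example_register_fs(struct vfsops *ops)
{
	struct vfs_fsentry vfe;

	bzero(&vfe, sizeof(vfe));
	vfe.vfe_vfsops = ops;			/* placeholder mount/unmount table */
	vfe.vfe_vopcnt = sizeof(example_opv_descs) / sizeof(example_opv_descs[0]);
	vfe.vfe_opvdescs = example_opv_descs;
	strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
	vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY;	/* assumed capabilities */

	return vfs_fsadd(&vfe, &example_vfs_handle);
}
#endif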
4404
4405