]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_vnops.c
xnu-1228.15.4.tar.gz
[apple/xnu.git] / bsd / hfs / hfs_vnops.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/param.h>
32 #include <sys/file_internal.h>
33 #include <sys/dirent.h>
34 #include <sys/stat.h>
35 #include <sys/buf.h>
36 #include <sys/mount.h>
37 #include <sys/vnode_if.h>
38 #include <sys/vnode_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/ubc.h>
41 #include <sys/ubc_internal.h>
42 #include <sys/paths.h>
43 #include <sys/quota.h>
44 #include <sys/time.h>
45 #include <sys/disk.h>
46 #include <sys/kauth.h>
47 #include <sys/uio_internal.h>
48
49 #include <miscfs/specfs/specdev.h>
50 #include <miscfs/fifofs/fifo.h>
51 #include <vfs/vfs_support.h>
52 #include <machine/spl.h>
53
54 #include <sys/kdebug.h>
55 #include <sys/sysctl.h>
56
57 #include "hfs.h"
58 #include "hfs_catalog.h"
59 #include "hfs_cnode.h"
60 #include "hfs_dbg.h"
61 #include "hfs_mount.h"
62 #include "hfs_quota.h"
63 #include "hfs_endian.h"
64
65 #include "hfscommon/headers/BTreesInternal.h"
66 #include "hfscommon/headers/FileMgrInternal.h"
67
68
69 #define KNDETACH_VNLOCKED 0x00000001
70
71 #define CARBON_TEMP_DIR_NAME "Cleanup At Startup"
72
73
74 /* Global vfs data structures for hfs */
75
76 /* Always F_FULLFSYNC? 1=yes,0=no (default due to "various" reasons is 'no') */
77 int always_do_fullfsync = 0;
78 SYSCTL_INT (_kern, OID_AUTO, always_do_fullfsync, CTLFLAG_RW, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
79
80 static int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
81 struct componentname *cnp, struct vnode_attr *vap,
82 vfs_context_t ctx);
83
84 static int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
85 static int hfs_metasync_all(struct hfsmount *hfsmp);
86
87 static int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
88 int);
89
90 static int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
91 int, int, int);
92
93 #if FIFO
94 static int hfsfifo_read(struct vnop_read_args *);
95 static int hfsfifo_write(struct vnop_write_args *);
96 static int hfsfifo_close(struct vnop_close_args *);
97 static int hfsfifo_kqfilt_add(struct vnop_kqfilt_add_args *);
98 static int hfsfifo_kqfilt_remove(struct vnop_kqfilt_remove_args *);
99
100 extern int (**fifo_vnodeop_p)(void *);
101 #endif /* FIFO */
102
103 static int hfs_vnop_close(struct vnop_close_args*);
104 static int hfs_vnop_create(struct vnop_create_args*);
105 static int hfs_vnop_exchange(struct vnop_exchange_args*);
106 static int hfs_vnop_fsync(struct vnop_fsync_args*);
107 static int hfs_vnop_mkdir(struct vnop_mkdir_args*);
108 static int hfs_vnop_mknod(struct vnop_mknod_args*);
109 static int hfs_vnop_getattr(struct vnop_getattr_args*);
110 static int hfs_vnop_open(struct vnop_open_args*);
111 static int hfs_vnop_readdir(struct vnop_readdir_args*);
112 static int hfs_vnop_remove(struct vnop_remove_args*);
113 static int hfs_vnop_rename(struct vnop_rename_args*);
114 static int hfs_vnop_rmdir(struct vnop_rmdir_args*);
115 static int hfs_vnop_symlink(struct vnop_symlink_args*);
116 static int hfs_vnop_setattr(struct vnop_setattr_args*);
117 static int hfs_vnop_readlink(struct vnop_readlink_args *);
118 static int hfs_vnop_pathconf(struct vnop_pathconf_args *);
119 static int hfs_vnop_kqfiltremove(struct vnop_kqfilt_remove_args *);
120 static int hfs_vnop_whiteout(struct vnop_whiteout_args *);
121 static int hfsspec_read(struct vnop_read_args *);
122 static int hfsspec_write(struct vnop_write_args *);
123 static int hfsspec_close(struct vnop_close_args *);
124
125 /* Options for hfs_removedir and hfs_removefile */
126 #define HFSRM_SKIP_RESERVE 0x01
127
128
129
130
131 /*****************************************************************************
132 *
133 * Common Operations on vnodes
134 *
135 *****************************************************************************/
136
137 /*
138 * Create a regular file.
139 */
140 static int
141 hfs_vnop_create(struct vnop_create_args *ap)
142 {
143 int error;
144
145 again:
146 error = hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
147
148 /*
149 * We speculatively skipped the original lookup of the leaf
150 * for CREATE. Since it exists, go get it as long as they
151 * didn't want an exclusive create.
152 */
153 if ((error == EEXIST) && !(ap->a_vap->va_vaflags & VA_EXCLUSIVE)) {
154 struct vnop_lookup_args args;
155
156 args.a_desc = &vnop_lookup_desc;
157 args.a_dvp = ap->a_dvp;
158 args.a_vpp = ap->a_vpp;
159 args.a_cnp = ap->a_cnp;
160 args.a_context = ap->a_context;
161 args.a_cnp->cn_nameiop = LOOKUP;
162 error = hfs_vnop_lookup(&args);
163 /*
164 * We can also race with remove for this file.
165 */
166 if (error == ENOENT) {
167 goto again;
168 }
169
170 /* Make sure it was file. */
171 if ((error == 0) && !vnode_isreg(*args.a_vpp)) {
172 vnode_put(*args.a_vpp);
173 error = EEXIST;
174 }
175 args.a_cnp->cn_nameiop = CREATE;
176 }
177 return (error);
178 }
179
180 /*
181 * Make device special file.
182 */
183 static int
184 hfs_vnop_mknod(struct vnop_mknod_args *ap)
185 {
186 struct vnode_attr *vap = ap->a_vap;
187 struct vnode *dvp = ap->a_dvp;
188 struct vnode **vpp = ap->a_vpp;
189 struct cnode *cp;
190 int error;
191
192 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
193 return (ENOTSUP);
194 }
195
196 /* Create the vnode */
197 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
198 if (error)
199 return (error);
200
201 cp = VTOC(*vpp);
202 cp->c_touch_acctime = TRUE;
203 cp->c_touch_chgtime = TRUE;
204 cp->c_touch_modtime = TRUE;
205
206 if ((vap->va_rdev != VNOVAL) &&
207 (vap->va_type == VBLK || vap->va_type == VCHR))
208 cp->c_rdev = vap->va_rdev;
209
210 return (0);
211 }
212
213 /*
214 * Open a file/directory.
215 */
/*
 * Open a file/directory.
 *
 * Enforces append-only and journal-file restrictions, then opportunistically
 * tries to defragment small files on their first (non-busy) open.
 * Returns 0 on success or an errno value (EPERM, EBUSY, or a lock error).
 */
static int
hfs_vnop_open(struct vnop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct filefork *fp;
	struct timeval tv;
	int error;

	/*
	 * Files marked append-only must be opened for appending.
	 * (Opening FWRITE without O_APPEND is refused; directories exempt.)
	 */
	if ((VTOC(vp)->c_flags & APPEND) && !vnode_isdir(vp) &&
	    (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
		return (EPERM);

	/* A regular file without UBC info is claimed by the kernel. */
	if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
		return (EBUSY);  /* file is in use by the kernel */

	/* Don't allow journal file to be opened externally. */
	if (VTOC(vp)->c_fileid == VTOHFS(vp)->hfs_jnlfileid)
		return (EPERM);
	/*
	 * On the first (non-busy) open of a fragmented
	 * file attempt to de-frag it (if its less than 20MB).
	 * Skip read-only and unjournaled volumes, non-regular files,
	 * busy vnodes, and (when built in) named-stream vnodes.
	 */
	if ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) ||
	    (VTOHFS(vp)->jnl == NULL) ||
#if NAMEDSTREAMS
	    !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
#else
	    !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
#endif
		return (0);
	}

	if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
		return (error);
	fp = VTOF(vp);
	/*
	 * Fragmentation heuristic: the last slot of the in-catalog extent
	 * record is in use (ff_extents[7]), and the file is <= 20MB.
	 */
	if (fp->ff_blocks &&
	    fp->ff_extents[7].blockCount != 0 &&
	    fp->ff_size <= (20 * 1024 * 1024)) {
		struct timeval now;
		struct cnode *cp = VTOC(vp);
		/*
		 * Wait until system bootup is done (3 min).
		 * And don't relocate a file that's been modified
		 * within the past minute -- this can lead to
		 * system thrashing.
		 */
		microuptime(&tv);	/* tv = time since boot */
		microtime(&now);	/* now = wall-clock time */
		if (tv.tv_sec > (60 * 3) &&
		    ((now.tv_sec - cp->c_mtime) > 60)) {
			/* Best effort: relocation failure is ignored. */
			(void) hfs_relocate(vp, VTOVCB(vp)->nextAllocation + 4096,
			                    vfs_context_ucred(ap->a_context),
			                    vfs_context_proc(ap->a_context));
		}
	}
	hfs_unlock(VTOC(vp));

	return (0);
}
278
279
280 /*
281 * Close a file/directory.
282 */
/*
 * Close a file/directory.
 *
 * Performs truncate-on-close cleanup for files whose resource fork is a
 * named stream, thaws the filesystem if the freezing process is exiting,
 * and releases directory hints / recycles system vnodes as appropriate.
 * Always returns 0 (close is not allowed to fail here).
 */
static int
hfs_vnop_close(ap)
	struct vnop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct cnode *cp;
	struct proc *p = vfs_context_proc(ap->a_context);
	struct hfsmount *hfsmp;
	int busy;
	int knownrefs = 0;
	int tooktrunclock = 0;

	/* If the cnode lock can't be taken, just bail out successfully. */
	if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0)
		return (0);
	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * If the rsrc fork is a named stream, it holds a usecount on
	 * the data fork, which prevents the data fork from getting recycled, which
	 * then prevents the de-allocation of its extra blocks.
	 * Do checks for truncation on close. Purge extra extents if they
	 * exist. Make sure the vp is not a directory, that it has a resource
	 * fork, and that rsrc fork is a named stream.
	 */

	if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
	    && (vnode_isnamedstream(cp->c_rsrc_vp))) {
		uint32_t blks;

		blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
		/*
		 * If there are any extra blocks and there are only 2 refs on
		 * this vp (ourselves + rsrc fork holding ref on us), go ahead
		 * and try to truncate the extra blocks away.
		 */
		if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
			// release cnode lock ; must acquire truncate lock BEFORE cnode lock
			hfs_unlock (cp);

			hfs_lock_truncate(cp, TRUE);
			tooktrunclock = 1;

			/*
			 * Re-take the cnode lock; if that fails we still hold
			 * the truncate lock, so release it before returning.
			 */
			if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
				hfs_unlock_truncate(cp, TRUE);
				return (0);
			}

			//now re-test to make sure it's still valid (state may
			//have changed while the cnode lock was dropped).
			if (cp->c_rsrc_vp) {
				knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
				if (!vnode_isinuse(vp, knownrefs)) {
					blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
					if (blks < VTOF(vp)->ff_blocks) {
						/* Best effort; truncate failure is ignored on close. */
						(void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, ap->a_context);
					}
				}
			}
		}
	}

	// if we froze the fs and we're exiting, then "thaw" the fs
	if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
		hfsmp->hfs_freezing_proc = NULL;
		hfs_global_exclusive_lock_release(hfsmp);
		lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
	}

	/* busy == vnode still has users beyond this close */
	busy = vnode_isinuse(vp, 1);

	if (busy) {
		hfs_touchtimes(VTOHFS(vp), cp);
	}
	if (vnode_isdir(vp)) {
		hfs_reldirhints(cp, busy);
	} else if (vnode_issystem(vp) && !busy) {
		vnode_recycle(vp);
	}
	if (tooktrunclock) {
		hfs_unlock_truncate(cp, TRUE);
	}

	hfs_unlock(cp);

	/* Push metadata to ejectable media if the file was written. */
	if (ap->a_fflag & FWASWRITTEN) {
		hfs_sync_ejectable(hfsmp);
	}

	return (0);
}
377
378 /*
379 * Get basic attributes.
380 */
/*
 * Get basic attributes.
 *
 * Fast-paths the vnode_authorize attribute set without taking the cnode
 * lock; otherwise takes the cnode lock (exclusive only when lazy times
 * need flushing) and fills in the requested vnode_attr fields, with
 * special handling for directories, hardlinks, and the resource fork.
 */
static int
hfs_vnop_getattr(struct vnop_getattr_args *ap)
{
#define VNODE_ATTR_TIMES  \
	(VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
#define VNODE_ATTR_AUTH  \
	(VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
	 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)

	struct vnode *vp = ap->a_vp;
	struct vnode_attr *vap = ap->a_vap;
	struct vnode *rvp = NULLVP;	/* rsrc fork vnode, released at exit */
	struct hfsmount *hfsmp;
	struct cnode *cp;
	uint64_t data_size;
	enum vtype v_type;
	int error = 0;

	cp = VTOC(vp);

	/*
	 * Shortcut for vnode_authorize path. Each of the attributes
	 * in this set is updated atomically so we don't need to take
	 * the cnode lock to access them.
	 */
	if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
		/* Make sure file still exists. */
		if (cp->c_flag & C_NOEXISTS)
			return (ENOENT);

		vap->va_uid = cp->c_uid;
		vap->va_gid = cp->c_gid;
		vap->va_mode = cp->c_mode;
		vap->va_flags = cp->c_flags;
		vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;

		/* No security data recorded means no ACL to return. */
		if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
			VATTR_SET_SUPPORTED(vap, va_acl);
		}
		return (0);
	}
	hfsmp = VTOHFS(vp);
	v_type = vnode_vtype(vp);

	/*
	 * If time attributes are requested and we have cnode times
	 * that require updating, then acquire an exclusive lock on
	 * the cnode before updating the times. Otherwise we can
	 * just acquire a shared lock.
	 */
	if ((vap->va_active & VNODE_ATTR_TIMES) &&
	    (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
			return (error);
		hfs_touchtimes(hfsmp, cp);
	} else {
		if ((error = hfs_lock(cp, HFS_SHARED_LOCK)))
			return (error);
	}

	if (v_type == VDIR) {
		/* Directories report a synthetic size. */
		data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;

		if (VATTR_IS_ACTIVE(vap, va_nlink)) {
			int nlink;

			/*
			 * For directories, the va_nlink is esentially a count
			 * of the ".." references to a directory plus the "."
			 * reference and the directory itself. So for HFS+ this
			 * becomes the sub-directory count plus two.
			 *
			 * In the absence of a sub-directory count we use the
			 * directory's item count. This will be too high in
			 * most cases since it also includes files.
			 */
			if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
			    (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
				nlink = cp->c_attr.ca_dircount;  /* implied ".." entries */
			else
				nlink = cp->c_entries;

			/* Account for ourself and our "." entry */
			nlink += 2;
			/* Hide our private directories. */
			if (cp->c_cnid == kHFSRootFolderID) {
				if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
					--nlink;
				}
				if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
					--nlink;
				}
			}
			VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
		}
		if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
			int entries;

			entries = cp->c_entries;
			/* Hide our private files and directories. */
			if (cp->c_cnid == kHFSRootFolderID) {
				if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
					--entries;
				if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
					--entries;
				if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
					entries -= 2;  /* hide the journal files */
			}
			VATTR_RETURN(vap, va_nchildren, entries);
		}
		/*
		 * The va_dirlinkcount is the count of real directory hard links.
		 * (i.e. its not the sum of the implied "." and ".." references)
		 */
		if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
			VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
		}
	} else /* !VDIR */ {
		data_size = VCTOF(vp, cp)->ff_size;

		VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
		if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
			u_int64_t blocks;

			blocks = VCTOF(vp, cp)->ff_blocks;
			VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
		}
	}

	/* conditional because 64-bit arithmetic can be expensive */
	if (VATTR_IS_ACTIVE(vap, va_total_size)) {
		if (v_type == VDIR) {
			VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
		} else {
			u_int64_t total_size = 0;
			struct cnode *rcp;

			if (cp->c_datafork) {
				total_size = cp->c_datafork->ff_size;
			}

			/*
			 * c_blocks beyond the data fork implies a resource
			 * fork with allocated blocks; fetch it to add its size.
			 */
			if (cp->c_blocks - VTOF(vp)->ff_blocks) {
				/* We deal with resource fork vnode iocount at the end of the function */
				error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE);
				if (error) {
					goto out;
				}
				rcp = VTOC(rvp);
				if (rcp && rcp->c_rsrcfork) {
					total_size += rcp->c_rsrcfork->ff_size;
				}
			}

			VATTR_RETURN(vap, va_total_size, total_size);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		if (v_type == VDIR) {
			VATTR_RETURN(vap, va_total_alloc, 0);
		} else {
			VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
		}
	}

	/*
	 * If the VFS wants extended security data, and we know that we
	 * don't have any (because it never told us it was setting any)
	 * then we can return the supported bit and no data. If we do
	 * have extended security, we can just leave the bit alone and
	 * the VFS will use the fallback path to fetch it.
	 */
	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
			VATTR_SET_SUPPORTED(vap, va_acl);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time)) {
		/* Access times are lazily updated, get current time if needed */
		if (cp->c_touch_acctime) {
			struct timeval tv;

			microtime(&tv);
			vap->va_access_time.tv_sec = tv.tv_sec;
		} else {
			vap->va_access_time.tv_sec = cp->c_atime;
		}
		vap->va_access_time.tv_nsec = 0;
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	/* HFS keeps only whole-second timestamps; nsec is always 0. */
	vap->va_create_time.tv_sec = cp->c_itime;
	vap->va_create_time.tv_nsec = 0;
	vap->va_modify_time.tv_sec = cp->c_mtime;
	vap->va_modify_time.tv_nsec = 0;
	vap->va_change_time.tv_sec = cp->c_ctime;
	vap->va_change_time.tv_nsec = 0;
	vap->va_backup_time.tv_sec = cp->c_btime;
	vap->va_backup_time.tv_nsec = 0;

	/* XXX is this really a good 'optimal I/O size'? */
	vap->va_iosize = hfsmp->hfs_logBlockSize;
	vap->va_uid = cp->c_uid;
	vap->va_gid = cp->c_gid;
	vap->va_mode = cp->c_mode;
	vap->va_flags = cp->c_flags;

	/*
	 * Exporting file IDs from HFS Plus:
	 *
	 * For "normal" files the c_fileid is the same value as the
	 * c_cnid. But for hard link files, they are different - the
	 * c_cnid belongs to the active directory entry (ie the link)
	 * and the c_fileid is for the actual inode (ie the data file).
	 *
	 * The stat call (getattr) uses va_fileid and the Carbon APIs,
	 * which are hardlink-ignorant, will ask for va_linkid.
	 */
	vap->va_fileid = (u_int64_t)cp->c_fileid;
	/*
	 * We need to use the origin cache for both hardlinked files
	 * and directories. Hardlinked directories have multiple cnids
	 * and parents (one per link). Hardlinked files also have their
	 * own parents and link IDs separate from the indirect inode number.
	 * If we don't use the cache, we could end up vending the wrong ID
	 * because the cnode will only reflect the link that was looked up most recently.
	 */
	if (cp->c_flag & C_HARDLINK) {
		vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
		vap->va_parentid = (u_int64_t)hfs_currentparent(cp);
	} else {
		vap->va_linkid = (u_int64_t)cp->c_cnid;
		vap->va_parentid = (u_int64_t)cp->c_parentcnid;
	}
	vap->va_fsid = cp->c_dev;
	vap->va_filerev = 0;
	vap->va_encoding = cp->c_encoding;
	vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
	vap->va_data_size = data_size;

	/* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
	vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
	                     VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
	                     VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
	                     VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
	                     VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
	                     VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
	                     VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
	                     VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev |
	                     VNODE_ATTR_va_data_size;

	/* If this is the root, let VFS to find out the mount name, which may be different from the real name.
	 * Otherwise, we need to just take care for hardlinked files, which need to be looked up, if necessary
	 */
	if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
		struct cat_desc linkdesc;
		int lockflags;
		int uselinkdesc = 0;
		cnid_t nextlinkid = 0;
		cnid_t prevlinkid = 0;

		/* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
		 * here because the info. for the link ID requested by getattrlist may be
		 * different than what's currently in the cnode. This is because the cnode
		 * will be filled in with the information for the most recent link ID that went
		 * through namei/lookup(). If there are competing lookups for hardlinks that point
		 * to the same inode, one (or more) getattrlists could be vended incorrect name information.
		 * Also, we need to beware of open-unlinked files which could have a namelen of 0. Note
		 * that if another hardlink sibling of this file is being unlinked, that could also thrash
		 * the name fields but it should *not* be treated like an open-unlinked file here.
		 */
		if ((cp->c_flag & C_HARDLINK) &&
		    ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
			/* If we have no name and our linkID is the raw inode number, then we may
			 * have an open-unlinked file. Go to the next link in this case.
			 */
			if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
				if ((error = hfs_lookuplink(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))) {
					goto out;
				}
			}
			else {
				nextlinkid = vap->va_linkid;
			}
			/* Now probe the catalog for the linkID. Note that we don't know if we have
			 * the exclusive lock here for the cnode, so we can't just update the descriptor.
			 * Instead, we should just store the descriptor's value locally and then use it to pass
			 * out the name value as needed below.
			 */
			if (nextlinkid) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
				error = cat_findname(hfsmp, nextlinkid, &linkdesc);
				hfs_systemfile_unlock(hfsmp, lockflags);
				if (error == 0) {
					uselinkdesc = 1;
				}
			}
		}

		/* By this point, we either patched the name above, and the c_desc points
		 * to correct data, or it already did, in which case we just proceed by copying
		 * the name into the VAP. Note that we will never set va_name to supported if
		 * nextlinkid is never initialized. This could happen in the degenerate case above
		 * involving the raw inode number, where it has no nextlinkid. In this case, we will
		 * simply not export the name as supported.
		 */
		if (uselinkdesc) {
			strlcpy(vap->va_name, (const char *)linkdesc.cd_nameptr, MAXPATHLEN);
			VATTR_SET_SUPPORTED(vap, va_name);
			cat_releasedesc(&linkdesc);
		}
		else if (cp->c_desc.cd_namelen) {
			strlcpy(vap->va_name, (const char *)cp->c_desc.cd_nameptr, MAXPATHLEN);
			VATTR_SET_SUPPORTED(vap, va_name);
		}
	}

out:
	hfs_unlock(cp);
	/*
	 * We need to drop the iocount on the rsrc fork vnode only *after* we've
	 * released the cnode lock, since vnode_put can trigger an inactive call, which
	 * will go back into the HFS and try to acquire a cnode lock.
	 */
	if (rvp) {
		vnode_put(rvp);
	}
	return (error);
}
710
/*
 * Set attributes on a file/directory.
 *
 * Handles size changes (with the truncate-lock-before-cnode-lock ordering),
 * owner/group, mode, BSD flags (mirroring UF_HIDDEN into Finder Info),
 * timestamps, and name encoding. Returns 0 or an errno value.
 */
static int
hfs_vnop_setattr(ap)
	struct vnop_setattr_args /* {
		struct vnode *a_vp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap;
{
	struct vnode_attr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = NULL;	/* non-NULL once the cnode lock is held */
	struct hfsmount *hfsmp;
	kauth_cred_t cred = vfs_context_ucred(ap->a_context);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	uid_t nuid;
	gid_t ngid;

	hfsmp = VTOHFS(vp);

	/* Don't allow modification of the journal file. */
	if (hfsmp->hfs_jnlfileid == VTOC(vp)->c_fileid) {
		return (EPERM);
	}

	/*
	 * File size change request.
	 * We are guaranteed that this is not a directory, and that
	 * the filesystem object is writeable.
	 */
	VATTR_SET_SUPPORTED(vap, va_data_size);
	if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {

		/* Take truncate lock before taking cnode lock. */
		hfs_lock_truncate(VTOC(vp), TRUE);

		/* Perform the ubc_setsize before taking the cnode lock. */
		ubc_setsize(vp, vap->va_data_size);

		if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
			hfs_unlock_truncate(VTOC(vp), TRUE);
			return (error);
		}
		cp = VTOC(vp);

		error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, ap->a_context);

		hfs_unlock_truncate(cp, TRUE);
		if (error)
			goto out;
	}
	/* Acquire the cnode lock if the truncate path didn't already. */
	if (cp == NULL) {
		if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
			return (error);
		cp = VTOC(vp);
	}

	/*
	 * If it is just an access time update request by itself
	 * we know the request is from kernel level code, and we
	 * can delay it without being as worried about consistency.
	 * This change speeds up mmaps, in the rare case that they
	 * get caught behind a sync.
	 */

	if (vap->va_active == VNODE_ATTR_va_access_time) {
		cp->c_touch_acctime=TRUE;
		goto out;
	}



	/*
	 * Owner/group change request.
	 * We are guaranteed that the new owner/group is valid and legal.
	 */
	VATTR_SET_SUPPORTED(vap, va_uid);
	VATTR_SET_SUPPORTED(vap, va_gid);
	nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
	ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
	if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
	    ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
		goto out;

	/*
	 * Mode change request.
	 * We are guaranteed that the mode value is valid and that in
	 * conjunction with the owner and group, this change is legal.
	 */
	VATTR_SET_SUPPORTED(vap, va_mode);
	if (VATTR_IS_ACTIVE(vap, va_mode) &&
	    ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
		goto out;

	/*
	 * File flags change.
	 * We are guaranteed that only flags allowed to change given the
	 * current securelevel are being changed.
	 */
	VATTR_SET_SUPPORTED(vap, va_flags);
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		u_int16_t *fdFlags;

		cp->c_flags = vap->va_flags;
		cp->c_touch_chgtime = TRUE;

		/*
		 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
		 *
		 * The fdFlags for files and frFlags for folders are both 8 bytes
		 * into the userInfo (the first 16 bytes of the Finder Info). They
		 * are both 16-bit fields.
		 */
		fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
		if (vap->va_flags & UF_HIDDEN)
			*fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
		else
			*fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
	}

	/*
	 * Timestamp updates.
	 */
	VATTR_SET_SUPPORTED(vap, va_create_time);
	VATTR_SET_SUPPORTED(vap, va_access_time);
	VATTR_SET_SUPPORTED(vap, va_modify_time);
	VATTR_SET_SUPPORTED(vap, va_backup_time);
	VATTR_SET_SUPPORTED(vap, va_change_time);
	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
	    VATTR_IS_ACTIVE(vap, va_access_time) ||
	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
	    VATTR_IS_ACTIVE(vap, va_backup_time)) {
		if (VATTR_IS_ACTIVE(vap, va_create_time))
			cp->c_itime = vap->va_create_time.tv_sec;
		if (VATTR_IS_ACTIVE(vap, va_access_time)) {
			cp->c_atime = vap->va_access_time.tv_sec;
			cp->c_touch_acctime = FALSE;
		}
		if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
			cp->c_mtime = vap->va_modify_time.tv_sec;
			cp->c_touch_modtime = FALSE;
			cp->c_touch_chgtime = TRUE;

			/*
			 * The utimes system call can reset the modification
			 * time but it doesn't know about HFS create times.
			 * So we need to ensure that the creation time is
			 * always at least as old as the modification time.
			 */
			if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
			    (cp->c_cnid != kHFSRootFolderID) &&
			    (cp->c_mtime < cp->c_itime)) {
				cp->c_itime = cp->c_mtime;
			}
		}
		if (VATTR_IS_ACTIVE(vap, va_backup_time))
			cp->c_btime = vap->va_backup_time.tv_sec;
		cp->c_flag |= C_MODIFIED;
	}

	/*
	 * Set name encoding.
	 */
	VATTR_SET_SUPPORTED(vap, va_encoding);
	if (VATTR_IS_ACTIVE(vap, va_encoding)) {
		cp->c_encoding = vap->va_encoding;
		hfs_setencodingbits(hfsmp, cp->c_encoding);
	}

	/* Flush the updated catalog information to disk. */
	if ((error = hfs_update(vp, TRUE)) != 0)
		goto out;
	HFS_KNOTE(vp, NOTE_ATTRIB);
out:
	if (cp)
		hfs_unlock(cp);
	return (error);
}
888
889
890 /*
891 * Change the mode on a file.
892 * cnode must be locked before calling.
893 */
894 __private_extern__
895 int
896 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
897 {
898 register struct cnode *cp = VTOC(vp);
899
900 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
901 return (0);
902
903 // XXXdbg - don't allow modification of the journal or journal_info_block
904 if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
905 struct HFSPlusExtentDescriptor *extd;
906
907 extd = &cp->c_datafork->ff_extents[0];
908 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
909 return EPERM;
910 }
911 }
912
913 #if OVERRIDE_UNKNOWN_PERMISSIONS
914 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
915 return (0);
916 };
917 #endif
918 cp->c_mode &= ~ALLPERMS;
919 cp->c_mode |= (mode & ALLPERMS);
920 cp->c_touch_chgtime = TRUE;
921 return (0);
922 }
923
924
925 __private_extern__
926 int
927 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
928 {
929 struct cnode *cp = VTOC(vp);
930 int retval = 0;
931 int is_member;
932
933 /*
934 * Disallow write attempts on read-only file systems;
935 * unless the file is a socket, fifo, or a block or
936 * character device resident on the file system.
937 */
938 switch (vnode_vtype(vp)) {
939 case VDIR:
940 case VLNK:
941 case VREG:
942 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
943 return (EROFS);
944 break;
945 default:
946 break;
947 }
948
949 /* If immutable bit set, nobody gets to write it. */
950 if (considerFlags && (cp->c_flags & IMMUTABLE))
951 return (EPERM);
952
953 /* Otherwise, user id 0 always gets access. */
954 if (!suser(cred, NULL))
955 return (0);
956
957 /* Otherwise, check the owner. */
958 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
959 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
960
961 /* Otherwise, check the groups. */
962 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
963 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
964 }
965
966 /* Otherwise, check everyone else. */
967 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
968 }
969
970
/*
 * Perform chown operation on cnode cp;
 * code must be locked prior to call.
 *
 * Returns 0 on success, ENOTSUP on plain HFS volumes, or (with QUOTA)
 * a quota error when the new owner/group cannot absorb the charge.
 */
__private_extern__
int
#if !QUOTA
hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
	__unused struct proc *p)
#else
hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
	__unused struct proc *p)
#endif
{
	register struct cnode *cp = VTOC(vp);
	uid_t ouid;	/* previous owner, kept for rollback */
	gid_t ogid;	/* previous group, kept for rollback */
#if QUOTA
	int error = 0;
	register int i;
	int64_t change;	/* byte count to move between quota accounts */
#endif /* QUOTA */

	/* Ownership is only stored on HFS Plus volumes. */
	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
		return (ENOTSUP);

	/* If on-disk permissions are being ignored, silently succeed. */
	if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
		return (0);

	/* VNOVAL means "leave this id unchanged". */
	if (uid == (uid_t)VNOVAL)
		uid = cp->c_uid;
	if (gid == (gid_t)VNOVAL)
		gid = cp->c_gid;

#if 0	/* we are guaranteed that this is already the case */
	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be superuser or the call fails.
	 */
	if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
	    (gid != cp->c_gid &&
	     (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
	    (error = suser(cred, 0)))
		return (error);
#endif

	ogid = cp->c_gid;
	ouid = cp->c_uid;
#if QUOTA
	/*
	 * Drop quota references for any id that is not actually
	 * changing; the remaining references are released below
	 * after the usage has been uncharged.
	 */
	if ((error = hfs_getinoquota(cp)))
		return (error);
	if (ouid == uid) {
		dqrele(cp->c_dquot[USRQUOTA]);
		cp->c_dquot[USRQUOTA] = NODQUOT;
	}
	if (ogid == gid) {
		dqrele(cp->c_dquot[GRPQUOTA]);
		cp->c_dquot[GRPQUOTA] = NODQUOT;
	}

	/*
	 * Eventually need to account for (fake) a block per directory
	 * if (vnode_isdir(vp))
	 *	change = VTOHFS(vp)->blockSize;
	 * else
	 */

	change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
	/* Uncharge this file's blocks and inode from the old ids. */
	(void) hfs_chkdq(cp, -change, cred, CHOWN);
	(void) hfs_chkiq(cp, -1, cred, CHOWN);
	for (i = 0; i < MAXQUOTAS; i++) {
		dqrele(cp->c_dquot[i]);
		cp->c_dquot[i] = NODQUOT;
	}
#endif /* QUOTA */
	cp->c_gid = gid;
	cp->c_uid = uid;
#if QUOTA
	/*
	 * Charge the file against the new ids.  If either the block
	 * (hfs_chkdq) or inode (hfs_chkiq) charge fails, everything is
	 * undone below and the original owner/group are restored before
	 * returning the error.
	 */
	if ((error = hfs_getinoquota(cp)) == 0) {
		if (ouid == uid) {
			dqrele(cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
			if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
				goto good;
			else
				(void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dqrele(cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
	/* Roll back: restore the original ids and re-charge them. */
	cp->c_gid = ogid;
	cp->c_uid = ouid;
	if (hfs_getinoquota(cp) == 0) {
		if (ouid == uid) {
			dqrele(cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		/* FORCE: the rollback re-charge must not be refused. */
		(void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
		(void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
		(void) hfs_getinoquota(cp);
	}
	return (error);
good:
	if (hfs_getinoquota(cp))
		panic("hfs_chown: lost quota");
#endif /* QUOTA */


	/*
	 * According to the SUSv3 Standard, chown() shall mark
	 * for update the st_ctime field of the file.
	 * (No exceptions mentioned)
	 */
	cp->c_touch_chgtime = TRUE;
	return (0);
}
1100
1101
/*
 * The hfs_exchange routine swaps the fork data in two files by
 * exchanging some of the information in the cnode.  It is used
 * to preserve the file ID when updating an existing file, in
 * case the file is being tracked through its file ID. Typically
 * its used after creating a new file during a safe-save.
 */
static int
hfs_vnop_exchange(ap)
	struct vnop_exchange_args /* {
		struct vnode *a_fvp;
		struct vnode *a_tvp;
		int a_options;
		vfs_context_t a_context;
	} */ *ap;
{
	struct vnode *from_vp = ap->a_fvp;
	struct vnode *to_vp = ap->a_tvp;
	struct cnode *from_cp;
	struct cnode *to_cp;
	struct hfsmount *hfsmp;
	struct cat_desc tempdesc;	/* scratch copy of from_cp's descriptor */
	struct cat_attr tempattr;	/* scratch copy of from_cp's attributes */
	const unsigned char *from_nameptr;
	const unsigned char *to_nameptr;
	char from_iname[32];		/* synthesized inode names (hard links) */
	char to_iname[32];
	u_int32_t tempflag;
	cnid_t from_parid;
	cnid_t to_parid;
	int lockflags;
	int error = 0, started_tr = 0, got_cookie = 0;
	cat_cookie_t cookie;

	/* The files must be on the same volume. */
	if (vnode_mount(from_vp) != vnode_mount(to_vp))
		return (EXDEV);

	/* Exchanging a file with itself makes no sense. */
	if (from_vp == to_vp)
		return (EINVAL);

	/* Hold both cnode locks for the duration of the swap. */
	if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
		return (error);

	from_cp = VTOC(from_vp);
	to_cp = VTOC(to_vp);
	hfsmp = VTOHFS(from_vp);

	/* Only normal files can be exchanged. */
	if (!vnode_isreg(from_vp) || !vnode_isreg(to_vp) ||
	    VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
		error = EINVAL;
		goto exit;
	}

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (hfsmp->jnl) {
		struct HFSPlusExtentDescriptor *extd;

		/* Compare each file's first extent against the journal areas. */
		if (from_cp->c_datafork) {
			extd = &from_cp->c_datafork->ff_extents[0];
			if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				error = EPERM;
				goto exit;
			}
		}

		if (to_cp->c_datafork) {
			extd = &to_cp->c_datafork->ff_extents[0];
			if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				error = EPERM;
				goto exit;
			}
		}
	}

	if ((error = hfs_start_transaction(hfsmp)) != 0) {
		goto exit;
	}
	started_tr = 1;

	/*
	 * Reserve some space in the Catalog file.
	 */
	if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
		goto exit;
	}
	got_cookie = 1;

	/* The backend code always tries to delete the virtual
	 * extent id for exchanging files so we need to lock
	 * the extents b-tree.
	 */
	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

	/* Account for the location of the catalog objects.
	 * Hard links live in the private metadata directory under a
	 * synthesized "iNode" name, so use that name and parent instead
	 * of the link-specific descriptor.
	 */
	if (from_cp->c_flag & C_HARDLINK) {
		MAKE_INODE_NAME(from_iname, sizeof(from_iname),
				from_cp->c_attr.ca_linkref);
		from_nameptr = (unsigned char *)from_iname;
		from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
		from_cp->c_hint = 0;
	} else {
		from_nameptr = from_cp->c_desc.cd_nameptr;
		from_parid = from_cp->c_parentcnid;
	}
	if (to_cp->c_flag & C_HARDLINK) {
		MAKE_INODE_NAME(to_iname, sizeof(to_iname),
				to_cp->c_attr.ca_linkref);
		to_nameptr = (unsigned char *)to_iname;
		to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
		to_cp->c_hint = 0;
	} else {
		to_nameptr = to_cp->c_desc.cd_nameptr;
		to_parid = to_cp->c_parentcnid;
	}

	/* Do the exchange */
	error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
	                        to_parid, from_cp->c_hint, to_cp->c_hint);
	hfs_systemfile_unlock(hfsmp, lockflags);

	/*
	 * Note that we don't need to exchange any extended attributes
	 * since the attributes are keyed by file ID.
	 */

	if (error != E_NONE) {
		error = MacToVFSError(error);
		goto exit;
	}

	/* Purge the vnodes from the name cache */
	if (from_vp)
		cache_purge(from_vp);
	if (to_vp)
		cache_purge(to_vp);

	/* Save a copy of from attributes before swapping. */
	bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
	bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
	tempflag = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);

	/*
	 * Swap the descriptors and all non-fork related attributes.
	 * (except the modify date)
	 */
	bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));

	from_cp->c_hint = 0;
	from_cp->c_fileid = from_cp->c_cnid;
	from_cp->c_itime = to_cp->c_itime;
	from_cp->c_btime = to_cp->c_btime;
	from_cp->c_atime = to_cp->c_atime;
	from_cp->c_ctime = to_cp->c_ctime;
	from_cp->c_gid = to_cp->c_gid;
	from_cp->c_uid = to_cp->c_uid;
	from_cp->c_flags = to_cp->c_flags;
	from_cp->c_mode = to_cp->c_mode;
	from_cp->c_linkcount = to_cp->c_linkcount;
	from_cp->c_flag = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
	from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
	bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);

	/* Now install the saved "from" values into the "to" cnode. */
	bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
	to_cp->c_hint = 0;
	to_cp->c_fileid = to_cp->c_cnid;
	to_cp->c_itime = tempattr.ca_itime;
	to_cp->c_btime = tempattr.ca_btime;
	to_cp->c_atime = tempattr.ca_atime;
	to_cp->c_ctime = tempattr.ca_ctime;
	to_cp->c_gid = tempattr.ca_gid;
	to_cp->c_uid = tempattr.ca_uid;
	to_cp->c_flags = tempattr.ca_flags;
	to_cp->c_mode = tempattr.ca_mode;
	to_cp->c_linkcount = tempattr.ca_linkcount;
	to_cp->c_flag = tempflag;
	to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
	bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);

	/* Rehash the cnodes using their new file IDs */
	hfs_chash_rehash(from_cp, to_cp);

	/*
	 * When a file moves out of "Cleanup At Startup"
	 * we can drop its NODUMP status.
	 */
	if ((from_cp->c_flags & UF_NODUMP) &&
	    (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
		from_cp->c_flags &= ~UF_NODUMP;
		from_cp->c_touch_chgtime = TRUE;
	}
	if ((to_cp->c_flags & UF_NODUMP) &&
	    (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
		to_cp->c_flags &= ~UF_NODUMP;
		to_cp->c_touch_chgtime = TRUE;
	}

	HFS_KNOTE(from_vp, NOTE_ATTRIB);
	HFS_KNOTE(to_vp, NOTE_ATTRIB);

exit:
	if (got_cookie) {
		cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
	}
	if (started_tr) {
		hfs_end_transaction(hfsmp);
	}

	hfs_unlockpair(from_cp, to_cp);
	return (error);
}
1314
1315
/*
 * Flush a file's dirty data and (optionally) its metadata to disk.
 *
 * cnode must be locked
 *
 * waitfor  - MNT_WAIT requests synchronous behavior.
 * fullsync - also push all volume metadata to stable storage.
 */
__private_extern__
int
hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p)
{
	struct cnode *cp = VTOC(vp);
	struct filefork *fp = NULL;
	int retval = 0;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct timeval tv;
	int wait;		/* nonzero for synchronous semantics */
	int lockflag;
	int took_trunc_lock = 0;

	wait = (waitfor == MNT_WAIT);
	/* Global override: treat every fsync as a full fsync. */
	if (always_do_fullfsync)
		fullsync = 1;

	/* HFS directories don't have any data blocks. */
	if (vnode_isdir(vp))
		goto metasync;

	/*
	 * For system files flush the B-tree header and
	 * for regular files write out any clusters
	 */
	if (vnode_issystem(vp)) {
		if (VTOF(vp)->fcbBTCBPtr != NULL) {
			// XXXdbg
			if (hfsmp->jnl == NULL) {
				BTFlushPath(VTOF(vp));
			}
		}
	} else if (UBCINFOEXISTS(vp)) {
		/* Trade the cnode lock for the truncate lock first. */
		hfs_unlock(cp);
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;

		/* Don't hold cnode lock when calling into cluster layer. */
		(void) cluster_push(vp, wait ? IO_SYNC : 0);

		hfs_lock(cp, HFS_FORCE_LOCK);
	}
	/*
	 * When MNT_WAIT is requested and the zero fill timeout
	 * has expired then we must explicitly zero out any areas
	 * that are currently marked invalid (holes).
	 *
	 * Files with NODUMP can bypass zero filling here.
	 */
	if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
	    ((cp->c_flags & UF_NODUMP) == 0) &&
	    UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) && (fp = VTOF(vp)) &&
	    cp->c_zftimeout != 0) {
		microuptime(&tv);
		if (!fullsync && tv.tv_sec < (long)cp->c_zftimeout) {
			/* Remember that a force sync was requested. */
			cp->c_flag |= C_ZFWANTSYNC;
			goto datasync;
		}
		if (!took_trunc_lock) {
			hfs_unlock(cp);
			hfs_lock_truncate(cp, TRUE);
			hfs_lock(cp, HFS_FORCE_LOCK);
			took_trunc_lock = 1;
		}

		/* Zero-fill every invalid (hole) range, one at a time. */
		while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
			struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
			off_t start = invalid_range->rl_start;
			off_t end = invalid_range->rl_end;

			/* The range about to be written must be validated
			 * first, so that VNOP_BLOCKMAP() will return the
			 * appropriate mapping for the cluster code:
			 */
			rl_remove(start, end, &fp->ff_invalidranges);

			/* Don't hold cnode lock when calling into cluster layer. */
			hfs_unlock(cp);
			(void) cluster_write(vp, (struct uio *) 0,
			                     fp->ff_size, end + 1, start, (off_t)0,
			                     IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
			hfs_lock(cp, HFS_FORCE_LOCK);
			cp->c_flag |= C_MODIFIED;
		}
		hfs_unlock(cp);
		(void) cluster_push(vp, wait ? IO_SYNC : 0);
		hfs_lock(cp, HFS_FORCE_LOCK);

		cp->c_flag &= ~C_ZFWANTSYNC;
		cp->c_zftimeout = 0;
	}
datasync:
	if (took_trunc_lock)
		hfs_unlock_truncate(cp, TRUE);

	/*
	 * if we have a journal and if journal_active() returns != 0 then the
	 * we shouldn't do anything to a locked block (because it is part
	 * of a transaction).  otherwise we'll just go through the normal
	 * code path and flush the buffer.  note journal_active() can return
	 * -1 if the journal is invalid -- however we still need to skip any
	 * locked blocks as they get cleaned up when we finish the transaction
	 * or close the journal.
	 */
	// if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
	if (hfsmp->jnl)
		lockflag = BUF_SKIP_LOCKED;
	else
		lockflag = 0;

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	buf_flushdirtyblks(vp, wait, lockflag, "hfs_fsync");

metasync:
	if (vnode_isreg(vp) && vnode_issystem(vp)) {
		/* System (B-tree) files manage their own timestamps. */
		if (VTOF(vp)->fcbBTCBPtr != NULL) {
			microuptime(&tv);
			BTSetLastSync(VTOF(vp), tv.tv_sec);
		}
		cp->c_touch_acctime = FALSE;
		cp->c_touch_chgtime = FALSE;
		cp->c_touch_modtime = FALSE;
	} else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
		retval = hfs_update(vp, wait);

		/*
		 * When MNT_WAIT is requested push out the catalog record for
		 * this file.  If they asked for a full fsync, we can skip this
		 * because the journal_flush or hfs_metasync_all will push out
		 * all of the metadata changes.
		 */
		if ((retval == 0) && wait && !fullsync && cp->c_hint &&
		    !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
			hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
		}

		/*
		 * If this was a full fsync, make sure all metadata
		 * changes get to stable storage.
		 */
		if (fullsync) {
			if (hfsmp->jnl) {
				journal_flush(hfsmp->jnl);
			} else {
				retval = hfs_metasync_all(hfsmp);
				/* XXX need to pass context! */
				VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
			}
		}
	}

	return (retval);
}
1475
1476
/* Sync an hfs catalog b-tree node
 *
 * Writes the catalog buffer at block 'node' if it is dirty and not
 * part of an active transaction.  A no-op on journaled volumes.
 */
static int
hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
{
	vnode_t vp;
	buf_t bp;
	int lockflags;

	vp = HFSTOVCB(hfsmp)->catalogRefNum;

	// XXXdbg - don't need to do this on a journaled volume
	if (hfsmp->jnl) {
		return 0;
	}

	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
	/*
	 * Look for a matching node that has been delayed
	 * but is not part of a set (B_LOCKED).
	 *
	 * BLK_ONLYVALID causes buf_getblk to return a
	 * buf_t for the daddr64_t specified only if it's
	 * currently resident in the cache... the size
	 * parameter to buf_getblk is ignored when this flag
	 * is set
	 */
	bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);

	if (bp) {
		/* Write only if dirty (B_DELWRI) and not locked. */
		if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
			(void) VNOP_BWRITE(bp);
		else
			buf_brelse(bp);
	}

	hfs_systemfile_unlock(hfsmp, lockflags);

	return (0);
}
1516
1517
1518 /*
1519 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
1520 * without a journal. Note that the volume bitmap does not get written;
1521 * we rely on fsck_hfs to fix that up (which it can do without any loss
1522 * of data).
1523 */
1524 static int
1525 hfs_metasync_all(struct hfsmount *hfsmp)
1526 {
1527 int lockflags;
1528
1529 /* Lock all of the B-trees so we get a mutually consistent state */
1530 lockflags = hfs_systemfile_lock(hfsmp,
1531 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
1532
1533 /* Sync each of the B-trees */
1534 if (hfsmp->hfs_catalog_vp)
1535 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
1536 if (hfsmp->hfs_extents_vp)
1537 hfs_btsync(hfsmp->hfs_extents_vp, 0);
1538 if (hfsmp->hfs_attribute_vp)
1539 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
1540
1541 /* Wait for all of the writes to complete */
1542 if (hfsmp->hfs_catalog_vp)
1543 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
1544 if (hfsmp->hfs_extents_vp)
1545 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
1546 if (hfsmp->hfs_attribute_vp)
1547 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
1548
1549 hfs_systemfile_unlock(hfsmp, lockflags);
1550
1551 return 0;
1552 }
1553
1554
/*
 * buf_iterate() callback used by hfs_btsync(): clear B_LOCKED so the
 * buffer may be written, then start an asynchronous write of it.
 * Always returns BUF_CLAIMED so the iterator treats it as handled.
 */
/*ARGSUSED 1*/
static int
hfs_btsync_callback(struct buf *bp, __unused void *dummy)
{
	buf_clearflags(bp, B_LOCKED);
	(void) buf_bawrite(bp);

	return(BUF_CLAIMED);
}
1564
1565
1566 __private_extern__
1567 int
1568 hfs_btsync(struct vnode *vp, int sync_transaction)
1569 {
1570 struct cnode *cp = VTOC(vp);
1571 struct timeval tv;
1572 int flags = 0;
1573
1574 if (sync_transaction)
1575 flags |= BUF_SKIP_NONLOCKED;
1576 /*
1577 * Flush all dirty buffers associated with b-tree.
1578 */
1579 buf_iterate(vp, hfs_btsync_callback, flags, 0);
1580
1581 microuptime(&tv);
1582 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
1583 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
1584 cp->c_touch_acctime = FALSE;
1585 cp->c_touch_chgtime = FALSE;
1586 cp->c_touch_modtime = FALSE;
1587
1588 return 0;
1589 }
1590
1591 /*
1592 * Remove a directory.
1593 */
1594 static int
1595 hfs_vnop_rmdir(ap)
1596 struct vnop_rmdir_args /* {
1597 struct vnode *a_dvp;
1598 struct vnode *a_vp;
1599 struct componentname *a_cnp;
1600 vfs_context_t a_context;
1601 } */ *ap;
1602 {
1603 struct vnode *dvp = ap->a_dvp;
1604 struct vnode *vp = ap->a_vp;
1605 struct cnode *dcp = VTOC(dvp);
1606 struct cnode *cp = VTOC(vp);
1607 int error;
1608
1609 if (!S_ISDIR(cp->c_mode)) {
1610 return (ENOTDIR);
1611 }
1612 if (dvp == vp) {
1613 return (EINVAL);
1614 }
1615 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
1616 return (error);
1617 }
1618 error = hfs_removedir(dvp, vp, ap->a_cnp, 0);
1619 hfs_unlockpair(dcp, cp);
1620
1621 return (error);
1622 }
1623
/*
 * Remove a directory
 *
 * Both dvp and vp cnodes are locked
 *
 * skip_reserve - caller already reserved catalog space (cat_preflight).
 */
static int
hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
              int skip_reserve)
{
	struct cnode *cp;
	struct cnode *dcp;
	struct hfsmount * hfsmp;
	struct cat_desc desc;
	int lockflags;
	int error = 0, started_tr = 0;

	cp = VTOC(vp);
	dcp = VTOC(dvp);
	hfsmp = VTOHFS(vp);

	if (dcp == cp) {
		return (EINVAL);	/* cannot remove "." */
	}
	/* Nothing to do if the cnode is already gone. */
	if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
		return (0);
	}
	if (cp->c_entries != 0) {
		return (ENOTEMPTY);
	}

	/* Check if we're removing the last link to an empty directory. */
	if (cp->c_flag & C_HARDLINK) {
		/* We could also return EBUSY here */
		return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
	}

	/*
	 * Directories carrying extended attributes are removed through
	 * hfs_removefile() so the attributes can be deleted separately.
	 */
	if ((hfsmp->hfs_attribute_vp != NULL) &&
	    (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {

		return hfs_removefile(dvp, vp, cnp, 0, 0, 1);
	}

	/* Serialize with other modifications of this directory. */
	dcp->c_flag |= C_DIR_MODIFICATION;

#if QUOTA
	if (hfsmp->hfs_flags & HFS_QUOTAS)
		(void)hfs_getinoquota(cp);
#endif
	if ((error = hfs_start_transaction(hfsmp)) != 0) {
		goto out;
	}
	started_tr = 1;

	/*
	 * Verify the directory is empty (and valid).
	 * (Rmdir ".." won't be valid since
	 *  ".." will contain a reference to
	 *  the current directory and thus be
	 *  non-empty.)
	 */
	if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the namei cache: */
	cache_purge(vp);

	/*
	 * Protect against a race with rename by using the component
	 * name passed in and parent id from dvp (instead of using
	 * the cp->c_desc which may have changed).
	 */
	desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
	desc.cd_namelen = cnp->cn_namelen;
	desc.cd_parentcnid = dcp->c_fileid;
	desc.cd_cnid = cp->c_cnid;
	desc.cd_flags = CD_ISDIR;
	desc.cd_encoding = cp->c_encoding;
	desc.cd_hint = 0;

	/* If the namespace entry no longer matches, succeed quietly. */
	if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) {
		error = 0;
		goto out;
	}

	/* Remove entry from catalog */
	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

	if (!skip_reserve) {
		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
			hfs_systemfile_unlock(hfsmp, lockflags);
			goto out;
		}
	}

	error = cat_delete(hfsmp, &desc, &cp->c_attr);
	if (error == 0) {
		/* The parent lost a child */
		if (dcp->c_entries > 0)
			dcp->c_entries--;
		DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
		dcp->c_dirchangecnt++;
		dcp->c_touch_chgtime = TRUE;
		dcp->c_touch_modtime = TRUE;
		hfs_touchtimes(hfsmp, cp);
		(void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
		cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
	}

	hfs_systemfile_unlock(hfsmp, lockflags);

	if (error)
		goto out;

#if QUOTA
	if (hfsmp->hfs_flags & HFS_QUOTAS)
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

	HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK | NOTE_ATTRIB);

	hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));

	/*
	 * directory open or in use (e.g. opendir() or current working
	 * directory for some process); wait for inactive to actually
	 * remove catalog entry
	 */
	if (vnode_isinuse(vp, 0)) {
		cp->c_flag |= C_DELETED;
	} else {
		cp->c_mode = 0;  /* Makes the vnode go away...see inactive */
		cp->c_flag |= C_NOEXISTS;
	}
out:
	/* Release the directory-modification gate and wake any waiters. */
	dcp->c_flag &= ~C_DIR_MODIFICATION;
	wakeup((caddr_t)&dcp->c_flag);

	HFS_KNOTE(vp, NOTE_DELETE);

	if (started_tr) {
		hfs_end_transaction(hfsmp);
	}

	return (error);
}
1774
1775
/*
 * Remove a file or link.
 *
 * VNOP entry point: takes the truncate and cnode-pair locks, delegates
 * to hfs_removefile(), then (if needed) recycles an idle resource-fork
 * vnode after everything has been unlocked.
 */
static int
hfs_vnop_remove(ap)
	struct vnop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
		int a_flags;
		vfs_context_t a_context;
	} */ *ap;
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct cnode *dcp = VTOC(dvp);
	struct cnode *cp = VTOC(vp);
	struct vnode *rvp = cp->c_rsrc_vp;
	int error=0, recycle_rsrc=0, rvid=0;

	/* A directory entry cannot refer to its own parent. */
	if (dvp == vp) {
		return (EINVAL);
	}

	hfs_lock_truncate(cp, TRUE);

	if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
		hfs_unlock_truncate(cp, TRUE);
		return (error);
	}
	error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0);

	//
	// If the remove succeeded and it's an open-unlinked file that has
	// a resource fork vnode that's not in use, we will want to recycle
	// the rvp *after* we're done unlocking everything.  Otherwise the
	// resource vnode will keep a v_parent reference on this vnode which
	// prevents it from going through inactive/reclaim which means that
	// the disk space associated with this file won't get free'd until
	// something forces the resource vnode to get recycled (and that can
	// take a very long time).
	//
	if (error == 0 && (cp->c_flag & C_DELETED) && rvp && !vnode_isinuse(rvp, 0)) {
		rvid = vnode_vid(rvp);
		recycle_rsrc = 1;
	}

	/*
	 * Drop the truncate lock before unlocking the cnode
	 * (which can potentially perform a vnode_put and
	 * recycle the vnode which in turn might require the
	 * truncate lock)
	 */
	hfs_unlock_truncate(cp, TRUE);
	hfs_unlockpair(dcp, cp);

	/* Re-validate rvp by vid in case it was reclaimed meanwhile. */
	if (recycle_rsrc && vnode_getwithvid(rvp, rvid) == 0) {
		vnode_ref(rvp);
		vnode_rele(rvp);
		vnode_recycle(rvp);
		vnode_put(rvp);
	}

	return (error);
}
1841
1842
1843 static int
1844 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
1845
1846 if ( !(buf_flags(bp) & B_META))
1847 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
1848 /*
1849 * it's part of the current transaction, kill it.
1850 */
1851 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
1852
1853 return (BUF_CLAIMED);
1854 }
1855
1856 /*
1857 * hfs_removefile
1858 *
1859 * Similar to hfs_vnop_remove except there are additional options.
1860 *
1861 * Requires cnode and truncate locks to be held.
1862 */
1863 static int
1864 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1865 int flags, int skip_reserve, int allow_dirs)
1866 {
1867 struct vnode *rvp = NULL;
1868 struct cnode *cp;
1869 struct cnode *dcp;
1870 struct hfsmount *hfsmp;
1871 struct cat_desc desc;
1872 struct timeval tv;
1873 vfs_context_t ctx = cnp->cn_context;
1874 int dataforkbusy = 0;
1875 int rsrcforkbusy = 0;
1876 int truncated = 0;
1877 int lockflags;
1878 int error = 0;
1879 int started_tr = 0;
1880 int isbigfile = 0, defer_remove=0, isdir=0;
1881
1882 cp = VTOC(vp);
1883 dcp = VTOC(dvp);
1884 hfsmp = VTOHFS(vp);
1885
1886 /* Check if we lost a race post lookup. */
1887 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
1888 return (0);
1889 }
1890
1891 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) {
1892 return 0;
1893 }
1894
1895 /* Make sure a remove is permitted */
1896 if (VNODE_IS_RSRC(vp)) {
1897 return (EPERM);
1898 }
1899 /* Don't allow deleting the journal or journal_info_block. */
1900 if (hfsmp->jnl &&
1901 (cp->c_fileid == hfsmp->hfs_jnlfileid || cp->c_fileid == hfsmp->hfs_jnlinfoblkid)) {
1902 return (EPERM);
1903 }
1904 /*
1905 * Hard links require special handling.
1906 */
1907 if (cp->c_flag & C_HARDLINK) {
1908 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
1909 return (EBUSY);
1910 } else {
1911 /* A directory hard link with a link count of one is
1912 * treated as a regular directory. Therefore it should
1913 * only be removed using rmdir().
1914 */
1915 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
1916 (allow_dirs == 0)) {
1917 return (EPERM);
1918 }
1919 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
1920 }
1921 }
1922 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
1923 if (vnode_isdir(vp)) {
1924 if (allow_dirs == 0)
1925 return (EPERM); /* POSIX */
1926 isdir = 1;
1927 }
1928 /* Sanity check the parent ids. */
1929 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
1930 (cp->c_parentcnid != dcp->c_fileid)) {
1931 return (EINVAL);
1932 }
1933
1934 dcp->c_flag |= C_DIR_MODIFICATION;
1935
1936 // this guy is going away so mark him as such
1937 cp->c_flag |= C_DELETED;
1938
1939
1940 /* Remove our entry from the namei cache. */
1941 cache_purge(vp);
1942
1943 /*
1944 * Acquire a vnode for a non-empty resource fork.
1945 * (needed for hfs_truncate)
1946 */
1947 if (isdir == 0 && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
1948 /*
1949 * We must avoid calling hfs_vgetrsrc() when we have
1950 * an active resource fork vnode to avoid deadlocks
1951 * when that vnode is in the VL_TERMINATE state. We
1952 * can defer removing the file and its resource fork
1953 * until the call to hfs_vnop_inactive() occurs.
1954 */
1955 if (cp->c_rsrc_vp) {
1956 defer_remove = 1;
1957 } else {
1958 error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
1959 if (error)
1960 goto out;
1961 /* Defer the vnode_put on rvp until the hfs_unlock(). */
1962 cp->c_flag |= C_NEED_RVNODE_PUT;
1963 }
1964 }
1965 /* Check if this file is being used. */
1966 if (isdir == 0) {
1967 dataforkbusy = vnode_isinuse(vp, 0);
1968 rsrcforkbusy = rvp ? vnode_isinuse(rvp, 0) : 0;
1969 }
1970
1971 /* Check if we have to break the deletion into multiple pieces. */
1972 if (isdir == 0) {
1973 isbigfile = ((cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE) && overflow_extents(VTOF(vp)));
1974 }
1975
1976 /* Check if the file has xattrs. If it does we'll have to delete them in
1977 individual transactions in case there are too many */
1978 if ((hfsmp->hfs_attribute_vp != NULL) &&
1979 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
1980 defer_remove = 1;
1981 }
1982
1983 /*
1984 * Carbon semantics prohibit deleting busy files.
1985 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
1986 */
1987 if (dataforkbusy || rsrcforkbusy) {
1988 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
1989 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
1990 error = EBUSY;
1991 goto out;
1992 }
1993 }
1994
1995 #if QUOTA
1996 if (hfsmp->hfs_flags & HFS_QUOTAS)
1997 (void)hfs_getinoquota(cp);
1998 #endif /* QUOTA */
1999
2000 /* Check if we need a ubc_setsize. */
2001 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy)) {
2002 /*
2003 * A ubc_setsize can cause a pagein so defer it
2004 * until after the cnode lock is dropped. The
2005 * cnode lock cannot be dropped/reacquired here
2006 * since we might already hold the journal lock.
2007 */
2008 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
2009 cp->c_flag |= C_NEED_DATA_SETSIZE;
2010 }
2011 if (!rsrcforkbusy && rvp) {
2012 cp->c_flag |= C_NEED_RSRC_SETSIZE;
2013 }
2014 }
2015
2016 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2017 goto out;
2018 }
2019 started_tr = 1;
2020
2021 // XXXdbg - if we're journaled, kill any dirty symlink buffers
2022 if (hfsmp->jnl && vnode_islnk(vp))
2023 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
2024
2025 /*
2026 * Truncate any non-busy forks. Busy forks will
2027 * get truncated when their vnode goes inactive.
2028 *
2029 * Since we're already inside a transaction,
2030 * tell hfs_truncate to skip the ubc_setsize.
2031 */
2032 if (isdir == 0) {
2033 int mode = cp->c_mode;
2034
2035 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
2036 cp->c_mode = 0; /* Suppress hfs_update */
2037 error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ctx);
2038 cp->c_mode = mode;
2039 if (error)
2040 goto out;
2041 truncated = 1;
2042 }
2043 if (!rsrcforkbusy && rvp) {
2044 cp->c_mode = 0; /* Suppress hfs_update */
2045 error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, ctx);
2046 cp->c_mode = mode;
2047 if (error)
2048 goto out;
2049 truncated = 1;
2050 }
2051 }
2052
2053 /*
2054 * Protect against a race with rename by using the component
2055 * name passed in and parent id from dvp (instead of using
2056 * the cp->c_desc which may have changed).
2057 */
2058 desc.cd_flags = 0;
2059 desc.cd_encoding = cp->c_desc.cd_encoding;
2060 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
2061 desc.cd_namelen = cnp->cn_namelen;
2062 desc.cd_parentcnid = dcp->c_fileid;
2063 desc.cd_hint = cp->c_desc.cd_hint;
2064 desc.cd_cnid = cp->c_cnid;
2065 microtime(&tv);
2066
2067 /*
2068 * There are two cases to consider:
2069 * 1. File is busy/big/defer_remove ==> move/rename the file
2070 * 2. File is not in use ==> remove the file
2071 */
2072 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
2073 char delname[32];
2074 struct cat_desc to_desc;
2075 struct cat_desc todir_desc;
2076
2077 /*
2078 * Orphan this file (move to hidden directory).
2079 */
2080 bzero(&todir_desc, sizeof(todir_desc));
2081 todir_desc.cd_parentcnid = 2;
2082
2083 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
2084 bzero(&to_desc, sizeof(to_desc));
2085 to_desc.cd_nameptr = (const u_int8_t *)delname;
2086 to_desc.cd_namelen = strlen(delname);
2087 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2088 to_desc.cd_flags = 0;
2089 to_desc.cd_cnid = cp->c_cnid;
2090
2091 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2092 if (!skip_reserve) {
2093 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
2094 hfs_systemfile_unlock(hfsmp, lockflags);
2095 goto out;
2096 }
2097 }
2098
2099 error = cat_rename(hfsmp, &desc, &todir_desc,
2100 &to_desc, (struct cat_desc *)NULL);
2101
2102 if (error == 0) {
2103 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
2104 if (isdir == 1) {
2105 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
2106 }
2107 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
2108 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
2109
2110 /* Update the parent directory */
2111 if (dcp->c_entries > 0)
2112 dcp->c_entries--;
2113 if (isdir == 1) {
2114 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
2115 }
2116 dcp->c_dirchangecnt++;
2117 dcp->c_ctime = tv.tv_sec;
2118 dcp->c_mtime = tv.tv_sec;
2119 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2120
2121 /* Update the file's state */
2122 cp->c_flag |= C_DELETED;
2123 cp->c_ctime = tv.tv_sec;
2124 --cp->c_linkcount;
2125 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
2126 }
2127 hfs_systemfile_unlock(hfsmp, lockflags);
2128 if (error)
2129 goto out;
2130
2131 } else /* Not busy */ {
2132
2133 if (cp->c_blocks > 0) {
2134 printf("hfs_remove: attempting to delete a non-empty file %s\n",
2135 cp->c_desc.cd_nameptr);
2136 error = EBUSY;
2137 goto out;
2138 }
2139
2140 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2141 if (!skip_reserve) {
2142 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
2143 hfs_systemfile_unlock(hfsmp, lockflags);
2144 goto out;
2145 }
2146 }
2147
2148 error = cat_delete(hfsmp, &desc, &cp->c_attr);
2149
2150 if (error && error != ENXIO && error != ENOENT && truncated) {
2151 if ((cp->c_datafork && cp->c_datafork->ff_size != 0) ||
2152 (cp->c_rsrcfork && cp->c_rsrcfork->ff_size != 0)) {
2153 panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
2154 error, cp->c_datafork->ff_size, cp->c_rsrcfork->ff_size);
2155 } else {
2156 printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
2157 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
2158 }
2159 }
2160 if (error == 0) {
2161 /* Update the parent directory */
2162 if (dcp->c_entries > 0)
2163 dcp->c_entries--;
2164 dcp->c_dirchangecnt++;
2165 dcp->c_ctime = tv.tv_sec;
2166 dcp->c_mtime = tv.tv_sec;
2167 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2168 }
2169 hfs_systemfile_unlock(hfsmp, lockflags);
2170 if (error)
2171 goto out;
2172
2173 #if QUOTA
2174 if (hfsmp->hfs_flags & HFS_QUOTAS)
2175 (void)hfs_chkiq(cp, -1, NOCRED, 0);
2176 #endif /* QUOTA */
2177
2178 cp->c_mode = 0;
2179 truncated = 0; // because the catalog entry is gone
2180 cp->c_flag |= C_NOEXISTS;
2181 cp->c_flag &= ~C_DELETED;
2182 cp->c_touch_chgtime = TRUE; /* XXX needed ? */
2183 --cp->c_linkcount;
2184
2185 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
2186 }
2187
2188 /*
2189 * All done with this cnode's descriptor...
2190 *
2191 * Note: all future catalog calls for this cnode must be by
2192 * fileid only. This is OK for HFS (which doesn't have file
2193 * thread records) since HFS doesn't support the removal of
2194 * busy files.
2195 */
2196 cat_releasedesc(&cp->c_desc);
2197
2198 HFS_KNOTE(dvp, NOTE_WRITE);
2199
2200 out:
2201 if (error) {
2202 cp->c_flag &= ~C_DELETED;
2203 }
2204
2205 /* Commit the truncation to the catalog record */
2206 if (truncated) {
2207 cp->c_flag |= C_FORCEUPDATE;
2208 cp->c_touch_chgtime = TRUE;
2209 cp->c_touch_modtime = TRUE;
2210 (void) hfs_update(vp, 0);
2211 }
2212
2213 if (started_tr) {
2214 hfs_end_transaction(hfsmp);
2215 }
2216
2217 dcp->c_flag &= ~C_DIR_MODIFICATION;
2218 wakeup((caddr_t)&dcp->c_flag);
2219
2220 HFS_KNOTE(vp, NOTE_DELETE);
2221 if (rvp) {
2222 HFS_KNOTE(rvp, NOTE_DELETE);
2223 }
2224
2225 return (error);
2226 }
2227
2228
2229 __private_extern__ void
2230 replace_desc(struct cnode *cp, struct cat_desc *cdp)
2231 {
2232 // fixes 4348457 and 4463138
2233 if (&cp->c_desc == cdp) {
2234 return;
2235 }
2236
2237 /* First release allocated name buffer */
2238 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
2239 const u_int8_t *name = cp->c_desc.cd_nameptr;
2240
2241 cp->c_desc.cd_nameptr = 0;
2242 cp->c_desc.cd_namelen = 0;
2243 cp->c_desc.cd_flags &= ~CD_HASBUF;
2244 vfs_removename((const char *)name);
2245 }
2246 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
2247
2248 /* Cnode now owns the name buffer */
2249 cdp->cd_nameptr = 0;
2250 cdp->cd_namelen = 0;
2251 cdp->cd_flags &= ~CD_HASBUF;
2252 }
2253
2254
2255 /*
2256 * Rename a cnode.
2257 *
2258 * The VFS layer guarantees that:
2259 * - source and destination will either both be directories, or
2260 * both not be directories.
2261 * - all the vnodes are from the same file system
2262 *
2263 * When the target is a directory, HFS must ensure that its empty.
2264 */
2265 static int
2266 hfs_vnop_rename(ap)
2267 struct vnop_rename_args /* {
2268 struct vnode *a_fdvp;
2269 struct vnode *a_fvp;
2270 struct componentname *a_fcnp;
2271 struct vnode *a_tdvp;
2272 struct vnode *a_tvp;
2273 struct componentname *a_tcnp;
2274 vfs_context_t a_context;
2275 } */ *ap;
2276 {
2277 struct vnode *tvp = ap->a_tvp;
2278 struct vnode *tdvp = ap->a_tdvp;
2279 struct vnode *fvp = ap->a_fvp;
2280 struct vnode *fdvp = ap->a_fdvp;
2281 struct vnode *rvp = NULLVP;
2282 struct componentname *tcnp = ap->a_tcnp;
2283 struct componentname *fcnp = ap->a_fcnp;
2284 struct proc *p = vfs_context_proc(ap->a_context);
2285 struct cnode *fcp;
2286 struct cnode *fdcp;
2287 struct cnode *tdcp;
2288 struct cnode *tcp;
2289 struct cat_desc from_desc;
2290 struct cat_desc to_desc;
2291 struct cat_desc out_desc;
2292 struct hfsmount *hfsmp;
2293 cat_cookie_t cookie;
2294 int tvp_deleted = 0;
2295 int started_tr = 0, got_cookie = 0;
2296 int took_trunc_lock = 0;
2297 int lockflags;
2298 int error;
2299 int rsrc_vid = 0;
2300 int recycle_rsrc = 0;
2301
2302 /* When tvp exist, take the truncate lock for the hfs_removefile(). */
2303 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
2304 hfs_lock_truncate(VTOC(tvp), TRUE);
2305 took_trunc_lock = 1;
2306 }
2307
2308 retry:
2309 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
2310 HFS_EXCLUSIVE_LOCK);
2311 if (error) {
2312 if (took_trunc_lock) {
2313 hfs_unlock_truncate(VTOC(tvp), TRUE);
2314 took_trunc_lock = 0;
2315 }
2316 /*
2317 * tvp might no longer exist. if we get ENOENT, re-check the
2318 * C_NOEXISTS flag on tvp to find out whether it's still in the
2319 * namespace.
2320 */
2321 if (error == ENOENT && tvp) {
2322 /*
2323 * It's okay to just check C_NOEXISTS without having a lock,
2324 * because we have an iocount on it from the vfs layer so it can't
2325 * have disappeared.
2326 */
2327 if (VTOC(tvp)->c_flag & C_NOEXISTS) {
2328 /*
2329 * tvp is no longer in the namespace. Try again with NULL
2330 * tvp/tcp (NULLing these out is fine because the vfs syscall
2331 * will vnode_put the vnodes).
2332 */
2333 tcp = NULL;
2334 tvp = NULL;
2335 goto retry;
2336 }
2337 }
2338 return (error);
2339 }
2340
2341 fdcp = VTOC(fdvp);
2342 fcp = VTOC(fvp);
2343 tdcp = VTOC(tdvp);
2344 tcp = tvp ? VTOC(tvp) : NULL;
2345 hfsmp = VTOHFS(tdvp);
2346
2347 /* Check for a race against unlink. */
2348 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid)) {
2349 error = ENOENT;
2350 goto out;
2351 }
2352
2353 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid))) {
2354 //
2355 // hmm, the destination vnode isn't valid any more.
2356 // in this case we can just drop him and pretend he
2357 // never existed in the first place.
2358 //
2359 if (took_trunc_lock) {
2360 hfs_unlock_truncate(VTOC(tvp), TRUE);
2361 took_trunc_lock = 0;
2362 }
2363
2364 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
2365
2366 tcp = NULL;
2367 tvp = NULL;
2368
2369 // retry the locking with tvp null'ed out
2370 goto retry;
2371 }
2372
2373 fdcp->c_flag |= C_DIR_MODIFICATION;
2374 if (fdvp != tdvp) {
2375 tdcp->c_flag |= C_DIR_MODIFICATION;
2376 }
2377
2378 /*
2379 * Disallow renaming of a directory hard link if the source and
2380 * destination parent directories are different, or a directory whose
2381 * descendant is a directory hard link and the one of the ancestors
2382 * of the destination directory is a directory hard link.
2383 */
2384 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
2385 if (fcp->c_flag & C_HARDLINK) {
2386 error = EPERM;
2387 goto out;
2388 }
2389 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
2390 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2391 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
2392 error = EPERM;
2393 hfs_systemfile_unlock(hfsmp, lockflags);
2394 goto out;
2395 }
2396 hfs_systemfile_unlock(hfsmp, lockflags);
2397 }
2398 }
2399
2400 /*
2401 * The following edge case is caught here:
2402 * (to cannot be a descendent of from)
2403 *
2404 * o fdvp
2405 * /
2406 * /
2407 * o fvp
2408 * \
2409 * \
2410 * o tdvp
2411 * /
2412 * /
2413 * o tvp
2414 */
2415 if (tdcp->c_parentcnid == fcp->c_fileid) {
2416 error = EINVAL;
2417 goto out;
2418 }
2419
2420 /*
2421 * The following two edge cases are caught here:
2422 * (note tvp is not empty)
2423 *
2424 * o tdvp o tdvp
2425 * / /
2426 * / /
2427 * o tvp tvp o fdvp
2428 * \ \
2429 * \ \
2430 * o fdvp o fvp
2431 * /
2432 * /
2433 * o fvp
2434 */
2435 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
2436 error = ENOTEMPTY;
2437 goto out;
2438 }
2439
2440 /*
2441 * The following edge case is caught here:
2442 * (the from child and parent are the same)
2443 *
2444 * o tdvp
2445 * /
2446 * /
2447 * fdvp o fvp
2448 */
2449 if (fdvp == fvp) {
2450 error = EINVAL;
2451 goto out;
2452 }
2453
2454 /*
2455 * Make sure "from" vnode and its parent are changeable.
2456 */
2457 if ((fcp->c_flags & (IMMUTABLE | APPEND)) || (fdcp->c_flags & APPEND)) {
2458 error = EPERM;
2459 goto out;
2460 }
2461
2462 /*
2463 * If the destination parent directory is "sticky", then the
2464 * user must own the parent directory, or the destination of
2465 * the rename, otherwise the destination may not be changed
2466 * (except by root). This implements append-only directories.
2467 *
2468 * Note that checks for immutable and write access are done
2469 * by the call to hfs_removefile.
2470 */
2471 if (tvp && (tdcp->c_mode & S_ISTXT) &&
2472 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
2473 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
2474 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
2475 error = EPERM;
2476 goto out;
2477 }
2478
2479 #if QUOTA
2480 if (tvp)
2481 (void)hfs_getinoquota(tcp);
2482 #endif
2483 /* Preflighting done, take fvp out of the name space. */
2484 cache_purge(fvp);
2485
2486 /*
2487 * When a file moves out of "Cleanup At Startup"
2488 * we can drop its NODUMP status.
2489 */
2490 if ((fcp->c_flags & UF_NODUMP) &&
2491 vnode_isreg(fvp) &&
2492 (fdvp != tdvp) &&
2493 (fdcp->c_desc.cd_nameptr != NULL) &&
2494 (strncmp((const char *)fdcp->c_desc.cd_nameptr,
2495 CARBON_TEMP_DIR_NAME,
2496 sizeof(CARBON_TEMP_DIR_NAME)) == 0)) {
2497 fcp->c_flags &= ~UF_NODUMP;
2498 fcp->c_touch_chgtime = TRUE;
2499 (void) hfs_update(fvp, 0);
2500 }
2501
2502 bzero(&from_desc, sizeof(from_desc));
2503 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
2504 from_desc.cd_namelen = fcnp->cn_namelen;
2505 from_desc.cd_parentcnid = fdcp->c_fileid;
2506 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2507 from_desc.cd_cnid = fcp->c_cnid;
2508
2509 bzero(&to_desc, sizeof(to_desc));
2510 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
2511 to_desc.cd_namelen = tcnp->cn_namelen;
2512 to_desc.cd_parentcnid = tdcp->c_fileid;
2513 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2514 to_desc.cd_cnid = fcp->c_cnid;
2515
2516 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2517 goto out;
2518 }
2519 started_tr = 1;
2520
2521 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
2522 * inside a journal transaction and without holding a cnode lock.
2523 * As setting of this bit depends on being in journal transaction for
2524 * concurrency, check this bit again after we start journal transaction for rename
2525 * to ensure that this directory does not have any descendant that
2526 * is a directory hard link.
2527 */
2528 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
2529 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
2530 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2531 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
2532 error = EPERM;
2533 hfs_systemfile_unlock(hfsmp, lockflags);
2534 goto out;
2535 }
2536 hfs_systemfile_unlock(hfsmp, lockflags);
2537 }
2538 }
2539
2540 // if it's a hardlink then re-lookup the name so
2541 // that we get the correct cnid in from_desc (see
2542 // the comment in hfs_removefile for more details)
2543 //
2544 if (fcp->c_flag & C_HARDLINK) {
2545 struct cat_desc tmpdesc;
2546 cnid_t real_cnid;
2547
2548 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
2549 tmpdesc.cd_namelen = fcnp->cn_namelen;
2550 tmpdesc.cd_parentcnid = fdcp->c_fileid;
2551 tmpdesc.cd_hint = fdcp->c_childhint;
2552 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
2553 tmpdesc.cd_encoding = 0;
2554
2555 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2556
2557 if (cat_lookup(hfsmp, &tmpdesc, 0, NULL, NULL, NULL, &real_cnid) != 0) {
2558 hfs_systemfile_unlock(hfsmp, lockflags);
2559 goto out;
2560 }
2561
2562 // use the real cnid instead of whatever happened to be there
2563 from_desc.cd_cnid = real_cnid;
2564 hfs_systemfile_unlock(hfsmp, lockflags);
2565 }
2566
2567 /*
2568 * Reserve some space in the Catalog file.
2569 */
2570 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
2571 goto out;
2572 }
2573 got_cookie = 1;
2574
2575 /*
2576 * If the destination exists then it may need to be removed.
2577 */
2578 if (tvp) {
2579 /*
2580 * When fvp matches tvp they could be case variants
2581 * or matching hard links.
2582 */
2583 if (fvp == tvp) {
2584 if (!(fcp->c_flag & C_HARDLINK)) {
2585 goto skip_rm; /* simple case variant */
2586
2587 } else if ((fdvp != tdvp) ||
2588 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
2589 goto out; /* matching hardlinks, nothing to do */
2590
2591 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
2592 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
2593 goto skip_rm; /* case-variant hardlink in the same dir */
2594 } else {
2595 goto out; /* matching hardlink, nothing to do */
2596 }
2597 }
2598
2599 if (vnode_isdir(tvp))
2600 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE);
2601 else {
2602 if (tcp){
2603 rvp = tcp->c_rsrc_vp;
2604 }
2605 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0);
2606
2607 /* If the destination file had a resource fork vnode, we couldn't do
2608 * anything about it in hfs_removefile because we didn't have a reference on it.
2609 * We need to take action here to prevent it from leaking blocks. If removefile
2610 * succeeded, then squirrel away the vid of the resource fork vnode and force a
2611 * recycle after dropping all of the locks. The vid is guaranteed not to change
2612 * at this point because we still hold the cnode lock.
2613 */
2614 if ((error == 0) && (tcp->c_flag & C_DELETED) && rvp && !vnode_isinuse(rvp, 0)) {
2615 rsrc_vid = vnode_vid(rvp);
2616 recycle_rsrc = 1;
2617 }
2618 }
2619
2620 if (error)
2621 goto out;
2622 tvp_deleted = 1;
2623 }
2624 skip_rm:
2625 /*
2626 * All done with tvp and fvp
2627 *
2628 * We also jump to this point if there was no destination observed during lookup and namei.
2629 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
2630 * competing thread from racing us and creating a file or dir at the destination of this rename
2631 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
2632 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
2633 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
2634 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
2635 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
2636 * will be swallowed and it will restart the operation.
2637 */
2638
2639 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2640 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
2641 hfs_systemfile_unlock(hfsmp, lockflags);
2642
2643 if (error) {
2644 if (error == EEXIST) {
2645 error = ERECYCLE;
2646 }
2647 goto out;
2648 }
2649
2650 /* Invalidate negative cache entries in the destination directory */
2651 if (tdcp->c_flag & C_NEG_ENTRIES) {
2652 cache_purge_negatives(tdvp);
2653 tdcp->c_flag &= ~C_NEG_ENTRIES;
2654 }
2655
2656 /* Update cnode's catalog descriptor */
2657 replace_desc(fcp, &out_desc);
2658 fcp->c_parentcnid = tdcp->c_fileid;
2659 fcp->c_hint = 0;
2660
2661 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
2662 (fdcp->c_cnid == kHFSRootFolderID));
2663 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
2664 (tdcp->c_cnid == kHFSRootFolderID));
2665
2666 /* Update both parent directories. */
2667 if (fdvp != tdvp) {
2668 if (vnode_isdir(fvp)) {
2669 /* If the source directory has directory hard link
2670 * descendants, set the kHFSHasChildLinkBit in the
2671 * destination parent hierarchy
2672 */
2673 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
2674 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
2675
2676 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
2677
2678 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
2679 if (error) {
2680 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
2681 error = 0;
2682 }
2683 }
2684 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
2685 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
2686 }
2687 tdcp->c_entries++;
2688 tdcp->c_dirchangecnt++;
2689 if (fdcp->c_entries > 0)
2690 fdcp->c_entries--;
2691 fdcp->c_dirchangecnt++;
2692 fdcp->c_touch_chgtime = TRUE;
2693 fdcp->c_touch_modtime = TRUE;
2694
2695 fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
2696 (void) hfs_update(fdvp, 0);
2697 }
2698 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
2699 tdcp->c_touch_chgtime = TRUE;
2700 tdcp->c_touch_modtime = TRUE;
2701
2702 tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
2703 (void) hfs_update(tdvp, 0);
2704 out:
2705 if (got_cookie) {
2706 cat_postflight(hfsmp, &cookie, p);
2707 }
2708 if (started_tr) {
2709 hfs_end_transaction(hfsmp);
2710 }
2711
2712 /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have
2713 generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp.
2714 */
2715 if (error == 0) {
2716 HFS_KNOTE(fvp, NOTE_RENAME);
2717 HFS_KNOTE(fdvp, NOTE_WRITE);
2718 if (tdvp != fdvp) HFS_KNOTE(tdvp, NOTE_WRITE);
2719 };
2720
2721 fdcp->c_flag &= ~C_DIR_MODIFICATION;
2722 wakeup((caddr_t)&fdcp->c_flag);
2723 if (fdvp != tdvp) {
2724 tdcp->c_flag &= ~C_DIR_MODIFICATION;
2725 wakeup((caddr_t)&tdcp->c_flag);
2726 }
2727
2728 if (took_trunc_lock)
2729 hfs_unlock_truncate(VTOC(tvp), TRUE);
2730
2731 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
2732
2733 /* Now that we've dropped locks, see if we need to force recycle on the old
2734 * destination's rsrc fork, preventing a leak of the rsrc fork's blocks. Note that
2735 * doing the ref/rele is in order to twiddle the VL_INACTIVE bit to the vnode's flags
2736 * so that on the last vnode_put for this vnode, we will force vnop_inactive to be triggered.
2737 */
2738 if ((recycle_rsrc) && (vnode_getwithvid(rvp, rsrc_vid) == 0)) {
2739 vnode_ref(rvp);
2740 vnode_rele(rvp);
2741 vnode_recycle(rvp);
2742 vnode_put (rvp);
2743 }
2744
2745
2746 /* After tvp is removed the only acceptable error is EIO */
2747 if (error && tvp_deleted)
2748 error = EIO;
2749
2750 return (error);
2751 }
2752
2753
2754 /*
2755 * Make a directory.
2756 */
2757 static int
2758 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
2759 {
2760 /***** HACK ALERT ********/
2761 ap->a_cnp->cn_flags |= MAKEENTRY;
2762 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
2763 }
2764
2765
2766 /*
2767 * Create a symbolic link.
2768 */
2769 static int
2770 hfs_vnop_symlink(struct vnop_symlink_args *ap)
2771 {
2772 struct vnode **vpp = ap->a_vpp;
2773 struct vnode *dvp = ap->a_dvp;
2774 struct vnode *vp = NULL;
2775 struct cnode *cp = NULL;
2776 struct hfsmount *hfsmp;
2777 struct filefork *fp;
2778 struct buf *bp = NULL;
2779 char *datap;
2780 int started_tr = 0;
2781 u_int32_t len;
2782 int error;
2783
2784 /* HFS standard disks don't support symbolic links */
2785 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
2786 return (ENOTSUP);
2787
2788 /* Check for empty target name */
2789 if (ap->a_target[0] == 0)
2790 return (EINVAL);
2791
2792 hfsmp = VTOHFS(dvp);
2793 len = strlen(ap->a_target);
2794
2795 /* Check for free space */
2796 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
2797 return (ENOSPC);
2798 }
2799
2800 /* Create the vnode */
2801 ap->a_vap->va_mode |= S_IFLNK;
2802 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
2803 goto out;
2804 }
2805 vp = *vpp;
2806 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
2807 goto out;
2808 }
2809 cp = VTOC(vp);
2810 fp = VTOF(vp);
2811
2812 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
2813 goto out;
2814 }
2815
2816 #if QUOTA
2817 (void)hfs_getinoquota(cp);
2818 #endif /* QUOTA */
2819
2820 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2821 goto out;
2822 }
2823 started_tr = 1;
2824
2825 /*
2826 * Allocate space for the link.
2827 *
2828 * Since we're already inside a transaction,
2829 * tell hfs_truncate to skip the ubc_setsize.
2830 *
2831 * Don't need truncate lock since a symlink is treated as a system file.
2832 */
2833 error = hfs_truncate(vp, len, IO_NOZEROFILL, 1, ap->a_context);
2834
2835 /* On errors, remove the symlink file */
2836 if (error) {
2837 /*
2838 * End the transaction so we don't re-take the cnode lock
2839 * below while inside a transaction (lock order violation).
2840 */
2841 hfs_end_transaction(hfsmp);
2842
2843 /* hfs_removefile() requires holding the truncate lock */
2844 hfs_unlock(cp);
2845 hfs_lock_truncate(cp, TRUE);
2846 hfs_lock(cp, HFS_FORCE_LOCK);
2847
2848 if (hfs_start_transaction(hfsmp) != 0) {
2849 started_tr = 0;
2850 hfs_unlock_truncate(cp, TRUE);
2851 goto out;
2852 }
2853
2854 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0);
2855 hfs_unlock_truncate(cp, TRUE);
2856 goto out;
2857 }
2858
2859 /* Write the link to disk */
2860 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
2861 0, 0, BLK_META);
2862 if (hfsmp->jnl) {
2863 journal_modify_block_start(hfsmp->jnl, bp);
2864 }
2865 datap = (char *)buf_dataptr(bp);
2866 bzero(datap, buf_size(bp));
2867 bcopy(ap->a_target, datap, len);
2868
2869 if (hfsmp->jnl) {
2870 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
2871 } else {
2872 buf_bawrite(bp);
2873 }
2874 /*
2875 * We defered the ubc_setsize for hfs_truncate
2876 * since we were inside a transaction.
2877 *
2878 * We don't need to drop the cnode lock here
2879 * since this is a symlink.
2880 */
2881 ubc_setsize(vp, len);
2882 out:
2883 if (started_tr)
2884 hfs_end_transaction(hfsmp);
2885 if ((cp != NULL) && (vp != NULL)) {
2886 hfs_unlock(cp);
2887 }
2888 if (error) {
2889 if (vp) {
2890 vnode_put(vp);
2891 }
2892 *vpp = NULL;
2893 }
2894 return (error);
2895 }
2896
2897
/* structures to hold a "." or ".." directory entry */

/*
 * Standard-format dot entry, matching the classic dirent layout
 * returned by hfs_vnop_readdir when VNODE_READDIR_EXTENDED is not set.
 * NOTE(review): layout is ABI-visible to userspace readers — do not
 * reorder or resize fields.
 */
struct hfs_stddotentry {
	u_int32_t	d_fileno;   /* unique file number */
	u_int16_t	d_reclen;   /* length of this structure */
	u_int8_t	d_type;     /* dirent file type */
	u_int8_t	d_namlen;   /* len of filename */
	char		d_name[4];  /* "." or ".." */
};

/*
 * Extended-format dot entry, used when the caller passes
 * VNODE_READDIR_EXTENDED; d_seekoff supports NFS-style cookies.
 * Same ABI caveat as above.
 */
struct hfs_extdotentry {
	u_int64_t  d_fileno;   /* unique file number */
	u_int64_t  d_seekoff;  /* seek offset (optional, used by servers) */
	u_int16_t  d_reclen;   /* length of this structure */
	u_int16_t  d_namlen;   /* len of filename */
	u_int8_t   d_type;     /* dirent file type */
	u_char     d_name[3];  /* "." or ".." */
};

/* Union big enough to build either flavor of dot entry on the stack. */
typedef union {
	struct hfs_stddotentry  std;
	struct hfs_extdotentry  ext;
} hfs_dotentry_t;
2920
2921 /*
2922 * hfs_vnop_readdir reads directory entries into the buffer pointed
2923 * to by uio, in a filesystem independent format. Up to uio_resid
2924 * bytes of data can be transferred. The data in the buffer is a
2925 * series of packed dirent structures where each one contains the
2926 * following entries:
2927 *
2928 * u_int32_t d_fileno; // file number of entry
2929 * u_int16_t d_reclen; // length of this record
2930 * u_int8_t d_type; // file type
2931 * u_int8_t d_namlen; // length of string in d_name
2932 * char d_name[MAXNAMELEN+1]; // null terminated file name
2933 *
2934 * The current position (uio_offset) refers to the next block of
2935 * entries. The offset can only be set to a value previously
2936 * returned by hfs_vnop_readdir or zero. This offset does not have
2937 * to match the number of bytes returned (in uio_resid).
2938 *
2939 * In fact, the offset used by HFS is essentially an index (26 bits)
2940 * with a tag (6 bits). The tag is for associating the next request
2941 * with the current request. This enables us to have multiple threads
2942 * reading the directory while the directory is also being modified.
2943 *
2944 * Each tag/index pair is tied to a unique directory hint. The hint
2945 * contains information (filename) needed to build the catalog b-tree
2946 * key for finding the next set of entries.
2947 *
2948 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
2949 * do NOT synthesize entries for "." and "..".
2950 */
2951 static int
2952 hfs_vnop_readdir(ap)
2953 struct vnop_readdir_args /* {
2954 vnode_t a_vp;
2955 uio_t a_uio;
2956 int a_flags;
2957 int *a_eofflag;
2958 int *a_numdirent;
2959 vfs_context_t a_context;
2960 } */ *ap;
2961 {
2962 struct vnode *vp = ap->a_vp;
2963 uio_t uio = ap->a_uio;
2964 struct cnode *cp;
2965 struct hfsmount *hfsmp;
2966 directoryhint_t *dirhint = NULL;
2967 directoryhint_t localhint;
2968 off_t offset;
2969 off_t startoffset;
2970 int error = 0;
2971 int eofflag = 0;
2972 user_addr_t user_start = 0;
2973 user_size_t user_len = 0;
2974 int index;
2975 unsigned int tag;
2976 int items;
2977 int lockflags;
2978 int extended;
2979 int nfs_cookies;
2980 caddr_t bufstart;
2981 cnid_t cnid_hint = 0;
2982
2983 items = 0;
2984 startoffset = offset = uio_offset(uio);
2985 bufstart = CAST_DOWN(caddr_t, uio_iov_base(uio));
2986 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
2987 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
2988
2989 /* Sanity check the uio data. */
2990 if ((uio_iovcnt(uio) > 1) ||
2991 (uio_resid(uio) < (int)sizeof(struct dirent))) {
2992 return (EINVAL);
2993 }
2994 /* Note that the dirhint calls require an exclusive lock. */
2995 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
2996 return (error);
2997 cp = VTOC(vp);
2998 hfsmp = VTOHFS(vp);
2999
3000 /* Pick up cnid hint (if any). */
3001 if (nfs_cookies) {
3002 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
3003 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
3004 if (cnid_hint == INT_MAX) { /* searching pass the last item */
3005 eofflag = 1;
3006 goto out;
3007 }
3008 }
3009 /*
3010 * Synthesize entries for "." and "..", unless the directory has
3011 * been deleted, but not closed yet (lazy delete in progress).
3012 */
3013 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
3014 hfs_dotentry_t dotentry[2];
3015 size_t uiosize;
3016
3017 if (extended) {
3018 struct hfs_extdotentry *entry = &dotentry[0].ext;
3019
3020 entry->d_fileno = cp->c_cnid;
3021 entry->d_reclen = sizeof(struct hfs_extdotentry);
3022 entry->d_type = DT_DIR;
3023 entry->d_namlen = 1;
3024 entry->d_name[0] = '.';
3025 entry->d_name[1] = '\0';
3026 entry->d_name[2] = '\0';
3027 entry->d_seekoff = 1;
3028
3029 ++entry;
3030 entry->d_fileno = cp->c_parentcnid;
3031 entry->d_reclen = sizeof(struct hfs_extdotentry);
3032 entry->d_type = DT_DIR;
3033 entry->d_namlen = 2;
3034 entry->d_name[0] = '.';
3035 entry->d_name[1] = '.';
3036 entry->d_name[2] = '\0';
3037 entry->d_seekoff = 2;
3038 uiosize = 2 * sizeof(struct hfs_extdotentry);
3039 } else {
3040 struct hfs_stddotentry *entry = &dotentry[0].std;
3041
3042 entry->d_fileno = cp->c_cnid;
3043 entry->d_reclen = sizeof(struct hfs_stddotentry);
3044 entry->d_type = DT_DIR;
3045 entry->d_namlen = 1;
3046 *(int *)&entry->d_name[0] = 0;
3047 entry->d_name[0] = '.';
3048
3049 ++entry;
3050 entry->d_fileno = cp->c_parentcnid;
3051 entry->d_reclen = sizeof(struct hfs_stddotentry);
3052 entry->d_type = DT_DIR;
3053 entry->d_namlen = 2;
3054 *(int *)&entry->d_name[0] = 0;
3055 entry->d_name[0] = '.';
3056 entry->d_name[1] = '.';
3057 uiosize = 2 * sizeof(struct hfs_stddotentry);
3058 }
3059 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
3060 goto out;
3061 }
3062 offset += 2;
3063 }
3064
3065 /* If there are no real entries then we're done. */
3066 if (cp->c_entries == 0) {
3067 error = 0;
3068 eofflag = 1;
3069 uio_setoffset(uio, offset);
3070 goto seekoffcalc;
3071 }
3072
3073 //
3074 // We have to lock the user's buffer here so that we won't
3075 // fault on it after we've acquired a shared lock on the
3076 // catalog file. The issue is that you can get a 3-way
3077 // deadlock if someone else starts a transaction and then
3078 // tries to lock the catalog file but can't because we're
3079 // here and we can't service our page fault because VM is
3080 // blocked trying to start a transaction as a result of
3081 // trying to free up pages for our page fault. It's messy
3082 // but it does happen on dual-processors that are paging
3083 // heavily (see radar 3082639 for more info). By locking
3084 // the buffer up-front we prevent ourselves from faulting
3085 // while holding the shared catalog file lock.
3086 //
3087 // Fortunately this and hfs_search() are the only two places
3088 // currently (10/30/02) that can fault on user data with a
3089 // shared lock on the catalog file.
3090 //
3091 if (hfsmp->jnl && uio_isuserspace(uio)) {
3092 user_start = uio_curriovbase(uio);
3093 user_len = uio_curriovlen(uio);
3094
3095 if ((error = vslock(user_start, user_len)) != 0) {
3096 user_start = 0;
3097 goto out;
3098 }
3099 }
3100 /* Convert offset into a catalog directory index. */
3101 index = (offset & HFS_INDEX_MASK) - 2;
3102 tag = offset & ~HFS_INDEX_MASK;
3103
3104 /* Lock catalog during cat_findname and cat_getdirentries. */
3105 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3106
3107 /* When called from NFS, try and resolve a cnid hint. */
3108 if (nfs_cookies && cnid_hint != 0) {
3109 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
3110 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
3111 localhint.dh_index = index - 1;
3112 localhint.dh_time = 0;
3113 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
3114 dirhint = &localhint; /* don't forget to release the descriptor */
3115 } else {
3116 cat_releasedesc(&localhint.dh_desc);
3117 }
3118 }
3119 }
3120
3121 /* Get a directory hint (cnode must be locked exclusive) */
3122 if (dirhint == NULL) {
3123 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
3124
3125 /* Hide tag from catalog layer. */
3126 dirhint->dh_index &= HFS_INDEX_MASK;
3127 if (dirhint->dh_index == HFS_INDEX_MASK) {
3128 dirhint->dh_index = -1;
3129 }
3130 }
3131
3132 if (index == 0) {
3133 dirhint->dh_threadhint = cp->c_dirthreadhint;
3134 }
3135
3136 /* Pack the buffer with dirent entries. */
3137 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items, &eofflag);
3138
3139 if (index == 0 && error == 0) {
3140 cp->c_dirthreadhint = dirhint->dh_threadhint;
3141 }
3142
3143 hfs_systemfile_unlock(hfsmp, lockflags);
3144
3145 if (error != 0) {
3146 goto out;
3147 }
3148
3149 /* Get index to the next item */
3150 index += items;
3151
3152 if (items >= (int)cp->c_entries) {
3153 eofflag = 1;
3154 }
3155
3156 /* Convert catalog directory index back into an offset. */
3157 while (tag == 0)
3158 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
3159 uio_setoffset(uio, (index + 2) | tag);
3160 dirhint->dh_index |= tag;
3161
3162 seekoffcalc:
3163 cp->c_touch_acctime = TRUE;
3164
3165 if (ap->a_numdirent) {
3166 if (startoffset == 0)
3167 items += 2;
3168 *ap->a_numdirent = items;
3169 }
3170
3171 out:
3172 if (hfsmp->jnl && user_start) {
3173 vsunlock(user_start, user_len, TRUE);
3174 }
3175 /* If we didn't do anything then go ahead and dump the hint. */
3176 if ((dirhint != NULL) &&
3177 (dirhint != &localhint) &&
3178 (uio_offset(uio) == startoffset)) {
3179 hfs_reldirhint(cp, dirhint);
3180 eofflag = 1;
3181 }
3182 if (ap->a_eofflag) {
3183 *ap->a_eofflag = eofflag;
3184 }
3185 if (dirhint == &localhint) {
3186 cat_releasedesc(&localhint.dh_desc);
3187 }
3188 hfs_unlock(cp);
3189 return (error);
3190 }
3191
3192
3193 /*
3194 * Read contents of a symbolic link.
3195 */
3196 static int
3197 hfs_vnop_readlink(ap)
3198 struct vnop_readlink_args /* {
3199 struct vnode *a_vp;
3200 struct uio *a_uio;
3201 vfs_context_t a_context;
3202 } */ *ap;
3203 {
3204 struct vnode *vp = ap->a_vp;
3205 struct cnode *cp;
3206 struct filefork *fp;
3207 int error;
3208
3209 if (!vnode_islnk(vp))
3210 return (EINVAL);
3211
3212 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
3213 return (error);
3214 cp = VTOC(vp);
3215 fp = VTOF(vp);
3216
3217 /* Zero length sym links are not allowed */
3218 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
3219 printf("hfs: zero length symlink on fileid %d\n", cp->c_fileid);
3220 error = EINVAL;
3221 goto exit;
3222 }
3223
3224 /* Cache the path so we don't waste buffer cache resources */
3225 if (fp->ff_symlinkptr == NULL) {
3226 struct buf *bp = NULL;
3227
3228 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
3229 error = (int)buf_meta_bread(vp, (daddr64_t)0,
3230 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
3231 vfs_context_ucred(ap->a_context), &bp);
3232 if (error) {
3233 if (bp)
3234 buf_brelse(bp);
3235 if (fp->ff_symlinkptr) {
3236 FREE(fp->ff_symlinkptr, M_TEMP);
3237 fp->ff_symlinkptr = NULL;
3238 }
3239 goto exit;
3240 }
3241 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
3242
3243 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
3244 buf_markinvalid(bp); /* data no longer needed */
3245 }
3246 buf_brelse(bp);
3247 }
3248 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
3249
3250 /*
3251 * Keep track blocks read
3252 */
3253 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
3254
3255 /*
3256 * If this file hasn't been seen since the start of
3257 * the current sampling period then start over.
3258 */
3259 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
3260 VTOF(vp)->ff_bytesread = fp->ff_size;
3261 else
3262 VTOF(vp)->ff_bytesread += fp->ff_size;
3263
3264 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
3265 // cp->c_touch_acctime = TRUE;
3266 }
3267
3268 exit:
3269 hfs_unlock(cp);
3270 return (error);
3271 }
3272
3273
3274 /*
3275 * Get configurable pathname variables.
3276 */
/*
 * Get configurable pathname variables (pathconf(2) back end).
 *
 * Stores the requested limit in *ap->a_retval and returns 0, or
 * returns EINVAL for names HFS does not implement.  Plain HFS and
 * HFS Plus differ in link-count and filename-length limits.
 */
static int
hfs_vnop_pathconf(ap)
	struct vnop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
		vfs_context_t a_context;
	} */ *ap;
{
	switch (ap->a_name) {
	case _PC_LINK_MAX:
		/* Plain HFS has no hard links. */
		if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
			*ap->a_retval = 1;
		else
			*ap->a_retval = HFS_LINK_MAX;
		break;
	case _PC_NAME_MAX:
		/* NOTE: the original comments here were swapped —
		 * plain HFS names are limited to 31 chars, HFS Plus to 255. */
		if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
			*ap->a_retval = kHFSMaxFileNameChars;	/* 31 */
		else
			*ap->a_retval = kHFSPlusMaxFileNameChars;	/* 255 */
		break;
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX; /* 1024 */
		break;
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		break;
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 200112;		/* _POSIX_CHOWN_RESTRICTED */
		break;
	case _PC_NO_TRUNC:
		*ap->a_retval = 200112;		/* _POSIX_NO_TRUNC */
		break;
	case _PC_NAME_CHARS_MAX:
		/* Maximum characters (not bytes) in a filename: HFS Plus limit. */
		*ap->a_retval = kHFSPlusMaxFileNameChars;
		break;
	case _PC_CASE_SENSITIVE:
		/* Only HFSX volumes are case sensitive. */
		if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
			*ap->a_retval = 1;
		else
			*ap->a_retval = 0;
		break;
	case _PC_CASE_PRESERVING:
		*ap->a_retval = 1;
		break;
	case _PC_FILESIZEBITS:
		*ap->a_retval = 64;	/* number of bits to store max file size */
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
3332
3333
3334 /*
3335 * Update a cnode's on-disk metadata.
3336 *
3337 * If waitfor is set, then wait for the disk write of
3338 * the node to complete.
3339 *
3340 * The cnode must be locked exclusive
3341 */
__private_extern__
int
hfs_update(struct vnode *vp, __unused int waitfor)
{
	struct cnode *cp = VTOC(vp);
	struct proc *p;
	struct cat_fork *dataforkp = NULL;
	struct cat_fork *rsrcforkp = NULL;
	struct cat_fork datafork;
	struct cat_fork rsrcfork;
	struct hfsmount *hfsmp;
	int lockflags;
	int error;

	p = current_proc();
	hfsmp = VTOHFS(vp);

	/*
	 * System files (cnids below the first user catalog node ID) have
	 * no regular catalog record to update, and with no catalog vnode
	 * there is nowhere to write — nothing to do in either case.
	 */
	if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
	    hfsmp->hfs_catalog_vp == NULL){
		return (0);
	}
	/* On a read-only mount (or a torn-down cnode) just discard dirty state. */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
		cp->c_flag &= ~C_MODIFIED;
		cp->c_touch_acctime = 0;
		cp->c_touch_chgtime = 0;
		cp->c_touch_modtime = 0;
		return (0);
	}

	/* Fold any pending atime/ctime/mtime updates into the cnode. */
	hfs_touchtimes(hfsmp, cp);

	/* Nothing to update. */
	if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) {
		return (0);
	}

	if (cp->c_datafork)
		dataforkp = &cp->c_datafork->ff_data;
	if (cp->c_rsrcfork)
		rsrcforkp = &cp->c_rsrcfork->ff_data;

	/*
	 * For delayed allocations updates are
	 * postponed until an fsync or the file
	 * gets written to disk.
	 *
	 * Deleted files can defer meta data updates until inactive.
	 *
	 * If we're ever called with the C_FORCEUPDATE flag though
	 * we have to do the update.
	 */
	if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
	    (ISSET(cp->c_flag, C_DELETED) ||
	    (dataforkp && cp->c_datafork->ff_unallocblocks) ||
	    (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
	//	cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
		/* Keep the cnode marked dirty so the deferred update happens later. */
		cp->c_flag |= C_MODIFIED;

		HFS_KNOTE(vp, NOTE_ATTRIB);

		return (0);
	}

	if ((error = hfs_start_transaction(hfsmp)) != 0) {
		return error;
	}

	/*
	 * For files with invalid ranges (holes) the on-disk
	 * field representing the size of the file (cf_size)
	 * must be no larger than the start of the first hole.
	 * Work on a local copy so the in-core fork is untouched.
	 */
	if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
		bcopy(dataforkp, &datafork, sizeof(datafork));
		datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
		dataforkp = &datafork;
	} else if (dataforkp && (cp->c_datafork->ff_unallocblocks != 0)) {
		// always make sure the block count and the size
		// of the file match the number of blocks actually
		// allocated to the file on disk
		bcopy(dataforkp, &datafork, sizeof(datafork));
		// make sure that we don't assign a negative block count
		if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
			panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
			      cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
		}
		datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
		datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
		dataforkp = &datafork;
	}

	/*
	 * For resource forks with delayed allocations, make sure
	 * the block count and file size match the number of blocks
	 * actually allocated to the file on disk.
	 */
	if (rsrcforkp && (cp->c_rsrcfork->ff_unallocblocks != 0)) {
		bcopy(rsrcforkp, &rsrcfork, sizeof(rsrcfork));
		rsrcfork.cf_blocks = (cp->c_rsrcfork->ff_blocks - cp->c_rsrcfork->ff_unallocblocks);
		rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
		rsrcforkp = &rsrcfork;
	}

	/*
	 * Lock the Catalog b-tree file.
	 */
	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

	/* XXX - waitfor is not enforced */
	error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);

	hfs_systemfile_unlock(hfsmp, lockflags);

	/* After the updates are finished, clear the flags */
	cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);

	hfs_end_transaction(hfsmp);

	HFS_KNOTE(vp, NOTE_ATTRIB);

	return (error);
}
3464
3465 /*
3466 * Allocate a new node
3467 * Note - Function does not create and return a vnode for whiteout creation.
3468 */
/*
 * Allocate a new catalog node for dvp/cnp with attributes from vap,
 * returning a locked vnode for it in *vpp.  Whiteouts get a catalog
 * record but no vnode.  On entry dvp's cnode is unlocked; it is locked
 * exclusive for the duration and released before return.
 */
static int
hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
             struct vnode_attr *vap, vfs_context_t ctx)
{
	struct cnode *cp = NULL;
	struct cnode *dcp;
	struct vnode *tvp;
	struct hfsmount *hfsmp;
	struct cat_desc in_desc, out_desc;
	struct cat_attr attr;
	struct timeval tv;
	int lockflags;
	int error, started_tr = 0;
	enum vtype vnodetype;
	int mode;

	dcp = VTOC(dvp);
	if ((error = hfs_lock(dcp, HFS_EXCLUSIVE_LOCK)))
		return (error);

	/* Serialize with other modifications of this directory. */
	dcp->c_flag |= C_DIR_MODIFICATION;

	hfsmp = VTOHFS(dvp);
	*vpp = NULL;
	tvp = NULL;
	out_desc.cd_flags = 0;
	out_desc.cd_nameptr = NULL;

	/* VNON means the caller didn't care; default to a regular file. */
	vnodetype = vap->va_type;
	if (vnodetype == VNON)
		vnodetype = VREG;
	mode = MAKEIMODE(vnodetype, vap->va_mode);

	/* Check if we're out of usable disk space (root may use the reserve). */
	if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
		error = ENOSPC;
		goto exit;
	}

	microtime(&tv);

	/* Setup the default attributes */
	bzero(&attr, sizeof(attr));
	attr.ca_mode = mode;
	attr.ca_linkcount = 1;
	if (VATTR_IS_ACTIVE(vap, va_rdev)) {
		attr.ca_rdev = vap->va_rdev;
	}
	if (VATTR_IS_ACTIVE(vap, va_create_time)) {
		VATTR_SET_SUPPORTED(vap, va_create_time);
		attr.ca_itime = vap->va_create_time.tv_sec;
	} else {
		attr.ca_itime = tv.tv_sec;
	}
	/* Plain HFS stores local time; compensate for DST like hfs_update. */
	if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
		attr.ca_itime += 3600;	/* Same as what hfs_update does */
	}
	attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
	attr.ca_atimeondisk = attr.ca_atime;
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		VATTR_SET_SUPPORTED(vap, va_flags);
		attr.ca_flags = vap->va_flags;
	}

	/*
	 * HFS+ only: all files get ThreadExists
	 * HFSX only: dirs get HasFolderCount
	 */
	if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
		if (vnodetype == VDIR) {
			if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
				attr.ca_recflags = kHFSHasFolderCountMask;
		} else {
			attr.ca_recflags = kHFSThreadExistsMask;
		}
	}

	attr.ca_uid = vap->va_uid;
	attr.ca_gid = vap->va_gid;
	VATTR_SET_SUPPORTED(vap, va_mode);
	VATTR_SET_SUPPORTED(vap, va_uid);
	VATTR_SET_SUPPORTED(vap, va_gid);

	/* Tag symlinks with a type and creator. */
	if (vnodetype == VLNK) {
		struct FndrFileInfo *fip;

		fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
		fip->fdType = SWAP_BE32(kSymLinkFileType);
		fip->fdCreator = SWAP_BE32(kSymLinkCreator);
	}
	if (cnp->cn_flags & ISWHITEOUT)
		attr.ca_flags |= UF_OPAQUE;

	/* Setup the descriptor */
	in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
	in_desc.cd_namelen = cnp->cn_namelen;
	in_desc.cd_parentcnid = dcp->c_fileid;
	in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
	in_desc.cd_hint = dcp->c_childhint;
	in_desc.cd_encoding = 0;

	if ((error = hfs_start_transaction(hfsmp)) != 0) {
		goto exit;
	}
	started_tr = 1;

	// have to also lock the attribute file because cat_create() needs
	// to check that any fileID it wants to use does not have orphaned
	// attributes in it.
	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

	/* Reserve some space in the Catalog file. */
	if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
		hfs_systemfile_unlock(hfsmp, lockflags);
		goto exit;
	}
	error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
	if (error == 0) {
		/* Update the parent directory */
		dcp->c_childhint = out_desc.cd_hint;	/* Cache directory's location */
		dcp->c_entries++;
		if (vnodetype == VDIR) {
			INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
		}
		dcp->c_dirchangecnt++;
		dcp->c_ctime = tv.tv_sec;
		dcp->c_mtime = tv.tv_sec;
		(void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
		HFS_KNOTE(dvp, NOTE_ATTRIB);
	}
	hfs_systemfile_unlock(hfsmp, lockflags);
	if (error)
		goto exit;

	/* Invalidate negative cache entries in the directory */
	if (dcp->c_flag & C_NEG_ENTRIES) {
		cache_purge_negatives(dvp);
		dcp->c_flag &= ~C_NEG_ENTRIES;
	}

	if (vnodetype == VDIR) {
		HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
	} else {
		HFS_KNOTE(dvp, NOTE_WRITE);
	};

	hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
		(dcp->c_cnid == kHFSRootFolderID));

	// XXXdbg
	// have to end the transaction here before we call hfs_getnewvnode()
	// because that can cause us to try and reclaim a vnode on a different
	// file system which could cause us to start a transaction which can
	// deadlock with someone on that other file system (since we could be
	// holding two transaction locks as well as various vnodes and we did
	// not obtain the locks on them in the proper order).
	//
	// NOTE: this means that if the quota check fails or we have to update
	//       the change time on a block-special device that those changes
	//       will happen as part of independent transactions.
	//
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	/* Do not create vnode for whiteouts */
	if (S_ISWHT(mode)) {
		goto exit;
	}

	/*
	 * Create a vnode for the object just created.
	 *
	 * The cnode is locked on successful return.
	 */
	error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, GNV_CREATE, &attr, NULL, &tvp);
	if (error)
		goto exit;

	cp = VTOC(tvp);
#if QUOTA
	/*
	 * We call hfs_chkiq with FORCE flag so that if we
	 * fall through to the rmdir we actually have
	 * accounted for the inode
	 */
	if (hfsmp->hfs_flags & HFS_QUOTAS) {
		if ((error = hfs_getinoquota(cp)) ||
		    (error = hfs_chkiq(cp, 1, vfs_context_ucred(ctx), FORCE))) {

			/* Over quota: undo the create we just completed. */
			if (vnode_isdir(tvp))
				(void) hfs_removedir(dvp, tvp, cnp, 0);
			else {
				hfs_unlock(cp);
				hfs_lock_truncate(cp, TRUE);
				hfs_lock(cp, HFS_FORCE_LOCK);
				(void) hfs_removefile(dvp, tvp, cnp, 0, 0, 0);
				hfs_unlock_truncate(cp, TRUE);
			}
			/*
			 * we successfully allocated a new vnode, but
			 * the quota check is telling us we're beyond
			 * our limit, so we need to dump our lock + reference
			 */
			hfs_unlock(cp);
			vnode_put(tvp);

			goto exit;
		}
	}
#endif /* QUOTA */

	*vpp = tvp;
exit:
	cat_releasedesc(&out_desc);

	/*
	 * Check if a file is located in the "Cleanup At Startup"
	 * directory.  If it is then tag it as NODUMP so that we
	 * can be lazy about zero filling data holes.
	 */
	if ((error == 0) && dvp && (vnodetype == VREG) &&
	    (dcp->c_desc.cd_nameptr != NULL) &&
	    (strncmp((const char *)dcp->c_desc.cd_nameptr,
		     CARBON_TEMP_DIR_NAME,
		     sizeof(CARBON_TEMP_DIR_NAME)) == 0)) {
		struct vnode *ddvp;

		dcp->c_flag &= ~C_DIR_MODIFICATION;
		wakeup((caddr_t)&dcp->c_flag);

		hfs_unlock(dcp);
		dvp = NULL;

		/*
		 * The parent of "Cleanup At Startup" should
		 * have the ASCII name of the userid.
		 *
		 * NOTE(review): dcp->c_parentcnid is read here after
		 * hfs_unlock(dcp) — presumably safe because the field is
		 * stable for a live directory, but worth confirming.
		 */
		if (hfs_vget(hfsmp, dcp->c_parentcnid, &ddvp, 0) == 0) {
			if (VTOC(ddvp)->c_desc.cd_nameptr) {
				uid_t uid;

				uid = strtoul((const char *)VTOC(ddvp)->c_desc.cd_nameptr, 0, 0);
				if ((uid == cp->c_uid) ||
				    (uid == vfs_context_ucred(ctx)->cr_uid)) {
					cp->c_flags |= UF_NODUMP;
					cp->c_touch_chgtime = TRUE;
				}
			}
			hfs_unlock(VTOC(ddvp));
			vnode_put(ddvp);
		}
	}
	/* Release the directory-modification gate and wake any waiters. */
	if (dvp) {
		dcp->c_flag &= ~C_DIR_MODIFICATION;
		wakeup((caddr_t)&dcp->c_flag);

		hfs_unlock(dcp);
	}
	if (error == 0 && cp != NULL) {
		hfs_unlock(cp);
	}
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	return (error);
}
3740
3741
3742 /*
3743 * Return a referenced vnode for the resource fork
3744 *
3745 * cnode for vnode vp must already be locked.
3746 *
3747 * can_drop_lock is true if its safe to temporally drop/re-acquire the cnode lock
3748 */
__private_extern__
int
hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, int can_drop_lock)
{
	struct vnode *rvp;
	struct vnode *dvp = NULLVP;
	struct cnode *cp = VTOC(vp);
	int error;
	int vid;

restart:
	/* Attempt to use existing vnode */
	if ((rvp = cp->c_rsrc_vp)) {
		vid = vnode_vid(rvp);

		/*
		 * It is not safe to hold the cnode lock when calling vnode_getwithvid()
		 * for the alternate fork -- vnode_getwithvid() could deadlock waiting
		 * for a VL_WANTTERM while another thread has an iocount on the alternate
		 * fork vnode and is attempting to acquire the common cnode lock.
		 *
		 * But it's also not safe to drop the cnode lock when we're holding
		 * multiple cnode locks, like during a hfs_removefile() operation
		 * since we could lock out of order when re-acquiring the cnode lock.
		 *
		 * So we can only drop the lock here if its safe to drop it -- which is
		 * most of the time with the exception being hfs_removefile().
		 */
		if (can_drop_lock)
			hfs_unlock(cp);

		error = vnode_getwithvid(rvp, vid);

		if (can_drop_lock) {
			(void) hfs_lock(cp, HFS_FORCE_LOCK);
			/*
			 * When our lock was relinquished, the resource fork
			 * could have been recycled.  Check for this and try
			 * again.
			 */
			if (error == ENOENT)
				goto restart;
		}
		if (error) {
			const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;

			if (name)
				printf("hfs_vgetrsrc: couldn't get resource"
				       " fork for %s, err %d\n", name, error);
			return (error);
		}
	} else {
		struct cat_fork rsrcfork;
		struct componentname cn;
		int lockflags;

		/*
		 * Make sure cnode lock is exclusive, if not upgrade it.
		 *
		 * We assume that we were called from a read-only VNOP (getattr)
		 * and that its safe to have the cnode lock dropped and reacquired.
		 */
		if (cp->c_lockowner != current_thread()) {
			if (!can_drop_lock)
				return (EINVAL);
			/*
			 * If the upgrade fails we lose the lock and
			 * have to take the exclusive lock on our own.
			 */
			if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE)
				lck_rw_lock_exclusive(&cp->c_rwlock);
			cp->c_lockowner = current_thread();
		}

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

		/* Get resource fork data */
		error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
				(struct cat_attr *)0, &rsrcfork, NULL);

		hfs_systemfile_unlock(hfsmp, lockflags);
		if (error)
			return (error);

		/*
		 * Supply hfs_getnewvnode with a component name:
		 * the file's name with the rsrc-fork path suffix appended.
		 */
		cn.cn_pnbuf = NULL;
		if (cp->c_desc.cd_nameptr) {
			MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = ISLASTCN | HASBUF;
			cn.cn_context = NULL;
			cn.cn_pnlen = MAXPATHLEN;
			cn.cn_nameptr = cn.cn_pnbuf;
			cn.cn_hash = 0;
			cn.cn_consume = 0;
			cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
						 "%s%s", cp->c_desc.cd_nameptr,
						 _PATH_RSRCFORKSPEC);
		}
		dvp = vnode_getparent(vp);
		error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
		                        &cp->c_desc, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
		                        &rsrcfork, &rvp);
		if (dvp)
			vnode_put(dvp);
		if (cn.cn_pnbuf)
			FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
		if (error)
			return (error);
	}

	*rvpp = rvp;
	return (0);
}
3865
3866
3867 static void
3868 filt_hfsdetach(struct knote *kn)
3869 {
3870 struct vnode *vp;
3871
3872 vp = (struct vnode *)kn->kn_hook;
3873 if (vnode_getwithvid(vp, kn->kn_hookid))
3874 return;
3875
3876 if (1) { /* ! KNDETACH_VNLOCKED */
3877 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
3878 (void) KNOTE_DETACH(&VTOC(vp)->c_knotes, kn);
3879 hfs_unlock(VTOC(vp));
3880 }
3881 }
3882
3883 vnode_put(vp);
3884 }
3885
3886 /*ARGSUSED*/
3887 static int
3888 filt_hfsread(struct knote *kn, long hint)
3889 {
3890 struct vnode *vp = (struct vnode *)kn->kn_hook;
3891 int dropvp = 0;
3892
3893 if (hint == 0) {
3894 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3895 hint = NOTE_REVOKE;
3896 } else
3897 dropvp = 1;
3898 }
3899 if (hint == NOTE_REVOKE) {
3900 /*
3901 * filesystem is gone, so set the EOF flag and schedule
3902 * the knote for deletion.
3903 */
3904 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3905 return (1);
3906 }
3907
3908 /* poll(2) semantics dictate always saying there is data */
3909 if (!(kn->kn_flags & EV_POLL)) {
3910 off_t amount;
3911
3912 amount = VTOF(vp)->ff_size - kn->kn_fp->f_fglob->fg_offset;
3913 if (amount > (off_t)INTPTR_MAX)
3914 kn->kn_data = INTPTR_MAX;
3915 else if (amount < (off_t)INTPTR_MIN)
3916 kn->kn_data = INTPTR_MIN;
3917 else
3918 kn->kn_data = (intptr_t)amount;
3919 } else {
3920 kn->kn_data = 1;
3921 }
3922
3923 if (dropvp)
3924 vnode_put(vp);
3925
3926 return (kn->kn_data != 0);
3927 }
3928
3929 /*ARGSUSED*/
3930 static int
3931 filt_hfswrite(struct knote *kn, long hint)
3932 {
3933 struct vnode *vp = (struct vnode *)kn->kn_hook;
3934
3935 if (hint == 0) {
3936 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3937 hint = NOTE_REVOKE;
3938 } else
3939 vnode_put(vp);
3940 }
3941 if (hint == NOTE_REVOKE) {
3942 /*
3943 * filesystem is gone, so set the EOF flag and schedule
3944 * the knote for deletion.
3945 */
3946 kn->kn_data = 0;
3947 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3948 return (1);
3949 }
3950 kn->kn_data = 0;
3951 return (1);
3952 }
3953
3954 static int
3955 filt_hfsvnode(struct knote *kn, long hint)
3956 {
3957 struct vnode *vp = (struct vnode *)kn->kn_hook;
3958
3959 if (hint == 0) {
3960 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3961 hint = NOTE_REVOKE;
3962 } else
3963 vnode_put(vp);
3964 }
3965 if (kn->kn_sfflags & hint)
3966 kn->kn_fflags |= hint;
3967 if ((hint == NOTE_REVOKE)) {
3968 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3969 return (1);
3970 }
3971
3972 return (kn->kn_fflags != 0);
3973 }
3974
/*
 * Filter-ops vectors for the knote filters above.
 * Initializer order is presumably { f_isfd, f_attach, f_detach, f_event }
 * (fd-attached, no attach hook) -- confirm against sys/event.h.
 */
static struct filterops hfsread_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfsread };
static struct filterops hfswrite_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfswrite };
static struct filterops hfsvnode_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfsvnode };
3981
3982 /*
3983 * Add a kqueue filter.
3984 */
3985 static int
3986 hfs_vnop_kqfiltadd(
3987 struct vnop_kqfilt_add_args /* {
3988 struct vnode *a_vp;
3989 struct knote *a_kn;
3990 struct proc *p;
3991 vfs_context_t a_context;
3992 } */ *ap)
3993 {
3994 struct vnode *vp = ap->a_vp;
3995 struct knote *kn = ap->a_kn;
3996 int error;
3997
3998 switch (kn->kn_filter) {
3999 case EVFILT_READ:
4000 if (vnode_isreg(vp)) {
4001 kn->kn_fop = &hfsread_filtops;
4002 } else {
4003 return EINVAL;
4004 };
4005 break;
4006 case EVFILT_WRITE:
4007 if (vnode_isreg(vp)) {
4008 kn->kn_fop = &hfswrite_filtops;
4009 } else {
4010 return EINVAL;
4011 };
4012 break;
4013 case EVFILT_VNODE:
4014 kn->kn_fop = &hfsvnode_filtops;
4015 break;
4016 default:
4017 return (1);
4018 }
4019
4020 kn->kn_hook = (caddr_t)vp;
4021 kn->kn_hookid = vnode_vid(vp);
4022
4023 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
4024 return (error);
4025 KNOTE_ATTACH(&VTOC(vp)->c_knotes, kn);
4026 hfs_unlock(VTOC(vp));
4027
4028 return (0);
4029 }
4030
4031 /*
4032 * Remove a kqueue filter
4033 */
4034 static int
4035 hfs_vnop_kqfiltremove(ap)
4036 struct vnop_kqfilt_remove_args /* {
4037 struct vnode *a_vp;
4038 uintptr_t ident;
4039 vfs_context_t a_context;
4040 } */__unused *ap;
4041 {
4042 int result;
4043
4044 result = ENOTSUP; /* XXX */
4045
4046 return (result);
4047 }
4048
4049 /*
4050 * Wrapper for special device reads
4051 */
4052 static int
4053 hfsspec_read(ap)
4054 struct vnop_read_args /* {
4055 struct vnode *a_vp;
4056 struct uio *a_uio;
4057 int a_ioflag;
4058 vfs_context_t a_context;
4059 } */ *ap;
4060 {
4061 /*
4062 * Set access flag.
4063 */
4064 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
4065 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
4066 }
4067
4068 /*
4069 * Wrapper for special device writes
4070 */
4071 static int
4072 hfsspec_write(ap)
4073 struct vnop_write_args /* {
4074 struct vnode *a_vp;
4075 struct uio *a_uio;
4076 int a_ioflag;
4077 vfs_context_t a_context;
4078 } */ *ap;
4079 {
4080 /*
4081 * Set update and change flags.
4082 */
4083 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
4084 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
4085 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
4086 }
4087
4088 /*
4089 * Wrapper for special device close
4090 *
4091 * Update the times on the cnode then do device close.
4092 */
4093 static int
4094 hfsspec_close(ap)
4095 struct vnop_close_args /* {
4096 struct vnode *a_vp;
4097 int a_fflag;
4098 vfs_context_t a_context;
4099 } */ *ap;
4100 {
4101 struct vnode *vp = ap->a_vp;
4102 struct cnode *cp;
4103
4104 if (vnode_isinuse(ap->a_vp, 1)) {
4105 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
4106 cp = VTOC(vp);
4107 hfs_touchtimes(VTOHFS(vp), cp);
4108 hfs_unlock(cp);
4109 }
4110 }
4111 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
4112 }
4113
4114 #if FIFO
4115 /*
4116 * Wrapper for fifo reads
4117 */
4118 static int
4119 hfsfifo_read(ap)
4120 struct vnop_read_args /* {
4121 struct vnode *a_vp;
4122 struct uio *a_uio;
4123 int a_ioflag;
4124 vfs_context_t a_context;
4125 } */ *ap;
4126 {
4127 /*
4128 * Set access flag.
4129 */
4130 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
4131 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
4132 }
4133
4134 /*
4135 * Wrapper for fifo writes
4136 */
4137 static int
4138 hfsfifo_write(ap)
4139 struct vnop_write_args /* {
4140 struct vnode *a_vp;
4141 struct uio *a_uio;
4142 int a_ioflag;
4143 vfs_context_t a_context;
4144 } */ *ap;
4145 {
4146 /*
4147 * Set update and change flags.
4148 */
4149 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
4150 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
4151 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
4152 }
4153
4154 /*
4155 * Wrapper for fifo close
4156 *
4157 * Update the times on the cnode then do device close.
4158 */
4159 static int
4160 hfsfifo_close(ap)
4161 struct vnop_close_args /* {
4162 struct vnode *a_vp;
4163 int a_fflag;
4164 vfs_context_t a_context;
4165 } */ *ap;
4166 {
4167 struct vnode *vp = ap->a_vp;
4168 struct cnode *cp;
4169
4170 if (vnode_isinuse(ap->a_vp, 1)) {
4171 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
4172 cp = VTOC(vp);
4173 hfs_touchtimes(VTOHFS(vp), cp);
4174 hfs_unlock(cp);
4175 }
4176 }
4177 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
4178 }
4179
4180 /*
4181 * kqfilt_add wrapper for fifos.
4182 *
4183 * Fall through to hfs kqfilt_add routines if needed
4184 */
4185 int
4186 hfsfifo_kqfilt_add(ap)
4187 struct vnop_kqfilt_add_args *ap;
4188 {
4189 int error;
4190
4191 error = VOCALL(fifo_vnodeop_p, VOFFSET(vnop_kqfilt_add), ap);
4192 if (error)
4193 error = hfs_vnop_kqfiltadd(ap);
4194 return (error);
4195 }
4196
4197 /*
4198 * kqfilt_remove wrapper for fifos.
4199 *
4200 * Fall through to hfs kqfilt_remove routines if needed
4201 */
4202 int
4203 hfsfifo_kqfilt_remove(ap)
4204 struct vnop_kqfilt_remove_args *ap;
4205 {
4206 int error;
4207
4208 error = VOCALL(fifo_vnodeop_p, VOFFSET(vnop_kqfilt_remove), ap);
4209 if (error)
4210 error = hfs_vnop_kqfiltremove(ap);
4211 return (error);
4212 }
4213
4214 #endif /* FIFO */
4215
4216 /*
4217 * Synchronize a file's in-core state with that on disk.
4218 */
4219 static int
4220 hfs_vnop_fsync(ap)
4221 struct vnop_fsync_args /* {
4222 struct vnode *a_vp;
4223 int a_waitfor;
4224 vfs_context_t a_context;
4225 } */ *ap;
4226 {
4227 struct vnode* vp = ap->a_vp;
4228 int error;
4229
4230 /*
4231 * We need to allow ENOENT lock errors since unlink
4232 * systenm call can call VNOP_FSYNC during vclean.
4233 */
4234 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
4235 if (error)
4236 return (0);
4237
4238 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
4239
4240 hfs_unlock(VTOC(vp));
4241 return (error);
4242 }
4243
4244
4245 static int
4246 hfs_vnop_whiteout(ap)
4247 struct vnop_whiteout_args /* {
4248 struct vnode *a_dvp;
4249 struct componentname *a_cnp;
4250 int a_flags;
4251 vfs_context_t a_context;
4252 } */ *ap;
4253 {
4254 int error = 0;
4255 struct vnode *vp = NULL;
4256 struct vnode_attr va;
4257 struct vnop_lookup_args lookup_args;
4258 struct vnop_remove_args remove_args;
4259 struct hfsmount *hfsmp;
4260
4261 hfsmp = VTOHFS(ap->a_dvp);
4262 if (hfsmp->hfs_flags & HFS_STANDARD) {
4263 error = ENOTSUP;
4264 goto exit;
4265 }
4266
4267 switch (ap->a_flags) {
4268 case LOOKUP:
4269 error = 0;
4270 break;
4271
4272 case CREATE:
4273 VATTR_INIT(&va);
4274 VATTR_SET(&va, va_type, VREG);
4275 VATTR_SET(&va, va_mode, S_IFWHT);
4276 VATTR_SET(&va, va_uid, 0);
4277 VATTR_SET(&va, va_gid, 0);
4278
4279 error = hfs_makenode(ap->a_dvp, &vp, ap->a_cnp, &va, ap->a_context);
4280 /* No need to release the vnode as no vnode is created for whiteouts */
4281 break;
4282
4283 case DELETE:
4284 lookup_args.a_dvp = ap->a_dvp;
4285 lookup_args.a_vpp = &vp;
4286 lookup_args.a_cnp = ap->a_cnp;
4287 lookup_args.a_context = ap->a_context;
4288
4289 error = hfs_vnop_lookup(&lookup_args);
4290 if (error) {
4291 break;
4292 }
4293
4294 remove_args.a_dvp = ap->a_dvp;
4295 remove_args.a_vp = vp;
4296 remove_args.a_cnp = ap->a_cnp;
4297 remove_args.a_flags = 0;
4298 remove_args.a_context = ap->a_context;
4299
4300 error = hfs_vnop_remove(&remove_args);
4301 vnode_put(vp);
4302 break;
4303
4304 default:
4305 panic("hfs_vnop_whiteout: unknown operation (flag = %x)\n", ap->a_flags);
4306 };
4307
4308 exit:
4309 return (error);
4310 }
4311
/* Dispatch vector for regular HFS vnodes; filled in when the opv_desc below is registered. */
int (**hfs_vnodeop_p)(void *);

/* Common cast applied to every handler so entries fit the vnodeopv_entry_desc slot. */
#define VOPFUNC int (*)(void *)

/* Vnode operations table for regular files, directories and symlinks. */
struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
    { &vnop_default_desc, (VOPFUNC)vn_default_error },
    { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup },		/* lookup */
    { &vnop_create_desc, (VOPFUNC)hfs_vnop_create },		/* create */
    { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod },		/* mknod */
    { &vnop_open_desc, (VOPFUNC)hfs_vnop_open },			/* open */
    { &vnop_close_desc, (VOPFUNC)hfs_vnop_close },		/* close */
    { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr },		/* getattr */
    { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr },		/* setattr */
    { &vnop_read_desc, (VOPFUNC)hfs_vnop_read },			/* read */
    { &vnop_write_desc, (VOPFUNC)hfs_vnop_write },		/* write */
    { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl },		/* ioctl */
    { &vnop_select_desc, (VOPFUNC)hfs_vnop_select },		/* select */
    { &vnop_revoke_desc, (VOPFUNC)nop_revoke },			/* revoke */
    { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange },		/* exchange */
    { &vnop_mmap_desc, (VOPFUNC)err_mmap },			/* mmap */
    { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync },		/* fsync */
    { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove },		/* remove */
    { &vnop_link_desc, (VOPFUNC)hfs_vnop_link },			/* link */
    { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename },		/* rename */
    { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir },		/* mkdir */
    { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir },		/* rmdir */
    { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink },		/* symlink */
    { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir },		/* readdir */
    { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr },	/* readdirattr */
    { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink },		/* readlink */
    { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive },		/* inactive */
    { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim },		/* reclaim */
    { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy },		/* strategy */
    { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf },		/* pathconf */
    { &vnop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
    { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate },		/* allocate */
    { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search },		/* search fs */
    { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },		/* bwrite */
    { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein },		/* pagein */
    { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout },		/* pageout */
    { &vnop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
    { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff },		/* blktooff */
    { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk },		/* offtoblk */
    { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap },		/* blockmap */
    { &vnop_kqfilt_add_desc, (VOPFUNC)hfs_vnop_kqfiltadd },	/* kqfilt_add */
    { &vnop_kqfilt_remove_desc, (VOPFUNC)hfs_vnop_kqfiltremove },	/* kqfilt_remove */
    { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},		/* getxattr */
    { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},		/* setxattr */
    { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},	/* removexattr */
    { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},	/* listxattr */
    { &vnop_whiteout_desc, (VOPFUNC)hfs_vnop_whiteout},		/* whiteout */
#if NAMEDSTREAMS
    { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },	/* getnamedstream */
    { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },	/* makenamedstream */
    { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },	/* removenamedstream */
#endif
    { NULL, (VOPFUNC)NULL }
};

/* Registration record: binds hfs_vnodeop_p to the table above. */
struct vnodeopv_desc hfs_vnodeop_opv_desc =
{ &hfs_vnodeop_p, hfs_vnodeop_entries };
4373
/* Dispatch vector for special (block/character device) vnodes on HFS. */
int (**hfs_specop_p)(void *);

/* Vnode operations table for device nodes: mostly spec_* defaults, with
 * HFS-specific attribute, data and lifecycle handlers layered on top. */
struct vnodeopv_entry_desc hfs_specop_entries[] = {
    { &vnop_default_desc, (VOPFUNC)vn_default_error },
    { &vnop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
    { &vnop_create_desc, (VOPFUNC)spec_create },		/* create */
    { &vnop_mknod_desc, (VOPFUNC)spec_mknod },		/* mknod */
    { &vnop_open_desc, (VOPFUNC)spec_open },			/* open */
    { &vnop_close_desc, (VOPFUNC)hfsspec_close },		/* close */
    { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr },	/* getattr */
    { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr },	/* setattr */
    { &vnop_read_desc, (VOPFUNC)hfsspec_read },		/* read */
    { &vnop_write_desc, (VOPFUNC)hfsspec_write },		/* write */
    { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
    { &vnop_select_desc, (VOPFUNC)spec_select },		/* select */
    { &vnop_revoke_desc, (VOPFUNC)spec_revoke },		/* revoke */
    { &vnop_mmap_desc, (VOPFUNC)spec_mmap },			/* mmap */
    { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync },		/* fsync */
    { &vnop_remove_desc, (VOPFUNC)spec_remove },		/* remove */
    { &vnop_link_desc, (VOPFUNC)spec_link },			/* link */
    { &vnop_rename_desc, (VOPFUNC)spec_rename },		/* rename */
    { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir },		/* mkdir */
    { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir },		/* rmdir */
    { &vnop_symlink_desc, (VOPFUNC)spec_symlink },		/* symlink */
    { &vnop_readdir_desc, (VOPFUNC)spec_readdir },		/* readdir */
    { &vnop_readlink_desc, (VOPFUNC)spec_readlink },		/* readlink */
    { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive },	/* inactive */
    { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim },	/* reclaim */
    { &vnop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
    { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
    { &vnop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
    { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },		/* bwrite */
    { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein },		/* Pagein */
    { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout },	/* Pageout */
    { &vnop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
    { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff },	/* blktooff */
    { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk },	/* offtoblk */
    { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};
/* Registration record: binds hfs_specop_p to the table above. */
struct vnodeopv_desc hfs_specop_opv_desc =
	{ &hfs_specop_p, hfs_specop_entries };
4414
#if FIFO
/* Dispatch vector for FIFO (named pipe) vnodes on HFS. */
int (**hfs_fifoop_p)(void *);

/* Vnode operations table for FIFOs: fifo_* defaults with HFS-specific
 * attribute, data and lifecycle handlers layered on top. */
struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
    { &vnop_default_desc, (VOPFUNC)vn_default_error },
    { &vnop_lookup_desc, (VOPFUNC)fifo_lookup },		/* lookup */
    { &vnop_create_desc, (VOPFUNC)fifo_create },		/* create */
    { &vnop_mknod_desc, (VOPFUNC)fifo_mknod },		/* mknod */
    { &vnop_open_desc, (VOPFUNC)fifo_open },			/* open */
    { &vnop_close_desc, (VOPFUNC)hfsfifo_close },		/* close */
    { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr },	/* getattr */
    { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr },	/* setattr */
    { &vnop_read_desc, (VOPFUNC)hfsfifo_read },		/* read */
    { &vnop_write_desc, (VOPFUNC)hfsfifo_write },		/* write */
    { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl },		/* ioctl */
    { &vnop_select_desc, (VOPFUNC)fifo_select },		/* select */
    { &vnop_revoke_desc, (VOPFUNC)fifo_revoke },		/* revoke */
    { &vnop_mmap_desc, (VOPFUNC)fifo_mmap },			/* mmap */
    { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync },		/* fsync */
    { &vnop_remove_desc, (VOPFUNC)fifo_remove },		/* remove */
    { &vnop_link_desc, (VOPFUNC)fifo_link },			/* link */
    { &vnop_rename_desc, (VOPFUNC)fifo_rename },		/* rename */
    { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir },		/* mkdir */
    { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir },		/* rmdir */
    { &vnop_symlink_desc, (VOPFUNC)fifo_symlink },		/* symlink */
    { &vnop_readdir_desc, (VOPFUNC)fifo_readdir },		/* readdir */
    { &vnop_readlink_desc, (VOPFUNC)fifo_readlink },		/* readlink */
    { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive },	/* inactive */
    { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim },	/* reclaim */
    { &vnop_strategy_desc, (VOPFUNC)fifo_strategy },		/* strategy */
    { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf },		/* pathconf */
    { &vnop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
    { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },		/* bwrite */
    { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein },		/* Pagein */
    { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout },	/* Pageout */
    { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, 		/* copyfile */
    { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff },	/* blktooff */
    { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk },	/* offtoblk */
    { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap },	/* blockmap */
    { &vnop_kqfilt_add_desc, (VOPFUNC)hfsfifo_kqfilt_add },	/* kqfilt_add */
    { &vnop_kqfilt_remove_desc, (VOPFUNC)hfsfifo_kqfilt_remove },	/* kqfilt_remove */
    { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};
/* Registration record: binds hfs_fifoop_p to the table above. */
struct vnodeopv_desc hfs_fifoop_opv_desc =
	{ &hfs_fifoop_p, hfs_fifoop_entries };
#endif /* FIFO */
4460
4461
4462