/*
 * Copyright (c) 2002-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/ubc.h>
#include <sys/quota.h>
#include <sys/kdebug.h>

#include <kern/locks.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <hfs/hfs.h>
#include <hfs/hfs_catalog.h>
#include <hfs/hfs_cnode.h>
#include <hfs/hfs_quota.h>

extern int prtactive;

extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

static int hfs_filedone(struct vnode *vp, vfs_context_t context);

static void hfs_reclaim_cnode(struct cnode *);

static int hfs_isordered(struct cnode *, struct cnode *);

/*
 * Last reference to a cnode. If necessary, write or delete it.
 */
__private_extern__
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp;
    struct hfsmount *hfsmp = VTOHFS(vp);
    struct proc *p = vfs_context_proc(ap->a_context);
    int error = 0;
    int recycle = 0;
    int forkcount = 0;
    int truncated = 0;
    int started_tr = 0;
    int took_trunc_lock = 0;
    cat_cookie_t cookie;
    int cat_reserve = 0;
    int lockflags;
    enum vtype v_type;

    v_type = vnode_vtype(vp);
    cp = VTOC(vp);

    if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
        (hfsmp->hfs_freezing_proc == p)) {
        return (0);
    }

    /*
     * Ignore nodes related to stale file handles.
     */
    if (cp->c_mode == 0) {
        vnode_recycle(vp);
        return (0);
    }

    if ((v_type == VREG || v_type == VLNK)) {
        hfs_lock_truncate(cp, TRUE);
        took_trunc_lock = 1;
    }

    (void) hfs_lock(cp, HFS_FORCE_LOCK);

    /*
     * Recycle named streams quickly so that the data fork vnode can
     * go inactive in a timely manner (so that it can be zero filled
     * or truncated if needed).
     */
    if (vnode_isnamedstream(vp))
        recycle = 1;

    /*
     * We should lock the cnode before checking the flags in the
     * condition below, and should unlock the cnode before calling
     * ubc_setsize(), as the cluster code can call other HFS vnops
     * which will try to acquire the same cnode lock and deadlock.
     */
    if ((v_type == VREG || v_type == VLNK) &&
        (cp->c_flag & C_DELETED) &&
        (VTOF(vp)->ff_blocks != 0)) {
        hfs_unlock(cp);
        ubc_setsize(vp, 0);
        (void) hfs_lock(cp, HFS_FORCE_LOCK);
    }

    if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
        hfs_filedone(vp, ap->a_context);
    }
    /*
     * Remove any directory hints or cached origins
     */
    if (v_type == VDIR) {
        hfs_reldirhints(cp, 0);
        if (cp->c_flag & C_HARDLINK)
            hfs_relorigins(cp);
    }

    if (cp->c_datafork)
        ++forkcount;
    if (cp->c_rsrcfork)
        ++forkcount;

    /* If needed, get rid of any fork's data for a deleted file */
    if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
        if (VTOF(vp)->ff_blocks != 0) {
            /*
             * Since we're already inside a transaction,
             * tell hfs_truncate to skip the ubc_setsize.
             */
            error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
            if (error)
                goto out;
            truncated = 1;
        }
        recycle = 1;

        /*
         * Check if there are any resource fork blocks that need to
         * be reclaimed. This covers the case where there is a
         * resource fork but it's not in core.
         */
        if ((cp->c_blocks > 0) && (forkcount == 1) && (vp != cp->c_rsrc_vp)) {
            struct vnode *rvp = NULLVP;

            error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
            if (error)
                goto out;
            /*
             * Defer the vnode_put and ubc_setsize on rvp until hfs_unlock().
             */
            cp->c_flag |= C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE;
            error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, ap->a_context);
            if (error)
                goto out;
            vnode_recycle(rvp);  /* all done with this vnode */
        }
    }

    // If needed, get rid of any xattrs that this file may have.
    // Note that this must happen outside of any other transactions
    // because it starts/ends its own transactions and grabs its
    // own locks. This is to prevent a file with a lot of attributes
    // from creating a transaction that is too large (which panics).
    //
    if ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0 && (cp->c_flag & C_DELETED)) {
        hfs_removeallattr(hfsmp, cp->c_fileid);
    }

    /*
     * Check for a postponed deletion.
     * (only delete cnode when the last fork goes inactive)
     */
    if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
        /*
         * Mark cnode in transit so that no one can get this
         * cnode from cnode hash.
         */
        // hfs_chash_mark_in_transit(cp);
        // XXXdbg - remove the cnode from the hash table since it's deleted
        //          otherwise someone could go to sleep on the cnode and not
        //          be woken up until this vnode gets recycled which could be
        //          a very long time...
        hfs_chashremove(cp);

        cp->c_flag |= C_NOEXISTS;   // XXXdbg
        cp->c_rdev = 0;

        if (started_tr == 0) {
            if (hfs_start_transaction(hfsmp) != 0) {
                error = EINVAL;
                goto out;
            }
            started_tr = 1;
        }

        /*
         * Reserve some space in the Catalog file.
         */
        if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
            goto out;
        }
        cat_reserve = 1;

        lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

        if (cp->c_blocks > 0) {
            printf("hfs_inactive: deleting non-empty%sfile %d, "
                   "blks %d\n", VNODE_IS_RSRC(vp) ? " rsrc " : " ",
                   (int)cp->c_fileid, (int)cp->c_blocks);
        }

        //
        // release the name pointer in the descriptor so that
        // cat_delete() will use the file-id to do the deletion.
        // in the case of hard links this is imperative (in the
        // case of regular files the fileid and cnid are the
        // same so it doesn't matter).
        //
        cat_releasedesc(&cp->c_desc);

        /*
         * The descriptor name may be zero,
         * in which case the fileid is used.
         */
        error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

        if (error && truncated && (error != ENXIO))
            printf("hfs_inactive: couldn't delete a truncated file!");

        /* Update HFS Private Data dir */
        if (error == 0) {
            hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--;
            if (vnode_isdir(vp)) {
                DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
            }
            (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
                             &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
        }

        hfs_systemfile_unlock(hfsmp, lockflags);

        if (error)
            goto out;

#if QUOTA
        if (hfsmp->hfs_flags & HFS_QUOTAS)
            (void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

        cp->c_mode = 0;
        cp->c_flag &= ~C_DELETED;
        cp->c_touch_chgtime = TRUE;
        cp->c_touch_modtime = TRUE;

        if (error == 0)
            hfs_volupdate(hfsmp, (v_type == VDIR) ? VOL_RMDIR : VOL_RMFILE, 0);
    }

    /*
     * A file may have had delayed allocations, in which case hfs_update
     * would not have updated the catalog record (cat_update). We need
     * to do that now, before we lose our fork data. We also need to
     * force the update, or hfs_update will again skip the cat_update.
     */
    if ((cp->c_flag & C_MODIFIED) ||
        cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
        cp->c_flag |= C_FORCEUPDATE;
        hfs_update(vp, 0);
    }
out:
    if (cat_reserve)
        cat_postflight(hfsmp, &cookie, p);

    // XXXdbg - have to do this because a goto could have come here
    if (started_tr) {
        hfs_end_transaction(hfsmp);
        started_tr = 0;
    }

    hfs_unlock(cp);

    if (took_trunc_lock)
        hfs_unlock_truncate(cp, TRUE);

    /*
     * If we are done with the vnode, reclaim it
     * so that it can be reused immediately.
     */
    if (cp->c_mode == 0 || recycle)
        vnode_recycle(vp);

    return (error);
}
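
/*
 * Illustrative sketch (not part of the original file): the open-unlinked
 * lifecycle that ends up in hfs_vnop_inactive() above. When a file is
 * removed while still open, the remove path leaves the catalog record in
 * place and tags the cnode, roughly:
 *
 *      cp->c_flag |= C_DELETED;    // postpone the real delete
 *      ... the last vnode_put() drops the final iocount ...
 *      // VFS then calls VNOP_INACTIVE -> hfs_vnop_inactive(), which
 *      // truncates the forks and performs the cat_delete() seen above.
 *
 * The exact flag choreography lives in the remove code (hfs_vnop_remove
 * and friends in hfs_vnops.c); this is a summary of the lifecycle, not
 * verbatim code.
 */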

/*
 * File clean-up (zero fill and shrink peof).
 */
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
    struct cnode *cp;
    struct filefork *fp;
    struct hfsmount *hfsmp;
    off_t leof;
    u_long blks, blocksize;

    cp = VTOC(vp);
    fp = VTOF(vp);
    hfsmp = VTOHFS(vp);
    leof = fp->ff_size;

    if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
        return (0);

    hfs_unlock(cp);
    (void) cluster_push(vp, IO_CLOSE);
    hfs_lock(cp, HFS_FORCE_LOCK);

    /*
     * Explicitly zero out the areas of the file
     * that are currently marked invalid.
     */
    while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
        struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
        off_t start = invalid_range->rl_start;
        off_t end = invalid_range->rl_end;

        /* The range about to be written must be validated
         * first, so that VNOP_BLOCKMAP() will return the
         * appropriate mapping for the cluster code:
         */
        rl_remove(start, end, &fp->ff_invalidranges);

        hfs_unlock(cp);
        (void) cluster_write(vp, (struct uio *) 0,
                             leof, end + 1, start, (off_t)0,
                             IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
        hfs_lock(cp, HFS_FORCE_LOCK);
        cp->c_flag |= C_MODIFIED;
    }
    cp->c_flag &= ~C_ZFWANTSYNC;
    cp->c_zftimeout = 0;
    blocksize = VTOVCB(vp)->blockSize;
    blks = leof / blocksize;
    if (((off_t)blks * (off_t)blocksize) != leof)
        blks++;
    /*
     * Shrink the peof to the smallest size necessary to contain the leof.
     */
    if (blks < fp->ff_blocks)
        (void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
    hfs_unlock(cp);
    (void) cluster_push(vp, IO_CLOSE);
    hfs_lock(cp, HFS_FORCE_LOCK);

    /*
     * If the hfs_truncate didn't happen to flush the vnode's
     * information out to disk, force it to be updated now that
     * all invalid ranges have been zero-filled and validated:
     */
    if (cp->c_flag & C_MODIFIED) {
        hfs_update(vp, 0);
    }
    return (0);
}
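
/*
 * Worked example of the peof calculation above (illustrative only):
 * with blockSize = 4096 and leof = 10000, blks = 10000 / 4096 = 2, and
 * since 2 * 4096 != 10000 we round up to blks = 3. A fork holding more
 * than 3 allocation blocks is then truncated back to 3 blocks, the
 * smallest physical EOF that still contains the logical EOF.
 */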


/*
 * Reclaim a cnode so that it can be used for other purposes.
 */
__private_extern__
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp;
    struct filefork *fp = NULL;
    struct filefork *altfp = NULL;
    int reclaim_cnode = 0;

    (void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
    cp = VTOC(vp);

    /*
     * Check if a deleted resource fork vnode missed a
     * VNOP_INACTIVE call and requires truncation.
     */
    if (VNODE_IS_RSRC(vp) &&
        (cp->c_flag & C_DELETED) &&
        (VTOF(vp)->ff_blocks != 0)) {
        hfs_unlock(cp);
        ubc_setsize(vp, 0);

        hfs_lock_truncate(cp, TRUE);
        (void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);

        (void) hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);

        hfs_unlock_truncate(cp, TRUE);
    }
    /*
     * A file may have had delayed allocations, in which case hfs_update
     * would not have updated the catalog record (cat_update). We need
     * to do that now, before we lose our fork data. We also need to
     * force the update, or hfs_update will again skip the cat_update.
     */
    if ((cp->c_flag & C_MODIFIED) ||
        cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
        cp->c_flag |= C_FORCEUPDATE;
        hfs_update(vp, 0);
    }

    /*
     * Keep track of an inactive hot file.
     */
    if (!vnode_isdir(vp) &&
        !vnode_issystem(vp) &&
        !(cp->c_flag & (C_DELETED | C_NOEXISTS))) {
        (void) hfs_addhotfile(vp);
    }
    vnode_removefsref(vp);

    /*
     * Find the file fork for this vnode (if any).
     * Also check if another fork is active.
     */
    if (cp->c_vp == vp) {
        fp = cp->c_datafork;
        altfp = cp->c_rsrcfork;

        cp->c_datafork = NULL;
        cp->c_vp = NULL;
    } else if (cp->c_rsrc_vp == vp) {
        fp = cp->c_rsrcfork;
        altfp = cp->c_datafork;

        cp->c_rsrcfork = NULL;
        cp->c_rsrc_vp = NULL;
    } else {
        panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
    }
    /*
     * On the last fork, remove the cnode from its hash chain.
     */
    if (altfp == NULL) {
        /* If we can't remove it then the cnode must persist! */
        if (hfs_chashremove(cp) == 0)
            reclaim_cnode = 1;
        /*
         * Remove any directory hints
         */
        if (vnode_isdir(vp)) {
            hfs_reldirhints(cp, 0);
        }
    }
    /* Release the file fork and related data */
    if (fp) {
        /* Dump cached symlink data */
        if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
            FREE(fp->ff_symlinkptr, M_TEMP);
        }
        FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
    }

    /*
     * If there was only one active fork then we can release the cnode.
     */
    if (reclaim_cnode) {
        hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
        hfs_reclaim_cnode(cp);
    } else /* cnode in use */ {
        hfs_unlock(cp);
    }

    vnode_clearfsnode(vp);
    return (0);
}


extern int (**hfs_vnodeop_p) (void *);
extern int (**hfs_specop_p)  (void *);
#if FIFO
extern int (**hfs_fifoop_p)  (void *);
#endif

/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked.
 */
__private_extern__
int
hfs_getnewvnode(
    struct hfsmount *hfsmp,
    struct vnode *dvp,
    struct componentname *cnp,
    struct cat_desc *descp,
    int flags,
    struct cat_attr *attrp,
    struct cat_fork *forkp,
    struct vnode **vpp)
{
    struct mount *mp = HFSTOVFS(hfsmp);
    struct vnode *vp = NULL;
    struct vnode **cvpp;
    struct vnode *tvp = NULLVP;
    struct cnode *cp = NULL;
    struct filefork *fp = NULL;
    int retval;
    int issystemfile;
    int wantrsrc;
    struct vnode_fsparam vfsp;
    enum vtype vtype;
#if QUOTA
    int i;
#endif /* QUOTA */

    if (attrp->ca_fileid == 0) {
        *vpp = NULL;
        return (ENOENT);
    }

#if !FIFO
    if (IFTOVT(attrp->ca_mode) == VFIFO) {
        *vpp = NULL;
        return (ENOTSUP);
    }
#endif /* !FIFO */
    vtype = IFTOVT(attrp->ca_mode);
    issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
    wantrsrc = flags & GNV_WANTRSRC;

#ifdef HFS_CHECK_LOCK_ORDER
    /*
     * The only case where it's permissible to hold the parent cnode
     * lock is during a create operation (hfs_makenode) or when
     * we don't need the cnode lock (GNV_SKIPLOCK).
     */
    if ((dvp != NULL) &&
        (flags & (GNV_CREATE | GNV_SKIPLOCK)) == 0 &&
        VTOC(dvp)->c_lockowner == current_thread()) {
        panic("hfs_getnewvnode: unexpected hold of parent cnode %p", VTOC(dvp));
    }
#endif /* HFS_CHECK_LOCK_ORDER */

    /*
     * Get a cnode (new or existing)
     */
    cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (flags & GNV_SKIPLOCK));

    /*
     * If the id is no longer valid for lookups we'll get back a NULL cp.
     */
    if (cp == NULL) {
        return (ENOENT);
    }

    /* Hardlinks may need an updated catalog descriptor */
    if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
        replace_desc(cp, descp);
    }
    /* Check if we found a matching vnode */
    if (*vpp != NULL)
        return (0);

    /*
     * If this is a new cnode then initialize it.
     */
    if (ISSET(cp->c_hflag, H_ALLOC)) {
        lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

        /* Make sure it's still valid (i.e., it exists on disk). */
        if (!(flags & GNV_CREATE) &&
            !hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
            hfs_chash_abort(cp);
            hfs_reclaim_cnode(cp);
            *vpp = NULL;
            return (ENOENT);
        }
        bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
        bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

        /* The name was inherited so clear descriptor state... */
        descp->cd_namelen = 0;
        descp->cd_nameptr = NULL;
        descp->cd_flags &= ~CD_HASBUF;

        /* Tag hardlinks */
        if ((vtype == VREG || vtype == VDIR) &&
            ((descp->cd_cnid != attrp->ca_fileid) ||
             (attrp->ca_recflags & kHFSHasLinkChainMask))) {
            cp->c_flag |= C_HARDLINK;
        }
        /*
         * Fix up dir link counts.
         *
         * Earlier versions of Leopard used ca_linkcount for posix
         * nlink support (effectively the sub-directory count + 2).
         * That is now accomplished using the ca_dircount field with
         * the corresponding kHFSHasFolderCountMask flag.
         *
         * For directories the ca_linkcount is the true link count,
         * tracking the number of actual hardlinks to a directory.
         *
         * We only do this if the mount has HFS_FOLDERCOUNT set;
         * at the moment, we only set that for HFSX volumes.
         */
        if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
            (vtype == VDIR) &&
            !(attrp->ca_recflags & kHFSHasFolderCountMask) &&
            (cp->c_attr.ca_linkcount > 1)) {
            if (cp->c_attr.ca_entries == 0)
                cp->c_attr.ca_dircount = 0;
            else
                cp->c_attr.ca_dircount = cp->c_attr.ca_linkcount - 2;

            cp->c_attr.ca_linkcount = 1;
            cp->c_attr.ca_recflags |= kHFSHasFolderCountMask;
            if (!(hfsmp->hfs_flags & HFS_READ_ONLY))
                cp->c_flag |= C_MODIFIED;
        }
#if QUOTA
        if (hfsmp->hfs_flags & HFS_QUOTAS) {
            for (i = 0; i < MAXQUOTAS; i++)
                cp->c_dquot[i] = NODQUOT;
        }
#endif /* QUOTA */
    }

    if (vtype == VDIR) {
        if (cp->c_vp != NULL)
            panic("hfs_getnewvnode: orphaned vnode (data)");
        cvpp = &cp->c_vp;
    } else {
        if (forkp && attrp->ca_blocks < forkp->cf_blocks)
            panic("hfs_getnewvnode: bad ca_blocks (too small)");
        /*
         * Allocate and initialize a file fork...
         */
        MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
                    M_HFSFORK, M_WAITOK);
        fp->ff_cp = cp;
        if (forkp)
            bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
        else
            bzero(&fp->ff_data, sizeof(struct cat_fork));
        rl_init(&fp->ff_invalidranges);
        fp->ff_sysfileinfo = 0;

        if (wantrsrc) {
            if (cp->c_rsrcfork != NULL)
                panic("hfs_getnewvnode: orphaned rsrc fork");
            if (cp->c_rsrc_vp != NULL)
                panic("hfs_getnewvnode: orphaned vnode (rsrc)");
            cp->c_rsrcfork = fp;
            cvpp = &cp->c_rsrc_vp;
            if ((tvp = cp->c_vp) != NULLVP)
                cp->c_flag |= C_NEED_DVNODE_PUT;
        } else {
            if (cp->c_datafork != NULL)
                panic("hfs_getnewvnode: orphaned data fork");
            if (cp->c_vp != NULL)
                panic("hfs_getnewvnode: orphaned vnode (data)");
            cp->c_datafork = fp;
            cvpp = &cp->c_vp;
            if ((tvp = cp->c_rsrc_vp) != NULLVP)
                cp->c_flag |= C_NEED_RVNODE_PUT;
        }
    }
    if (tvp != NULLVP) {
        /*
         * Grab an iocount on the vnode we weren't interested in
         * (i.e. we want the resource fork but the cnode already has
         * the data fork) to prevent it from being recycled by us
         * when we call vnode_create, which would result in a deadlock
         * when we try to take the cnode lock in hfs_vnop_fsync or
         * hfs_vnop_reclaim.
         *
         * vnode_get can be called here because we already hold the
         * cnode lock, which will prevent the vnode from changing
         * identity until we drop it. vnode_get will not block waiting
         * for a change of state; however, it will return an error if
         * the current iocount == 0 and we've already started to
         * terminate the vnode. We don't need/want to grab an iocount
         * in that case, since we can't cause the filesystem to be
         * re-entered on this thread for this vp.
         *
         * The matching vnode_put will happen in hfs_unlock
         * after we've dropped the cnode lock.
         */
        if (vnode_get(tvp) != 0)
            cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
    }
    vfsp.vnfs_mp = mp;
    vfsp.vnfs_vtype = vtype;
    vfsp.vnfs_str = "hfs";
    if ((cp->c_flag & C_HARDLINK) && (vtype == VDIR)) {
        vfsp.vnfs_dvp = NULL;  /* no parent for me! */
        vfsp.vnfs_cnp = NULL;  /* no name for me! */
    } else {
        vfsp.vnfs_dvp = dvp;
        vfsp.vnfs_cnp = cnp;
    }
    vfsp.vnfs_fsnode = cp;
#if FIFO
    if (vtype == VFIFO)
        vfsp.vnfs_vops = hfs_fifoop_p;
    else
#endif
    if (vtype == VBLK || vtype == VCHR)
        vfsp.vnfs_vops = hfs_specop_p;
    else
        vfsp.vnfs_vops = hfs_vnodeop_p;

    if (vtype == VBLK || vtype == VCHR)
        vfsp.vnfs_rdev = attrp->ca_rdev;
    else
        vfsp.vnfs_rdev = 0;

    if (forkp)
        vfsp.vnfs_filesize = forkp->cf_size;
    else
        vfsp.vnfs_filesize = 0;

    vfsp.vnfs_flags = VNFS_ADDFSREF;
    if (dvp == NULLVP || cnp == NULL || !(cnp->cn_flags & MAKEENTRY))
        vfsp.vnfs_flags |= VNFS_NOCACHE;

    /* Tag system files */
    vfsp.vnfs_marksystem = issystemfile;

    /* Tag root directory */
    if (descp->cd_cnid == kHFSRootFolderID)
        vfsp.vnfs_markroot = 1;
    else
        vfsp.vnfs_markroot = 0;

    if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
        if (fp) {
            if (fp == cp->c_datafork)
                cp->c_datafork = NULL;
            else
                cp->c_rsrcfork = NULL;

            FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
        }
        /*
         * If this is a newly created cnode or a vnode reclaim
         * occurred during the attachment, then clean up the cnode.
         */
        if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
            hfs_chash_abort(cp);
            hfs_reclaim_cnode(cp);
        } else {
            hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
            hfs_unlock(cp);
        }
        *vpp = NULL;
        return (retval);
    }
    vp = *cvpp;
    vnode_settag(vp, VT_HFS);
    if (cp->c_flag & C_HARDLINK) {
        vnode_setmultipath(vp);
    }
    /*
     * Tag resource fork vnodes as needing a VNOP_INACTIVE
     * so that any deferred removes (open unlinked files)
     * have the chance to process the resource fork.
     */
    if (VNODE_IS_RSRC(vp)) {
        /* Force VL_NEEDINACTIVE on this vnode */
        vnode_ref(vp);
        vnode_rele(vp);
    }
    hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);

    /*
     * Stop tracking an active hot file.
     */
    if (!(flags & GNV_CREATE) && (vtype != VDIR) && !issystemfile) {
        (void) hfs_removehotfile(vp);
    }

    *vpp = vp;
    return (0);
}
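
/*
 * Illustrative caller sketch (not in the original file): the lookup path
 * pairs a catalog lookup with hfs_getnewvnode() roughly like this,
 * assuming cndesc/cnattr/cnfork are locals filled in by cat_lookup():
 *
 *      lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
 *      error = cat_lookup(hfsmp, &cndesc, 0, &cndesc, &cnattr, &cnfork, NULL);
 *      hfs_systemfile_unlock(hfsmp, lockflags);
 *      if (error == 0)
 *              error = hfs_getnewvnode(hfsmp, dvp, cnp, &cndesc, 0,
 *                                      &cnattr, &cnfork, &vp);
 *      // on success, vp holds an iocount and VTOC(vp) is locked
 *
 * See hfs_lookup() in hfs_vnops.c for the real flag handling.
 */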


static void
hfs_reclaim_cnode(struct cnode *cp)
{
#if QUOTA
    int i;

    for (i = 0; i < MAXQUOTAS; i++) {
        if (cp->c_dquot[i] != NODQUOT) {
            dqreclaim(cp->c_dquot[i]);
            cp->c_dquot[i] = NODQUOT;
        }
    }
#endif /* QUOTA */

    /*
     * If the descriptor has a name then release it
     */
    if ((cp->c_desc.cd_flags & CD_HASBUF) && (cp->c_desc.cd_nameptr != 0)) {
        const char *nameptr;

        nameptr = (const char *) cp->c_desc.cd_nameptr;
        cp->c_desc.cd_nameptr = 0;
        cp->c_desc.cd_flags &= ~CD_HASBUF;
        cp->c_desc.cd_namelen = 0;
        vfs_removename(nameptr);
    }

    lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
    lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
    bzero(cp, sizeof(struct cnode));
    FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
}


__private_extern__
int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
{
    struct cat_attr attr;
    struct cat_desc cndesc;
    int stillvalid = 0;
    int lockflags;

    /* System files are always valid */
    if (cnid < kHFSFirstUserCatalogNodeID)
        return (1);

    /* XXX optimization: check write count in dvp */

    lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

    if (dvp && cnp) {
        bzero(&cndesc, sizeof(cndesc));
        cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
        cndesc.cd_namelen = cnp->cn_namelen;
        cndesc.cd_parentcnid = VTOC(dvp)->c_fileid;
        cndesc.cd_hint = VTOC(dvp)->c_childhint;

        if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
            (cnid == attr.ca_fileid)) {
            stillvalid = 1;
        }
    } else {
        if (cat_idlookup(hfsmp, cnid, 0, NULL, NULL, NULL) == 0) {
            stillvalid = 1;
        }
    }
    hfs_systemfile_unlock(hfsmp, lockflags);

    return (stillvalid);
}
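
/*
 * Illustrative call patterns for hfs_valid_cnode (not from the original
 * file): hfs_getnewvnode() above validates a freshly allocated cnode by
 * name when it has one, and by file ID otherwise:
 *
 *      hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid);   // name + parent lookup
 *      hfs_valid_cnode(hfsmp, NULL, NULL, cp->c_fileid); // cat_idlookup() path
 *
 * Either form returns 1 if a matching catalog record still exists.
 */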

/*
 * Touch cnode times based on c_touch_xxx flags
 *
 * cnode must be locked exclusive
 *
 * This will also update the volume modify time
 */
__private_extern__
void
hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
{
    /* don't modify times if volume is read-only */
    if (hfsmp->hfs_flags & HFS_READ_ONLY) {
        cp->c_touch_acctime = FALSE;
        cp->c_touch_chgtime = FALSE;
        cp->c_touch_modtime = FALSE;
    }
    else if (hfsmp->hfs_flags & HFS_STANDARD) {
        /* HFS Standard doesn't support access times */
        cp->c_touch_acctime = FALSE;
    }

    /*
     * Skip access time updates if:
     *  . MNT_NOATIME is set
     *  . a file system freeze is in progress
     *  . a file system resize is in progress
     */
    if (cp->c_touch_acctime) {
        if ((vfs_flags(hfsmp->hfs_mp) & MNT_NOATIME) ||
            (hfsmp->hfs_freezing_proc != NULL) ||
            (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS))
            cp->c_touch_acctime = FALSE;
    }
    if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
        struct timeval tv;
        int touchvol = 0;

        microtime(&tv);

        if (cp->c_touch_acctime) {
            cp->c_atime = tv.tv_sec;
            /*
             * When the access time is the only thing changing,
             * make sure it's sufficiently newer before
             * committing it to disk.
             */
            if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
                 ATIME_ONDISK_ACCURACY)) {
                cp->c_flag |= C_MODIFIED;
            }
            cp->c_touch_acctime = FALSE;
        }
        if (cp->c_touch_modtime) {
            cp->c_mtime = tv.tv_sec;
            cp->c_touch_modtime = FALSE;
            cp->c_flag |= C_MODIFIED;
            touchvol = 1;
#if 1
            /*
             * HFS dates that WE set must be adjusted for DST
             */
            if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
                cp->c_mtime += 3600;
            }
#endif
        }
        if (cp->c_touch_chgtime) {
            cp->c_ctime = tv.tv_sec;
            cp->c_touch_chgtime = FALSE;
            cp->c_flag |= C_MODIFIED;
            touchvol = 1;
        }

        /* Touch the volume modtime if needed */
        if (touchvol) {
            MarkVCBDirty(hfsmp);
            HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
        }
    }
}
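
/*
 * Illustrative usage (not in the original file): callers mark which
 * timestamps need updating and then call hfs_touchtimes() with the
 * cnode lock held exclusive, e.g. after modifying a file:
 *
 *      cp->c_touch_chgtime = TRUE;
 *      cp->c_touch_modtime = TRUE;
 *      hfs_touchtimes(hfsmp, cp);
 *
 * The flags are consumed (reset to FALSE) here, and C_MODIFIED is set
 * so a later hfs_update() pushes the times into the catalog record.
 */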

/*
 * Lock a cnode.
 */
__private_extern__
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
    void * thread = current_thread();

    if (cp->c_lockowner == thread) {
        /*
         * Only the extents and bitmap files support lock recursion.
         */
        if ((cp->c_fileid == kHFSExtentsFileID) ||
            (cp->c_fileid == kHFSAllocationFileID)) {
            cp->c_syslockcount++;
        } else {
            panic("hfs_lock: locking against myself!");
        }
    } else if (locktype == HFS_SHARED_LOCK) {
        lck_rw_lock_shared(&cp->c_rwlock);
        cp->c_lockowner = HFS_SHARED_OWNER;

    } else /* HFS_EXCLUSIVE_LOCK */ {
        lck_rw_lock_exclusive(&cp->c_rwlock);
        cp->c_lockowner = thread;

        /*
         * Only the extents and bitmap files support lock recursion.
         */
        if ((cp->c_fileid == kHFSExtentsFileID) ||
            (cp->c_fileid == kHFSAllocationFileID)) {
            cp->c_syslockcount = 1;
        }
    }

#ifdef HFS_CHECK_LOCK_ORDER
    /*
     * Regular cnodes (non-system files) cannot be locked
     * while holding the journal lock or a system file lock.
     */
    if (!(cp->c_desc.cd_flags & CD_ISMETA) &&
        ((cp->c_fileid > kHFSFirstUserCatalogNodeID) || (cp->c_fileid == kHFSRootFolderID))) {
        vnode_t vp = NULLVP;

        /* Find corresponding vnode. */
        if (cp->c_vp != NULLVP && VTOC(cp->c_vp) == cp) {
            vp = cp->c_vp;
        } else if (cp->c_rsrc_vp != NULLVP && VTOC(cp->c_rsrc_vp) == cp) {
            vp = cp->c_rsrc_vp;
        }
        if (vp != NULLVP) {
            struct hfsmount *hfsmp = VTOHFS(vp);

            if (hfsmp->jnl && (journal_owner(hfsmp->jnl) == thread)) {
                /* This will eventually be a panic here. */
                printf("hfs_lock: bad lock order (cnode after journal)\n");
            }
            if (hfsmp->hfs_catalog_cp && hfsmp->hfs_catalog_cp->c_lockowner == thread) {
                panic("hfs_lock: bad lock order (cnode after catalog)");
            }
            if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == thread) {
                panic("hfs_lock: bad lock order (cnode after attribute)");
            }
            if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == thread) {
                panic("hfs_lock: bad lock order (cnode after extents)");
            }
        }
    }
#endif /* HFS_CHECK_LOCK_ORDER */

    /*
     * Skip cnodes that no longer exist (were deleted).
     */
    if ((locktype != HFS_FORCE_LOCK) &&
        ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
        (cp->c_flag & C_NOEXISTS)) {
        hfs_unlock(cp);
        return (ENOENT);
    }
    return (0);
}
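
/*
 * Illustrative usage (not in the original file): because hfs_lock() can
 * fail with ENOENT for a deleted cnode, ordinary callers check the
 * return value, while paths that must proceed regardless (inactive,
 * reclaim) pass HFS_FORCE_LOCK and ignore it:
 *
 *      if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
 *              return (error);   // cnode was deleted out from under us
 *      ...
 *      hfs_unlock(cp);
 */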

/*
 * Lock a pair of cnodes.
 */
__private_extern__
int
hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
{
    struct cnode *first, *last;
    int error;

    /*
     * If cnodes match then just lock one.
     */
    if (cp1 == cp2) {
        return hfs_lock(cp1, locktype);
    }

    /*
     * Lock in cnode address order.
     */
    if (cp1 < cp2) {
        first = cp1;
        last = cp2;
    } else {
        first = cp2;
        last = cp1;
    }

    if ((error = hfs_lock(first, locktype))) {
        return (error);
    }
    if ((error = hfs_lock(last, locktype))) {
        hfs_unlock(first);
        return (error);
    }
    return (0);
}
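
/*
 * Illustrative usage (not in the original file): a directory operation
 * that touches both the parent and the child locks them as a pair, so
 * two racing threads always acquire the locks in the same (address)
 * order and cannot deadlock against each other:
 *
 *      if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK)))
 *              return (error);
 *      ... modify both cnodes ...
 *      hfs_unlockpair(dcp, cp);
 */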

/*
 * Check the ordering of two cnodes. Return true if they are in order.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
    if (cp1 == cp2)
        return (0);
    if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
        return (1);
    if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
        return (0);
    /*
     * Locking order is cnode address order.
     */
    return (cp1 < cp2);
}

/*
 * Acquire 4 cnode locks.
 *   - locked in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
__private_extern__
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype)
{
    struct cnode * a[3];
    struct cnode * b[3];
    struct cnode * list[4];
    struct cnode * tmp;
    int i, j, k;
    int error;

    if (hfs_isordered(cp1, cp2)) {
        a[0] = cp1; a[1] = cp2;
    } else {
        a[0] = cp2; a[1] = cp1;
    }
    if (hfs_isordered(cp3, cp4)) {
        b[0] = cp3; b[1] = cp4;
    } else {
        b[0] = cp4; b[1] = cp3;
    }
    a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
    b[2] = (struct cnode *)0xffffffff;  /* sentinel value */

    /*
     * Build the lock list, skipping over duplicates
     */
    for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
        tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
        if (k == 0 || tmp != list[k-1])
            list[k++] = tmp;
    }

    /*
     * Now we can lock using list[0] through list[k-1].
     * Skip over NULL entries.
     */
    for (i = 0; i < k; ++i) {
        if (list[i])
            if ((error = hfs_lock(list[i], locktype))) {
                /* Drop any locks we acquired. */
                while (--i >= 0) {
                    if (list[i])
                        hfs_unlock(list[i]);
                }
                return (error);
            }
    }
    return (0);
}
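
/*
 * Worked example of the duplicate-skipping merge above (illustrative):
 * with cp1 == cp3 and addresses cp2 < cp1 < cp4, the sorted pairs are
 * a = { cp2, cp1, SENTINEL } and b = { cp1, cp4, SENTINEL }. The merge
 * visits cp2, cp1, cp1, cp4; the "tmp != list[k-1]" test drops the
 * second cp1, yielding list = { cp2, cp1, cp4 } with k == 3, so each
 * distinct cnode is locked exactly once, in address order. The
 * sentinels make hfs_isordered() always prefer the non-sentinel side,
 * so the merge drains both arrays without indexing past them.
 */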


/*
 * Unlock a cnode.
 */
__private_extern__
void
hfs_unlock(struct cnode *cp)
{
    vnode_t rvp = NULLVP;
    vnode_t vp = NULLVP;
    u_int32_t c_flag;
    void *lockowner;

    /*
     * Only the extents and bitmap files support lock recursion.
     */
    if ((cp->c_fileid == kHFSExtentsFileID) ||
        (cp->c_fileid == kHFSAllocationFileID)) {
        if (--cp->c_syslockcount > 0) {
            return;
        }
    }
    c_flag = cp->c_flag;
    cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);

    if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
        vp = cp->c_vp;
    }
    if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
        rvp = cp->c_rsrc_vp;
    }

    lockowner = cp->c_lockowner;
    if (lockowner == current_thread()) {
        cp->c_lockowner = NULL;
        lck_rw_unlock_exclusive(&cp->c_rwlock);
    } else {
        lck_rw_unlock_shared(&cp->c_rwlock);
    }

    /* Perform any vnode post processing after the cnode lock is dropped. */
    if (vp) {
        if (c_flag & C_NEED_DATA_SETSIZE)
            ubc_setsize(vp, 0);
        if (c_flag & C_NEED_DVNODE_PUT)
            vnode_put(vp);
    }
    if (rvp) {
        if (c_flag & C_NEED_RSRC_SETSIZE)
            ubc_setsize(rvp, 0);
        if (c_flag & C_NEED_RVNODE_PUT)
            vnode_put(rvp);
    }
}

/*
 * Unlock a pair of cnodes.
 */
__private_extern__
void
hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
{
    hfs_unlock(cp1);
    if (cp2 != cp1)
        hfs_unlock(cp2);
}

/*
 * Unlock a group of cnodes.
 */
__private_extern__
void
hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
{
    struct cnode * list[4];
    int i, k = 0;

    if (cp1) {
        hfs_unlock(cp1);
        list[k++] = cp1;
    }
    if (cp2) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp2)
                goto skip1;
        }
        hfs_unlock(cp2);
        list[k++] = cp2;
    }
skip1:
    if (cp3) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp3)
                goto skip2;
        }
        hfs_unlock(cp3);
        list[k++] = cp3;
    }
skip2:
    if (cp4) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp4)
                return;
        }
        hfs_unlock(cp4);
    }
}


/*
 * Protect a cnode against truncation.
 *
 * Used mainly by read/write, since they don't hold the
 * cnode lock across calls to the cluster layer.
 *
 * The process doing a truncation must take the lock
 * exclusive. The read/write processes can take it
 * non-exclusive.
 */
__private_extern__
void
hfs_lock_truncate(struct cnode *cp, int exclusive)
{
#ifdef HFS_CHECK_LOCK_ORDER
    if (cp->c_lockowner == current_thread())
        panic("hfs_lock_truncate: cnode %p locked!", cp);
#endif /* HFS_CHECK_LOCK_ORDER */

    if (exclusive)
        lck_rw_lock_exclusive(&cp->c_truncatelock);
    else
        lck_rw_lock_shared(&cp->c_truncatelock);
}

__private_extern__
void
hfs_unlock_truncate(struct cnode *cp, int exclusive)
{
    if (exclusive) {
        lck_rw_unlock_exclusive(&cp->c_truncatelock);
    } else {
        lck_rw_unlock_shared(&cp->c_truncatelock);
    }
}
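
/*
 * Illustrative locking pattern (not in the original file): the read and
 * write paths take the truncate lock shared around their calls into the
 * cluster layer, while truncation takes it exclusive, keeping the file
 * size stable during I/O. A hypothetical reader would look like:
 *
 *      hfs_lock_truncate(cp, FALSE);   // shared
 *      ... cluster_read() / cluster_write() without the cnode lock ...
 *      hfs_unlock_truncate(cp, FALSE);
 *
 * Truncating callers instead hold the lock exclusive (TRUE), as in
 * hfs_vnop_inactive() and hfs_vnop_reclaim() above.
 */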