/*
 * Copyright (c) 2002-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/ubc.h>
#include <sys/quota.h>
#include <sys/kdebug.h>

#include <kern/locks.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <hfs/hfs.h>
#include <hfs/hfs_catalog.h>
#include <hfs/hfs_cnode.h>
#include <hfs/hfs_quota.h>

extern int prtactive;

extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

static int hfs_filedone(struct vnode *vp, vfs_context_t context);

static void hfs_reclaim_cnode(struct cnode *);

static int hfs_isordered(struct cnode *, struct cnode *);

/*
 * Last reference to a cnode.  If necessary, write or delete it.
 */
__private_extern__
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	int recycle = 0;
	int forkcount = 0;
	int truncated = 0;
	int started_tr = 0;
	int took_trunc_lock = 0;
	cat_cookie_t cookie;
	int cat_reserve = 0;
	int lockflags;
	enum vtype v_type;

	v_type = vnode_vtype(vp);
	cp = VTOC(vp);

	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
	    (hfsmp->hfs_freezing_proc == p)) {
		return (0);
	}

	/*
	 * Ignore nodes related to stale file handles.
	 */
	if (cp->c_mode == 0) {
		vnode_recycle(vp);
		return (0);
	}

	if (v_type == VREG || v_type == VLNK) {
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;
	}

	(void) hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * We should lock the cnode before checking the flags in the
	 * condition below, and should unlock the cnode before calling
	 * ubc_setsize(), as the cluster code can call other HFS vnops
	 * which will try to acquire the same cnode lock and deadlock.
	 */
	if ((v_type == VREG || v_type == VLNK) &&
	    (cp->c_flag & C_DELETED) &&
	    (VTOF(vp)->ff_blocks != 0)) {
		hfs_unlock(cp);
		ubc_setsize(vp, 0);
		(void) hfs_lock(cp, HFS_FORCE_LOCK);
	}

	if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
		hfs_filedone(vp, ap->a_context);
	}
	/*
	 * Remove any directory hints or cached origins
	 */
	if (v_type == VDIR) {
		hfs_reldirhints(cp, 0);
	}

	if (cp->c_flag & C_HARDLINK) {
		hfs_relorigins(cp);
	}

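	/*
	 * Count the in-core forks; a deleted cnode is removed from the
	 * catalog only when its last fork goes inactive (see the
	 * postponed deletion check below).
	 */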
	if (cp->c_datafork)
		++forkcount;
	if (cp->c_rsrcfork)
		++forkcount;

	/* If needed, get rid of any fork's data for a deleted file */
	if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
		if (VTOF(vp)->ff_blocks != 0) {
			/*
			 * Since we're already inside a transaction,
			 * tell hfs_truncate to skip the ubc_setsize.
			 */
			error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
			if (error)
				goto out;
			truncated = 1;
		}
		recycle = 1;

		/*
		 * Check if there are any resource fork blocks that need
		 * to be reclaimed.  This covers the case where there is a
		 * resource fork but it's not in core.
		 */
		if ((cp->c_blocks > 0) && (forkcount == 1) && (vp != cp->c_rsrc_vp)) {
			struct vnode *rvp = NULLVP;

			error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
			if (error)
				goto out;
			/*
			 * Defer the vnode_put and ubc_setsize on rvp until hfs_unlock().
			 */
			cp->c_flag |= C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE;
			error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, ap->a_context);
			if (error)
				goto out;
			vnode_recycle(rvp);  /* all done with this vnode */
		}
	}

	// If needed, get rid of any xattrs that this file may have.
	// Note that this must happen outside of any other transactions
	// because it starts/ends its own transactions and grabs its
	// own locks.  This is to prevent a file with a lot of attributes
	// from creating a transaction that is too large (which panics).
	//
	if ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0 && (cp->c_flag & C_DELETED)) {
		hfs_removeallattr(hfsmp, cp->c_fileid);
	}

	/*
	 * Check for a postponed deletion.
	 * (only delete a cnode when the last fork goes inactive)
	 */
	if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
		/*
		 * Mark cnode in transit so that no one can get this
		 * cnode from the cnode hash.
		 */
		// hfs_chash_mark_in_transit(cp);
		// XXXdbg - remove the cnode from the hash table since it's deleted
		//          otherwise someone could go to sleep on the cnode and not
		//          be woken up until this vnode gets recycled which could be
		//          a very long time...
		hfs_chashremove(cp);

		cp->c_flag |= C_NOEXISTS;   // XXXdbg
		cp->c_rdev = 0;

		if (started_tr == 0) {
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				goto out;
			}
			started_tr = 1;
		}

		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
		cat_reserve = 1;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

		if (cp->c_blocks > 0) {
			printf("hfs_inactive: deleting non-empty%sfile %d, "
			       "blks %d\n", VNODE_IS_RSRC(vp) ? " rsrc " : " ",
			       (int)cp->c_fileid, (int)cp->c_blocks);
		}

		//
		// release the name pointer in the descriptor so that
		// cat_delete() will use the file-id to do the deletion.
		// in the case of hard links this is imperative (in the
		// case of regular files the fileid and cnid are the
		// same so it doesn't matter).
		//
		cat_releasedesc(&cp->c_desc);

		/*
		 * The descriptor name may be zero,
		 * in which case the fileid is used.
		 */
		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && truncated && (error != ENXIO))
			printf("hfs_inactive: couldn't delete a truncated file!\n");

		/* Update HFS Private Data dir */
		if (error == 0) {
			hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--;
			if (vnode_isdir(vp)) {
				DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
			}
			(void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
			                 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
		}

		hfs_systemfile_unlock(hfsmp, lockflags);

		if (error)
			goto out;

#if QUOTA
		if (hfsmp->hfs_flags & HFS_QUOTAS)
			(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

		cp->c_mode = 0;
		cp->c_flag &= ~C_DELETED;
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;

		if (error == 0)
			hfs_volupdate(hfsmp, (v_type == VDIR) ? VOL_RMDIR : VOL_RMFILE, 0);
	}

	/*
	 * A file may have had delayed allocations, in which case hfs_update
	 * would not have updated the catalog record (cat_update).  We need
	 * to do that now, before we lose our fork data.  We also need to
	 * force the update, or hfs_update will again skip the cat_update.
	 */
	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime) {
			cp->c_flag |= C_FORCEUPDATE;
		}
		hfs_update(vp, 0);
	}
out:
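	/*
	 * Common exit path: release the catalog reservation, end any
	 * transaction we started, then drop the cnode and truncate locks
	 * in the reverse order they were acquired.
	 */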
	if (cat_reserve)
		cat_postflight(hfsmp, &cookie, p);

	// XXXdbg - have to do this because a goto could have come here
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	hfs_unlock(cp);

	if (took_trunc_lock)
		hfs_unlock_truncate(cp, TRUE);

	/*
	 * If we are done with the vnode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (cp->c_mode == 0 || recycle)
		vnode_recycle(vp);

	return (error);
}

/*
 * File clean-up (zero fill and shrink peof).
 */
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	off_t leof;
	u_long blks, blocksize;

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	leof = fp->ff_size;

	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
		return (0);

	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * Explicitly zero out the areas of file
	 * that are currently marked invalid.
	 */
	while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
		struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
		off_t start = invalid_range->rl_start;
		off_t end = invalid_range->rl_end;

		/* The range about to be written must be validated
		 * first, so that VNOP_BLOCKMAP() will return the
		 * appropriate mapping for the cluster code:
		 */
		rl_remove(start, end, &fp->ff_invalidranges);

		hfs_unlock(cp);
		(void) cluster_write(vp, (struct uio *) 0,
		                     leof, end + 1, start, (off_t)0,
		                     IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
		hfs_lock(cp, HFS_FORCE_LOCK);
		cp->c_flag |= C_MODIFIED;
	}
	cp->c_flag &= ~C_ZFWANTSYNC;
	cp->c_zftimeout = 0;
	blocksize = VTOVCB(vp)->blockSize;
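	/* Round the logical EOF up to a whole number of allocation blocks. */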
	blks = leof / blocksize;
	if (((off_t)blks * (off_t)blocksize) != leof)
		blks++;
	/*
	 * Shrink the peof to the smallest size necessary to contain the leof.
	 */
	if (blks < fp->ff_blocks)
		(void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * If the hfs_truncate didn't happen to flush the vnode's
	 * information out to disk, force it to be updated now that
	 * all invalid ranges have been zero-filled and validated:
	 */
	if (cp->c_flag & C_MODIFIED) {
		hfs_update(vp, 0);
	}
	return (0);
}


/*
 * Reclaim a cnode so that it can be used for other purposes.
 */
__private_extern__
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp = NULL;
	struct filefork *altfp = NULL;
	int reclaim_cnode = 0;

	(void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
	cp = VTOC(vp);

	/*
	 * Check if a deleted resource fork vnode missed a
	 * VNOP_INACTIVE call and requires truncation.
	 */
	if (VNODE_IS_RSRC(vp) &&
	    (cp->c_flag & C_DELETED) &&
	    (VTOF(vp)->ff_blocks != 0)) {
		hfs_unlock(cp);
		ubc_setsize(vp, 0);

		hfs_lock_truncate(cp, TRUE);
		(void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);

		(void) hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);

		hfs_unlock_truncate(cp, TRUE);
	}
	/*
	 * A file may have had delayed allocations, in which case hfs_update
	 * would not have updated the catalog record (cat_update).  We need
	 * to do that now, before we lose our fork data.  We also need to
	 * force the update, or hfs_update will again skip the cat_update.
	 */
	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime) {
			cp->c_flag |= C_FORCEUPDATE;
		}
		hfs_update(vp, 0);
	}

	/*
	 * Keep track of an inactive hot file.
	 */
	if (!vnode_isdir(vp) &&
	    !vnode_issystem(vp) &&
	    !(cp->c_flag & (C_DELETED | C_NOEXISTS))) {
		(void) hfs_addhotfile(vp);
	}
	vnode_removefsref(vp);

	/*
	 * Find file fork for this vnode (if any)
	 * Also check if another fork is active
	 */
	if (cp->c_vp == vp) {
		fp = cp->c_datafork;
		altfp = cp->c_rsrcfork;

		cp->c_datafork = NULL;
		cp->c_vp = NULL;
	} else if (cp->c_rsrc_vp == vp) {
		fp = cp->c_rsrcfork;
		altfp = cp->c_datafork;

		cp->c_rsrcfork = NULL;
		cp->c_rsrc_vp = NULL;
	} else {
		panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
	}
	/*
	 * On the last fork, remove the cnode from its hash chain.
	 */
	if (altfp == NULL) {
		/* If we can't remove it then the cnode must persist! */
		if (hfs_chashremove(cp) == 0)
			reclaim_cnode = 1;
		/*
		 * Remove any directory hints
		 */
		if (vnode_isdir(vp)) {
			hfs_reldirhints(cp, 0);
		}

		if (cp->c_flag & C_HARDLINK) {
			hfs_relorigins(cp);
		}
	}
	/* Release the file fork and related data */
	if (fp) {
		/* Dump cached symlink data */
		if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
			FREE(fp->ff_symlinkptr, M_TEMP);
		}
		FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
	}

	/*
	 * If there was only one active fork then we can release the cnode.
	 */
	if (reclaim_cnode) {
		hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
		hfs_reclaim_cnode(cp);
	} else /* cnode in use */ {
		hfs_unlock(cp);
	}

	vnode_clearfsnode(vp);
	return (0);
}


extern int (**hfs_vnodeop_p) (void *);
extern int (**hfs_specop_p)  (void *);
#if FIFO
extern int (**hfs_fifoop_p)  (void *);
#endif

/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked.
 */
__private_extern__
int
hfs_getnewvnode(
	struct hfsmount *hfsmp,
	struct vnode *dvp,
	struct componentname *cnp,
	struct cat_desc *descp,
	int flags,
	struct cat_attr *attrp,
	struct cat_fork *forkp,
	struct vnode **vpp)
{
	struct mount *mp = HFSTOVFS(hfsmp);
	struct vnode *vp = NULL;
	struct vnode **cvpp;
	struct vnode *tvp = NULLVP;
	struct cnode *cp = NULL;
	struct filefork *fp = NULL;
	int retval;
	int issystemfile;
	int wantrsrc;
	struct vnode_fsparam vfsp;
	enum vtype vtype;
#if QUOTA
	int i;
#endif /* QUOTA */

	if (attrp->ca_fileid == 0) {
		*vpp = NULL;
		return (ENOENT);
	}

#if !FIFO
	if (IFTOVT(attrp->ca_mode) == VFIFO) {
		*vpp = NULL;
		return (ENOTSUP);
	}
#endif /* !FIFO */
	vtype = IFTOVT(attrp->ca_mode);
	issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
	wantrsrc = flags & GNV_WANTRSRC;

#ifdef HFS_CHECK_LOCK_ORDER
	/*
	 * The only case where it's permissible to hold the parent cnode
	 * lock is during a create operation (hfs_makenode) or when
	 * we don't need the cnode lock (GNV_SKIPLOCK).
	 */
	if ((dvp != NULL) &&
	    (flags & (GNV_CREATE | GNV_SKIPLOCK)) == 0 &&
	    VTOC(dvp)->c_lockowner == current_thread()) {
		panic("hfs_getnewvnode: unexpected hold of parent cnode %p", VTOC(dvp));
	}
#endif /* HFS_CHECK_LOCK_ORDER */

	/*
	 * Get a cnode (new or existing)
	 */
	cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (flags & GNV_SKIPLOCK));

	/*
	 * If the id is no longer valid for lookups we'll get back a NULL cp.
	 */
	if (cp == NULL) {
		return (ENOENT);
	}

	/* Hardlinks may need an updated catalog descriptor */
	if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
		replace_desc(cp, descp);
	}
	/* Check if we found a matching vnode */
	if (*vpp != NULL)
		return (0);

	/*
	 * If this is a new cnode then initialize it.
	 */
	if (ISSET(cp->c_hflag, H_ALLOC)) {
		lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

		/* Make sure it's still valid (i.e. exists on disk). */
		if (!(flags & GNV_CREATE) &&
		    !hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
			*vpp = NULL;
			return (ENOENT);
		}
		bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
		bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

		/* The name was inherited so clear descriptor state... */
		descp->cd_namelen = 0;
		descp->cd_nameptr = NULL;
		descp->cd_flags &= ~CD_HASBUF;

		/* Tag hardlinks */
		if ((vtype == VREG || vtype == VDIR) &&
		    ((descp->cd_cnid != attrp->ca_fileid) ||
		     (attrp->ca_recflags & kHFSHasLinkChainMask))) {
			cp->c_flag |= C_HARDLINK;
		}
		/*
		 * Fix-up dir link counts.
		 *
		 * Earlier versions of Leopard used ca_linkcount for posix
		 * nlink support (effectively the sub-directory count + 2).
		 * That is now accomplished using the ca_dircount field with
		 * the corresponding kHFSHasFolderCountMask flag.
		 *
		 * For directories the ca_linkcount is the true link count,
		 * tracking the number of actual hardlinks to a directory.
		 *
		 * We only do this if the mount has HFS_FOLDERCOUNT set;
		 * at the moment, we only set that for HFSX volumes.
		 */
		if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
		    (vtype == VDIR) &&
		    !(attrp->ca_recflags & kHFSHasFolderCountMask) &&
		    (cp->c_attr.ca_linkcount > 1)) {
			if (cp->c_attr.ca_entries == 0)
				cp->c_attr.ca_dircount = 0;
			else
				cp->c_attr.ca_dircount = cp->c_attr.ca_linkcount - 2;

			cp->c_attr.ca_linkcount = 1;
			cp->c_attr.ca_recflags |= kHFSHasFolderCountMask;
			if (!(hfsmp->hfs_flags & HFS_READ_ONLY))
				cp->c_flag |= C_MODIFIED;
		}
#if QUOTA
		if (hfsmp->hfs_flags & HFS_QUOTAS) {
			for (i = 0; i < MAXQUOTAS; i++)
				cp->c_dquot[i] = NODQUOT;
		}
#endif /* QUOTA */
	}

	if (vtype == VDIR) {
		if (cp->c_vp != NULL)
			panic("hfs_getnewvnode: orphaned vnode (data)");
		cvpp = &cp->c_vp;
	} else {
		if (forkp && attrp->ca_blocks < forkp->cf_blocks)
			panic("hfs_getnewvnode: bad ca_blocks (too small)");
		/*
		 * Allocate and initialize a file fork...
		 */
		MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
		            M_HFSFORK, M_WAITOK);
		fp->ff_cp = cp;
		if (forkp)
			bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
		else
			bzero(&fp->ff_data, sizeof(struct cat_fork));
		rl_init(&fp->ff_invalidranges);
		fp->ff_sysfileinfo = 0;

		if (wantrsrc) {
			if (cp->c_rsrcfork != NULL)
				panic("hfs_getnewvnode: orphaned rsrc fork");
			if (cp->c_rsrc_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (rsrc)");
			cp->c_rsrcfork = fp;
			cvpp = &cp->c_rsrc_vp;
			if ((tvp = cp->c_vp) != NULLVP)
				cp->c_flag |= C_NEED_DVNODE_PUT;
		} else {
			if (cp->c_datafork != NULL)
				panic("hfs_getnewvnode: orphaned data fork");
			if (cp->c_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (data)");
			cp->c_datafork = fp;
			cvpp = &cp->c_vp;
			if ((tvp = cp->c_rsrc_vp) != NULLVP)
				cp->c_flag |= C_NEED_RVNODE_PUT;
		}
	}
	if (tvp != NULLVP) {
		/*
		 * Grab an iocount on the vnode we weren't
		 * interested in (i.e. we want the resource fork
		 * but the cnode already has the data fork)
		 * to prevent it from being
		 * recycled by us when we call vnode_create,
		 * which would result in a deadlock when we
		 * try to take the cnode lock in hfs_vnop_fsync or
		 * hfs_vnop_reclaim... vnode_get can be called here
		 * because we already hold the cnode lock, which will
		 * prevent the vnode from changing identity until
		 * we drop it... vnode_get will not block waiting for
		 * a change of state... however, it will return an
		 * error if the current iocount == 0 and we've already
		 * started to terminate the vnode... we don't need/want to
		 * grab an iocount in this case since we can't cause
		 * the filesystem to be re-entered on this thread for this vp.
		 *
		 * The matching vnode_put will happen in hfs_unlock
		 * after we've dropped the cnode lock.
		 */
		if (vnode_get(tvp) != 0)
			cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
	}
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "hfs";
	if ((cp->c_flag & C_HARDLINK) && (vtype == VDIR)) {
		vfsp.vnfs_dvp = NULL;  /* no parent for me! */
		vfsp.vnfs_cnp = NULL;  /* no name for me! */
	} else {
		vfsp.vnfs_dvp = dvp;
		vfsp.vnfs_cnp = cnp;
	}
	vfsp.vnfs_fsnode = cp;
#if FIFO
	if (vtype == VFIFO)
		vfsp.vnfs_vops = hfs_fifoop_p;
	else
#endif
	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_vops = hfs_specop_p;
	else
		vfsp.vnfs_vops = hfs_vnodeop_p;

	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_rdev = attrp->ca_rdev;
	else
		vfsp.vnfs_rdev = 0;

	if (forkp)
		vfsp.vnfs_filesize = forkp->cf_size;
	else
		vfsp.vnfs_filesize = 0;

	vfsp.vnfs_flags = VNFS_ADDFSREF;
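	/* Only add a name cache entry if we have a parent, a name, and the caller set MAKEENTRY. */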
	if (dvp == NULLVP || cnp == NULL || !(cnp->cn_flags & MAKEENTRY))
		vfsp.vnfs_flags |= VNFS_NOCACHE;

	/* Tag system files */
	vfsp.vnfs_marksystem = issystemfile;

	/* Tag root directory */
	if (descp->cd_cnid == kHFSRootFolderID)
		vfsp.vnfs_markroot = 1;
	else
		vfsp.vnfs_markroot = 0;

	if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
		if (fp) {
			if (fp == cp->c_datafork)
				cp->c_datafork = NULL;
			else
				cp->c_rsrcfork = NULL;

			FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
		}
		/*
		 * If this is a newly created cnode or a vnode reclaim
		 * occurred during the attachment, then cleanup the cnode.
		 */
		if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
		}
		else {
			hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
			if ((flags & GNV_SKIPLOCK) == 0) {
				hfs_unlock(cp);
			}
		}
		*vpp = NULL;
		return (retval);
	}
	vp = *cvpp;
	vnode_settag(vp, VT_HFS);
	if (cp->c_flag & C_HARDLINK) {
		vnode_setmultipath(vp);
	}
	/*
	 * Tag resource fork vnodes as needing a VNOP_INACTIVE
	 * so that any deferred removes (open unlinked files)
	 * have the chance to process the resource fork.
	 */
	if (VNODE_IS_RSRC(vp)) {
		/* Force VL_NEEDINACTIVE on this vnode */
		vnode_ref(vp);
		vnode_rele(vp);
	}
	hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);

	/*
	 * Stop tracking an active hot file.
	 */
	if (!(flags & GNV_CREATE) && (vtype != VDIR) && !issystemfile) {
		(void) hfs_removehotfile(vp);
	}

	*vpp = vp;
	return (0);
}


static void
hfs_reclaim_cnode(struct cnode *cp)
{
#if QUOTA
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (cp->c_dquot[i] != NODQUOT) {
			dqreclaim(cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
#endif /* QUOTA */

	/*
	 * If the descriptor has a name then release it
	 */
	if ((cp->c_desc.cd_flags & CD_HASBUF) && (cp->c_desc.cd_nameptr != 0)) {
		const char *nameptr;

		nameptr = (const char *) cp->c_desc.cd_nameptr;
		cp->c_desc.cd_nameptr = 0;
		cp->c_desc.cd_flags &= ~CD_HASBUF;
		cp->c_desc.cd_namelen = 0;
		vfs_removename(nameptr);
	}

	lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
	lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
	bzero(cp, sizeof(struct cnode));
	FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
}


__private_extern__
int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
{
	struct cat_attr attr;
	struct cat_desc cndesc;
	int stillvalid = 0;
	int lockflags;

	/* System files are always valid */
	if (cnid < kHFSFirstUserCatalogNodeID)
		return (1);

	/* XXX optimization: check write count in dvp */

	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

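	/*
	 * With a parent and a name, verify by a catalog name lookup;
	 * otherwise fall back to a lookup by file ID.
	 */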
	if (dvp && cnp) {
		bzero(&cndesc, sizeof(cndesc));
		cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
		cndesc.cd_namelen = cnp->cn_namelen;
		cndesc.cd_parentcnid = VTOC(dvp)->c_fileid;
		cndesc.cd_hint = VTOC(dvp)->c_childhint;

		if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
		    (cnid == attr.ca_fileid)) {
			stillvalid = 1;
		}
	} else {
		if (cat_idlookup(hfsmp, cnid, 0, NULL, NULL, NULL) == 0) {
			stillvalid = 1;
		}
	}
	hfs_systemfile_unlock(hfsmp, lockflags);

	return (stillvalid);
}

/*
 * Touch cnode times based on c_touch_xxx flags
 *
 * cnode must be locked exclusive
 *
 * This will also update the volume modify time
 */
__private_extern__
void
hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
{
	/* don't modify times if volume is read-only */
	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
		cp->c_touch_acctime = FALSE;
		cp->c_touch_chgtime = FALSE;
		cp->c_touch_modtime = FALSE;
	}
	else if (hfsmp->hfs_flags & HFS_STANDARD) {
		/* HFS Standard doesn't support access times */
		cp->c_touch_acctime = FALSE;
	}

	/*
	 * Skip access time updates if:
	 *	. MNT_NOATIME is set
	 *	. a file system freeze is in progress
	 *	. a file system resize is in progress
	 */
	if (cp->c_touch_acctime) {
		if ((vfs_flags(hfsmp->hfs_mp) & MNT_NOATIME) ||
		    (hfsmp->hfs_freezing_proc != NULL) ||
		    (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS))
			cp->c_touch_acctime = FALSE;
	}
	if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		struct timeval tv;
		int touchvol = 0;

		microtime(&tv);

		if (cp->c_touch_acctime) {
			cp->c_atime = tv.tv_sec;
			/*
			 * When the access time is the only thing changing
			 * then make sure it's sufficiently newer before
			 * committing it to disk.
			 */
			if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
			     ATIME_ONDISK_ACCURACY)) {
				cp->c_flag |= C_MODIFIED;
			}
			cp->c_touch_acctime = FALSE;
		}
		if (cp->c_touch_modtime) {
			cp->c_mtime = tv.tv_sec;
			cp->c_touch_modtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
#if 1
			/*
			 * HFS dates that WE set must be adjusted for DST
			 */
			if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
				cp->c_mtime += 3600;
			}
#endif
		}
		if (cp->c_touch_chgtime) {
			cp->c_ctime = tv.tv_sec;
			cp->c_touch_chgtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
		}

		/* Touch the volume modtime if needed */
		if (touchvol) {
			MarkVCBDirty(hfsmp);
			HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
		}
	}
}

/*
 * Lock a cnode.
 */
__private_extern__
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
	void * thread = current_thread();

	if (cp->c_lockowner == thread) {
		/*
		 * Only the extents and bitmap files support lock recursion.
		 */
		if ((cp->c_fileid == kHFSExtentsFileID) ||
		    (cp->c_fileid == kHFSAllocationFileID)) {
			cp->c_syslockcount++;
		} else {
			panic("hfs_lock: locking against myself!");
		}
	} else if (locktype == HFS_SHARED_LOCK) {
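		/*
		 * A shared lock can have many holders, so record a
		 * sentinel owner rather than any particular thread.
		 */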
		lck_rw_lock_shared(&cp->c_rwlock);
		cp->c_lockowner = HFS_SHARED_OWNER;

	} else /* HFS_EXCLUSIVE_LOCK */ {
		lck_rw_lock_exclusive(&cp->c_rwlock);
		cp->c_lockowner = thread;

		/*
		 * Only the extents and bitmap files support lock recursion.
		 */
		if ((cp->c_fileid == kHFSExtentsFileID) ||
		    (cp->c_fileid == kHFSAllocationFileID)) {
			cp->c_syslockcount = 1;
		}
	}

#ifdef HFS_CHECK_LOCK_ORDER
	/*
	 * Regular cnodes (non-system files) cannot be locked
	 * while holding the journal lock or a system file lock.
	 */
	if (!(cp->c_desc.cd_flags & CD_ISMETA) &&
	    ((cp->c_fileid > kHFSFirstUserCatalogNodeID) || (cp->c_fileid == kHFSRootFolderID))) {
		vnode_t vp = NULLVP;

		/* Find corresponding vnode. */
		if (cp->c_vp != NULLVP && VTOC(cp->c_vp) == cp) {
			vp = cp->c_vp;
		} else if (cp->c_rsrc_vp != NULLVP && VTOC(cp->c_rsrc_vp) == cp) {
			vp = cp->c_rsrc_vp;
		}
		if (vp != NULLVP) {
			struct hfsmount *hfsmp = VTOHFS(vp);

			if (hfsmp->jnl && (journal_owner(hfsmp->jnl) == thread)) {
				/* This will eventually be a panic here. */
				printf("hfs_lock: bad lock order (cnode after journal)\n");
			}
			if (hfsmp->hfs_catalog_cp && hfsmp->hfs_catalog_cp->c_lockowner == thread) {
				panic("hfs_lock: bad lock order (cnode after catalog)");
			}
			if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == thread) {
				panic("hfs_lock: bad lock order (cnode after attribute)");
			}
			if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == thread) {
				panic("hfs_lock: bad lock order (cnode after extents)");
			}
		}
	}
#endif /* HFS_CHECK_LOCK_ORDER */

	/*
	 * Skip cnodes that no longer exist (were deleted).
	 */
	if ((locktype != HFS_FORCE_LOCK) &&
	    ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
	    (cp->c_flag & C_NOEXISTS)) {
		hfs_unlock(cp);
		return (ENOENT);
	}
	return (0);
}

/*
 * Lock a pair of cnodes.
 */
__private_extern__
int
hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
{
	struct cnode *first, *last;
	int error;

	/*
	 * If cnodes match then just lock one.
	 */
	if (cp1 == cp2) {
		return hfs_lock(cp1, locktype);
	}

	/*
	 * Lock in cnode address order.
	 */
	if (cp1 < cp2) {
		first = cp1;
		last = cp2;
	} else {
		first = cp2;
		last = cp1;
	}

	if ((error = hfs_lock(first, locktype))) {
		return (error);
	}
	if ((error = hfs_lock(last, locktype))) {
		hfs_unlock(first);
		return (error);
	}
	return (0);
}

/*
 * Check the ordering of two cnodes. Return true if they are in order.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
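	/*
	 * NULL sorts before everything; the 0xffffffff sentinel
	 * (used by hfs_lockfour) sorts after everything.
	 */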
	if (cp1 == cp2)
		return (0);
	if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
		return (1);
	if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
		return (0);
	/*
	 * Locking order is cnode address order.
	 */
	return (cp1 < cp2);
}

/*
 * Acquire 4 cnode locks.
 *   - locked in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
__private_extern__
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype)
{
	struct cnode * a[3];
	struct cnode * b[3];
	struct cnode * list[4];
	struct cnode * tmp;
	int i, j, k;
	int error;

	if (hfs_isordered(cp1, cp2)) {
		a[0] = cp1; a[1] = cp2;
	} else {
		a[0] = cp2; a[1] = cp1;
	}
	if (hfs_isordered(cp3, cp4)) {
		b[0] = cp3; b[1] = cp4;
	} else {
		b[0] = cp4; b[1] = cp3;
	}
	a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	b[2] = (struct cnode *)0xffffffff;  /* sentinel value */
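	/* The sentinels compare greater than any real cnode, so the two-way merge below never reads past the end of a[] or b[]. */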

	/*
	 * Build the lock list, skipping over duplicates
	 */
	for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
		tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
		if (k == 0 || tmp != list[k-1])
			list[k++] = tmp;
	}

	/*
	 * Now we can lock using list[0..k-1].
	 * Skip over NULL entries.
	 */
	for (i = 0; i < k; ++i) {
		if (list[i])
			if ((error = hfs_lock(list[i], locktype))) {
				/* Drop any locks we acquired. */
				while (--i >= 0) {
					if (list[i])
						hfs_unlock(list[i]);
				}
				return (error);
			}
	}
	return (0);
}


/*
 * Unlock a cnode.
 */
__private_extern__
void
hfs_unlock(struct cnode *cp)
{
	vnode_t rvp = NULLVP;
	vnode_t vp = NULLVP;
	u_int32_t c_flag;
	void *lockowner;

	/*
	 * Only the extents and bitmap files support lock recursion.
	 */
	if ((cp->c_fileid == kHFSExtentsFileID) ||
	    (cp->c_fileid == kHFSAllocationFileID)) {
		if (--cp->c_syslockcount > 0) {
			return;
		}
	}
	c_flag = cp->c_flag;
	cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);

	if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
		vp = cp->c_vp;
	}
	if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
		rvp = cp->c_rsrc_vp;
	}

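	/*
	 * Only an exclusive holder records itself as c_lockowner, so
	 * matching the current thread tells us which unlock to use.
	 */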
	lockowner = cp->c_lockowner;
	if (lockowner == current_thread()) {
		cp->c_lockowner = NULL;
		lck_rw_unlock_exclusive(&cp->c_rwlock);
	} else {
		lck_rw_unlock_shared(&cp->c_rwlock);
	}

	/* Perform any vnode post processing after cnode lock is dropped. */
	if (vp) {
		if (c_flag & C_NEED_DATA_SETSIZE)
			ubc_setsize(vp, 0);
		if (c_flag & C_NEED_DVNODE_PUT)
			vnode_put(vp);
	}
	if (rvp) {
		if (c_flag & C_NEED_RSRC_SETSIZE)
			ubc_setsize(rvp, 0);
		if (c_flag & C_NEED_RVNODE_PUT)
			vnode_put(rvp);
	}
}

/*
 * Unlock a pair of cnodes.
 */
__private_extern__
void
hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
{
	hfs_unlock(cp1);
	if (cp2 != cp1)
		hfs_unlock(cp2);
}

/*
 * Unlock a group of cnodes.
 */
__private_extern__
void
hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
{
	struct cnode * list[4];
	int i, k = 0;

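	/*
	 * Unlock each distinct cnode once, recording already-unlocked
	 * cnodes in list[] so duplicates are skipped.
	 */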
	if (cp1) {
		hfs_unlock(cp1);
		list[k++] = cp1;
	}
	if (cp2) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp2)
				goto skip1;
		}
		hfs_unlock(cp2);
		list[k++] = cp2;
	}
skip1:
	if (cp3) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp3)
				goto skip2;
		}
		hfs_unlock(cp3);
		list[k++] = cp3;
	}
skip2:
	if (cp4) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp4)
				return;
		}
		hfs_unlock(cp4);
	}
}


/*
 * Protect a cnode against truncation.
 *
 * Used mainly by read/write, since they don't hold the
 * cnode lock across calls to the cluster layer.
 *
 * The process doing a truncation must take the lock
 * exclusive. The read/write processes can take it
 * non-exclusive.
 */
__private_extern__
void
hfs_lock_truncate(struct cnode *cp, int exclusive)
{
#ifdef HFS_CHECK_LOCK_ORDER
	if (cp->c_lockowner == current_thread())
		panic("hfs_lock_truncate: cnode %p locked!", cp);
#endif /* HFS_CHECK_LOCK_ORDER */

	if (exclusive)
		lck_rw_lock_exclusive(&cp->c_truncatelock);
	else
		lck_rw_lock_shared(&cp->c_truncatelock);
}

__private_extern__
void
hfs_unlock_truncate(struct cnode *cp, int exclusive)
{
	if (exclusive) {
		lck_rw_unlock_exclusive(&cp->c_truncatelock);
	} else {
		lck_rw_unlock_shared(&cp->c_truncatelock);
	}
}