/*
 * Copyright (c) 2002-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/ubc.h>
#include <sys/quota.h>
#include <sys/kdebug.h>

#include <kern/locks.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <hfs/hfs.h>
#include <hfs/hfs_catalog.h>
#include <hfs/hfs_cnode.h>
#include <hfs/hfs_quota.h>

extern int prtactive;

extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

static int hfs_filedone(struct vnode *vp, vfs_context_t context);

static void hfs_reclaim_cnode(struct cnode *);

static int hfs_isordered(struct cnode *, struct cnode *);


/*
 * Last reference to a cnode.  If necessary, write or delete it.
 */
__private_extern__
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp;
    struct hfsmount *hfsmp = VTOHFS(vp);
    struct proc *p = vfs_context_proc(ap->a_context);
    int error = 0;
    int recycle = 0;
    int forkcount = 0;
    int truncated = 0;
    int started_tr = 0;
    int took_trunc_lock = 0;
    cat_cookie_t cookie;
    int cat_reserve = 0;
    int lockflags;
    enum vtype v_type;

    v_type = vnode_vtype(vp);
    cp = VTOC(vp);

    if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
        (hfsmp->hfs_freezing_proc == p)) {
        return (0);
    }

    /*
     * Ignore nodes related to stale file handles.
     */
    if (cp->c_mode == 0) {
        vnode_recycle(vp);
        return (0);
    }

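    /*
     * Ordering note: the truncate lock is taken (exclusively) before the
     * cnode lock below.  Read/write paths take it shared around cluster
     * I/O, so holding it here keeps the truncation of a deleted file's
     * blocks from racing those paths.
     */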
    if ((v_type == VREG || v_type == VLNK)) {
        hfs_lock_truncate(cp, TRUE);
        took_trunc_lock = 1;
    }

    (void) hfs_lock(cp, HFS_FORCE_LOCK);

    /*
     * We should lock cnode before checking the flags in the
     * condition below and should unlock the cnode before calling
     * ubc_setsize() as cluster code can call other HFS vnops which
     * will try to acquire the same cnode lock and cause deadlock.
     */
    if ((v_type == VREG || v_type == VLNK) &&
        (cp->c_flag & C_DELETED) &&
        (VTOF(vp)->ff_blocks != 0)) {
        hfs_unlock(cp);
        ubc_setsize(vp, 0);
        (void) hfs_lock(cp, HFS_FORCE_LOCK);
    }

    if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
        hfs_filedone(vp, ap->a_context);
    }
    /*
     * Remove any directory hints or cached origins
     */
    if (v_type == VDIR) {
        hfs_reldirhints(cp, 0);
        if (cp->c_flag & C_HARDLINK)
            hfs_relorigins(cp);
    }

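    /*
     * Count the forks currently attached to this cnode; the
     * postponed-deletion logic below only removes the catalog record
     * once the last fork goes inactive.
     */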
    if (cp->c_datafork)
        ++forkcount;
    if (cp->c_rsrcfork)
        ++forkcount;

    /* If needed, get rid of any fork's data for a deleted file */
    if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
        if (VTOF(vp)->ff_blocks != 0) {
            /*
             * Since we're already inside a transaction,
             * tell hfs_truncate to skip the ubc_setsize.
             */
            error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
            if (error)
                goto out;
            truncated = 1;
        }
        recycle = 1;

        /*
         * Check if there's any resource fork blocks that need to
         * be reclaimed.  This covers the case where there is a
         * resource fork but it's not in core.
         */
        if ((cp->c_blocks > 0) && (forkcount == 1) && (vp != cp->c_rsrc_vp)) {
            struct vnode *rvp = NULLVP;

            error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
            if (error)
                goto out;
            /*
             * Defer the vnode_put and ubc_setsize on rvp until hfs_unlock().
             */
            cp->c_flag |= C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE;
            error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, ap->a_context);
            if (error)
                goto out;
            vnode_recycle(rvp);  /* all done with this vnode */
        }
    }

    // If needed, get rid of any xattrs that this file may have.
    // Note that this must happen outside of any other transactions
    // because it starts/ends its own transactions and grabs its
    // own locks.  This is to prevent a file with a lot of attributes
    // from creating a transaction that is too large (which panics).
    //
    if ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0 && (cp->c_flag & C_DELETED)) {
        hfs_removeallattr(hfsmp, cp->c_fileid);
    }

    /*
     * Check for a postponed deletion.
     * (only delete cnode when the last fork goes inactive)
     */
    if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
        /*
         * Mark cnode in transit so that no one can get this
         * cnode from cnode hash.
         */
        // hfs_chash_mark_in_transit(cp);
        // XXXdbg - remove the cnode from the hash table since it's deleted
        //          otherwise someone could go to sleep on the cnode and not
        //          be woken up until this vnode gets recycled which could be
        //          a very long time...
        hfs_chashremove(cp);

        cp->c_flag |= C_NOEXISTS;  // XXXdbg
        cp->c_rdev = 0;

        if (started_tr == 0) {
            if (hfs_start_transaction(hfsmp) != 0) {
                error = EINVAL;
                goto out;
            }
            started_tr = 1;
        }

        /*
         * Reserve some space in the Catalog file.
         */
        if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
            goto out;
        }
        cat_reserve = 1;

        lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

        if (cp->c_blocks > 0) {
            printf("hfs_inactive: deleting non-empty%sfile %d, "
                   "blks %d\n", VNODE_IS_RSRC(vp) ? " rsrc " : " ",
                   (int)cp->c_fileid, (int)cp->c_blocks);
        }

        //
        // release the name pointer in the descriptor so that
        // cat_delete() will use the file-id to do the deletion.
        // in the case of hard links this is imperative (in the
        // case of regular files the fileid and cnid are the
        // same so it doesn't matter).
        //
        cat_releasedesc(&cp->c_desc);

        /*
         * The descriptor name may be zero,
         * in which case the fileid is used.
         */
        error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

        if (error && truncated && (error != ENXIO))
            printf("hfs_inactive: couldn't delete a truncated file!\n");

        /* Update HFS Private Data dir */
        if (error == 0) {
            hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries--;
            if (vnode_isdir(vp)) {
                DEC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
            }
            (void)cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
                    &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
        }

        hfs_systemfile_unlock(hfsmp, lockflags);

        if (error)
            goto out;

#if QUOTA
        if (hfsmp->hfs_flags & HFS_QUOTAS)
            (void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

        cp->c_mode = 0;
        cp->c_flag &= ~C_DELETED;
        cp->c_touch_chgtime = TRUE;
        cp->c_touch_modtime = TRUE;

        if (error == 0)
            hfs_volupdate(hfsmp, (v_type == VDIR) ? VOL_RMDIR : VOL_RMFILE, 0);
    }

    /*
     * A file may have had delayed allocations, in which case hfs_update
     * would not have updated the catalog record (cat_update).  We need
     * to do that now, before we lose our fork data.  We also need to
     * force the update, or hfs_update will again skip the cat_update.
     */
    if ((cp->c_flag & C_MODIFIED) ||
        cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
        if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime) {
            cp->c_flag |= C_FORCEUPDATE;
        }
        hfs_update(vp, 0);
    }
out:
    if (cat_reserve)
        cat_postflight(hfsmp, &cookie, p);

    // XXXdbg - have to do this because a goto could have come here
    if (started_tr) {
        hfs_end_transaction(hfsmp);
        started_tr = 0;
    }

    hfs_unlock(cp);

    if (took_trunc_lock)
        hfs_unlock_truncate(cp, TRUE);

    /*
     * If we are done with the vnode, reclaim it
     * so that it can be reused immediately.
     */
    if (cp->c_mode == 0 || recycle)
        vnode_recycle(vp);

    return (error);
}

/*
 * File clean-up (zero fill and shrink peof).
 */
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
    struct cnode *cp;
    struct filefork *fp;
    struct hfsmount *hfsmp;
    off_t leof;
    u_long blks, blocksize;

    cp = VTOC(vp);
    fp = VTOF(vp);
    hfsmp = VTOHFS(vp);
    leof = fp->ff_size;

    if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
        return (0);

    hfs_unlock(cp);
    (void) cluster_push(vp, IO_CLOSE);
    hfs_lock(cp, HFS_FORCE_LOCK);

    /*
     * Explicitly zero out the areas of file
     * that are currently marked invalid.
     */
    while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
        struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
        off_t start = invalid_range->rl_start;
        off_t end = invalid_range->rl_end;

        /* The range about to be written must be validated
         * first, so that VNOP_BLOCKMAP() will return the
         * appropriate mapping for the cluster code:
         */
        rl_remove(start, end, &fp->ff_invalidranges);

        hfs_unlock(cp);
        (void) cluster_write(vp, (struct uio *) 0,
                leof, end + 1, start, (off_t)0,
                IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
        hfs_lock(cp, HFS_FORCE_LOCK);
        cp->c_flag |= C_MODIFIED;
    }
    cp->c_flag &= ~C_ZFWANTSYNC;
    cp->c_zftimeout = 0;
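    /*
     * Compute how many allocation blocks are needed to hold the logical
     * EOF, rounding any partial block up to a whole one.
     */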
    blocksize = VTOVCB(vp)->blockSize;
    blks = leof / blocksize;
    if (((off_t)blks * (off_t)blocksize) != leof)
        blks++;
    /*
     * Shrink the peof to the smallest size necessary to contain the leof.
     */
    if (blks < fp->ff_blocks)
        (void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
    hfs_unlock(cp);
    (void) cluster_push(vp, IO_CLOSE);
    hfs_lock(cp, HFS_FORCE_LOCK);

    /*
     * If the hfs_truncate didn't happen to flush the vnode's
     * information out to disk, force it to be updated now that
     * all invalid ranges have been zero-filled and validated:
     */
    if (cp->c_flag & C_MODIFIED) {
        hfs_update(vp, 0);
    }
    return (0);
}


/*
 * Reclaim a cnode so that it can be used for other purposes.
 */
__private_extern__
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct cnode *cp;
    struct filefork *fp = NULL;
    struct filefork *altfp = NULL;
    int reclaim_cnode = 0;

    (void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
    cp = VTOC(vp);

    /*
     * Check if a deleted resource fork vnode missed a
     * VNOP_INACTIVE call and requires truncation.
     */
    if (VNODE_IS_RSRC(vp) &&
        (cp->c_flag & C_DELETED) &&
        (VTOF(vp)->ff_blocks != 0)) {
        hfs_unlock(cp);
        ubc_setsize(vp, 0);

        hfs_lock_truncate(cp, TRUE);
        (void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);

        (void) hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);

        hfs_unlock_truncate(cp, TRUE);
    }
    /*
     * A file may have had delayed allocations, in which case hfs_update
     * would not have updated the catalog record (cat_update).  We need
     * to do that now, before we lose our fork data.  We also need to
     * force the update, or hfs_update will again skip the cat_update.
     */
    if ((cp->c_flag & C_MODIFIED) ||
        cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
        if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime) {
            cp->c_flag |= C_FORCEUPDATE;
        }
        hfs_update(vp, 0);
    }

    /*
     * Keep track of an inactive hot file.
     */
    if (!vnode_isdir(vp) &&
        !vnode_issystem(vp) &&
        !(cp->c_flag & (C_DELETED | C_NOEXISTS)) ) {
        (void) hfs_addhotfile(vp);
    }
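    /*
     * Drop the filesystem reference taken at vnode_create time
     * (VNFS_ADDFSREF in hfs_getnewvnode below).
     */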
    vnode_removefsref(vp);

    /*
     * Find file fork for this vnode (if any)
     * Also check if another fork is active
     */
    if (cp->c_vp == vp) {
        fp = cp->c_datafork;
        altfp = cp->c_rsrcfork;

        cp->c_datafork = NULL;
        cp->c_vp = NULL;
    } else if (cp->c_rsrc_vp == vp) {
        fp = cp->c_rsrcfork;
        altfp = cp->c_datafork;

        cp->c_rsrcfork = NULL;
        cp->c_rsrc_vp = NULL;
    } else {
        panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
    }
    /*
     * On the last fork, remove the cnode from its hash chain.
     */
    if (altfp == NULL) {
        /* If we can't remove it then the cnode must persist! */
        if (hfs_chashremove(cp) == 0)
            reclaim_cnode = 1;
        /*
         * Remove any directory hints
         */
        if (vnode_isdir(vp)) {
            hfs_reldirhints(cp, 0);
        }
    }
    /* Release the file fork and related data */
    if (fp) {
        /* Dump cached symlink data */
        if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
            FREE(fp->ff_symlinkptr, M_TEMP);
        }
        FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
    }

    /*
     * If there was only one active fork then we can release the cnode.
     */
    if (reclaim_cnode) {
        hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
        hfs_reclaim_cnode(cp);
    } else /* cnode in use */ {
        hfs_unlock(cp);
    }

    vnode_clearfsnode(vp);
    return (0);
}


extern int (**hfs_vnodeop_p) (void *);
extern int (**hfs_specop_p)  (void *);
#if FIFO
extern int (**hfs_fifoop_p)  (void *);
#endif

/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked
 */
__private_extern__
int
hfs_getnewvnode(
    struct hfsmount *hfsmp,
    struct vnode *dvp,
    struct componentname *cnp,
    struct cat_desc *descp,
    int flags,
    struct cat_attr *attrp,
    struct cat_fork *forkp,
    struct vnode **vpp)
{
    struct mount *mp = HFSTOVFS(hfsmp);
    struct vnode *vp = NULL;
    struct vnode **cvpp;
    struct vnode *tvp = NULLVP;
    struct cnode *cp = NULL;
    struct filefork *fp = NULL;
    int retval;
    int issystemfile;
    int wantrsrc;
    struct vnode_fsparam vfsp;
    enum vtype vtype;
#if QUOTA
    int i;
#endif /* QUOTA */

    if (attrp->ca_fileid == 0) {
        *vpp = NULL;
        return (ENOENT);
    }

#if !FIFO
    if (IFTOVT(attrp->ca_mode) == VFIFO) {
        *vpp = NULL;
        return (ENOTSUP);
    }
#endif /* !FIFO */
    vtype = IFTOVT(attrp->ca_mode);
    issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);
    wantrsrc = flags & GNV_WANTRSRC;

#ifdef HFS_CHECK_LOCK_ORDER
    /*
     * The only case where it is permissible to hold the parent cnode
     * lock is during a create operation (hfs_makenode) or when
     * we don't need the cnode lock (GNV_SKIPLOCK).
     */
    if ((dvp != NULL) &&
        (flags & (GNV_CREATE | GNV_SKIPLOCK)) == 0 &&
        VTOC(dvp)->c_lockowner == current_thread()) {
        panic("hfs_getnewvnode: unexpected hold of parent cnode %p", VTOC(dvp));
    }
#endif /* HFS_CHECK_LOCK_ORDER */

    /*
     * Get a cnode (new or existing)
     */
    cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (flags & GNV_SKIPLOCK));

    /*
     * If the id is no longer valid for lookups we'll get back a NULL cp.
     */
    if (cp == NULL) {
        return (ENOENT);
    }

    /* Hardlinks may need an updated catalog descriptor */
    if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
        replace_desc(cp, descp);
    }
    /* Check if we found a matching vnode */
    if (*vpp != NULL)
        return (0);

    /*
     * If this is a new cnode then initialize it.
     */
    if (ISSET(cp->c_hflag, H_ALLOC)) {
        lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

        /* Make sure it's still valid (i.e., it exists on disk). */
        if (!(flags & GNV_CREATE) &&
            !hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
            hfs_chash_abort(cp);
            hfs_reclaim_cnode(cp);
            *vpp = NULL;
            return (ENOENT);
        }
        bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
        bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

        /* The name was inherited so clear descriptor state... */
        descp->cd_namelen = 0;
        descp->cd_nameptr = NULL;
        descp->cd_flags &= ~CD_HASBUF;

        /* Tag hardlinks */
        if ((vtype == VREG || vtype == VDIR) &&
            ((descp->cd_cnid != attrp->ca_fileid) ||
             (attrp->ca_recflags & kHFSHasLinkChainMask))) {
            cp->c_flag |= C_HARDLINK;
        }
        /*
         * Fix-up dir link counts.
         *
         * Earlier versions of Leopard used ca_linkcount for posix
         * nlink support (effectively the sub-directory count + 2).
         * That is now accomplished using the ca_dircount field with
         * the corresponding kHFSHasFolderCountMask flag.
         *
         * For directories the ca_linkcount is the true link count,
         * tracking the number of actual hardlinks to a directory.
         *
         * We only do this if the mount has HFS_FOLDERCOUNT set;
         * at the moment, we only set that for HFSX volumes.
         */
        if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
            (vtype == VDIR) &&
            !(attrp->ca_recflags & kHFSHasFolderCountMask) &&
            (cp->c_attr.ca_linkcount > 1)) {
            if (cp->c_attr.ca_entries == 0)
                cp->c_attr.ca_dircount = 0;
            else
                cp->c_attr.ca_dircount = cp->c_attr.ca_linkcount - 2;

            cp->c_attr.ca_linkcount = 1;
            cp->c_attr.ca_recflags |= kHFSHasFolderCountMask;
            if ( !(hfsmp->hfs_flags & HFS_READ_ONLY) )
                cp->c_flag |= C_MODIFIED;
        }
#if QUOTA
        if (hfsmp->hfs_flags & HFS_QUOTAS) {
            for (i = 0; i < MAXQUOTAS; i++)
                cp->c_dquot[i] = NODQUOT;
        }
#endif /* QUOTA */
    }

    if (vtype == VDIR) {
        if (cp->c_vp != NULL)
            panic("hfs_getnewvnode: orphaned vnode (data)");
        cvpp = &cp->c_vp;
    } else {
        if (forkp && attrp->ca_blocks < forkp->cf_blocks)
            panic("hfs_getnewvnode: bad ca_blocks (too small)");
        /*
         * Allocate and initialize a file fork...
         */
        MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
            M_HFSFORK, M_WAITOK);
        fp->ff_cp = cp;
        if (forkp)
            bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
        else
            bzero(&fp->ff_data, sizeof(struct cat_fork));
        rl_init(&fp->ff_invalidranges);
        fp->ff_sysfileinfo = 0;

        if (wantrsrc) {
            if (cp->c_rsrcfork != NULL)
                panic("hfs_getnewvnode: orphaned rsrc fork");
            if (cp->c_rsrc_vp != NULL)
                panic("hfs_getnewvnode: orphaned vnode (rsrc)");
            cp->c_rsrcfork = fp;
            cvpp = &cp->c_rsrc_vp;
            if ( (tvp = cp->c_vp) != NULLVP )
                cp->c_flag |= C_NEED_DVNODE_PUT;
        } else {
            if (cp->c_datafork != NULL)
                panic("hfs_getnewvnode: orphaned data fork");
            if (cp->c_vp != NULL)
                panic("hfs_getnewvnode: orphaned vnode (data)");
            cp->c_datafork = fp;
            cvpp = &cp->c_vp;
            if ( (tvp = cp->c_rsrc_vp) != NULLVP)
                cp->c_flag |= C_NEED_RVNODE_PUT;
        }
    }
    if (tvp != NULLVP) {
        /*
         * grab an iocount on the vnode we weren't
         * interested in (i.e. we want the resource fork
         * but the cnode already has the data fork)
         * to prevent it from being
         * recycled by us when we call vnode_create
         * which will result in a deadlock when we
         * try to take the cnode lock in hfs_vnop_fsync or
         * hfs_vnop_reclaim... vnode_get can be called here
         * because we already hold the cnode lock which will
         * prevent the vnode from changing identity until
         * we drop it... vnode_get will not block waiting for
         * a change of state... however, it will return an
         * error if the current iocount == 0 and we've already
         * started to terminate the vnode... we don't need/want to
         * grab an iocount in that case since we can't cause
         * the filesystem to be re-entered on this thread for this vp
         *
         * the matching vnode_put will happen in hfs_unlock
         * after we've dropped the cnode lock
         */
        if ( vnode_get(tvp) != 0)
            cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
    }
    vfsp.vnfs_mp = mp;
    vfsp.vnfs_vtype = vtype;
    vfsp.vnfs_str = "hfs";
    if ((cp->c_flag & C_HARDLINK) && (vtype == VDIR)) {
        vfsp.vnfs_dvp = NULL;  /* no parent for me! */
        vfsp.vnfs_cnp = NULL;  /* no name for me! */
    } else {
        vfsp.vnfs_dvp = dvp;
        vfsp.vnfs_cnp = cnp;
    }
    vfsp.vnfs_fsnode = cp;
#if FIFO
    if (vtype == VFIFO )
        vfsp.vnfs_vops = hfs_fifoop_p;
    else
#endif
    if (vtype == VBLK || vtype == VCHR)
        vfsp.vnfs_vops = hfs_specop_p;
    else
        vfsp.vnfs_vops = hfs_vnodeop_p;

    if (vtype == VBLK || vtype == VCHR)
        vfsp.vnfs_rdev = attrp->ca_rdev;
    else
        vfsp.vnfs_rdev = 0;

    if (forkp)
        vfsp.vnfs_filesize = forkp->cf_size;
    else
        vfsp.vnfs_filesize = 0;

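    /*
     * VNFS_ADDFSREF takes a filesystem reference that hfs_vnop_reclaim
     * later drops via vnode_removefsref().  VNFS_NOCACHE skips making a
     * name-cache entry when there is no usable parent/name pair.
     */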
    vfsp.vnfs_flags = VNFS_ADDFSREF;
    if (dvp == NULLVP || cnp == NULL || !(cnp->cn_flags & MAKEENTRY))
        vfsp.vnfs_flags |= VNFS_NOCACHE;

    /* Tag system files */
    vfsp.vnfs_marksystem = issystemfile;

    /* Tag root directory */
    if (descp->cd_cnid == kHFSRootFolderID)
        vfsp.vnfs_markroot = 1;
    else
        vfsp.vnfs_markroot = 0;

    if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
        if (fp) {
            if (fp == cp->c_datafork)
                cp->c_datafork = NULL;
            else
                cp->c_rsrcfork = NULL;

            FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
        }
        /*
         * If this is a newly created cnode or a vnode reclaim
         * occurred during the attachment, then cleanup the cnode.
         */
        if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
            hfs_chash_abort(cp);
            hfs_reclaim_cnode(cp);
        }
        else {
            hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
            if ((flags & GNV_SKIPLOCK) == 0) {
                hfs_unlock(cp);
            }
        }
        *vpp = NULL;
        return (retval);
    }
    vp = *cvpp;
    vnode_settag(vp, VT_HFS);
    if (cp->c_flag & C_HARDLINK) {
        vnode_setmultipath(vp);
    }
    /*
     * Tag resource fork vnodes as needing a VNOP_INACTIVE
     * so that any deferred removes (open unlinked files)
     * have the chance to process the resource fork.
     */
    if (VNODE_IS_RSRC(vp)) {
        /* Force VL_NEEDINACTIVE on this vnode */
        vnode_ref(vp);
        vnode_rele(vp);
    }
    hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);

    /*
     * Stop tracking an active hot file.
     */
    if (!(flags & GNV_CREATE) && (vtype != VDIR) && !issystemfile) {
        (void) hfs_removehotfile(vp);
    }

    *vpp = vp;
    return (0);
}


static void
hfs_reclaim_cnode(struct cnode *cp)
{
#if QUOTA
    int i;

    for (i = 0; i < MAXQUOTAS; i++) {
        if (cp->c_dquot[i] != NODQUOT) {
            dqreclaim(cp->c_dquot[i]);
            cp->c_dquot[i] = NODQUOT;
        }
    }
#endif /* QUOTA */

    /*
     * If the descriptor has a name then release it
     */
    if ((cp->c_desc.cd_flags & CD_HASBUF) && (cp->c_desc.cd_nameptr != 0)) {
        const char *nameptr;

        nameptr = (const char *) cp->c_desc.cd_nameptr;
        cp->c_desc.cd_nameptr = 0;
        cp->c_desc.cd_flags &= ~CD_HASBUF;
        cp->c_desc.cd_namelen = 0;
        vfs_removename(nameptr);
    }

    lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
    lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
    bzero(cp, sizeof(struct cnode));
    FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
}


__private_extern__
int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
{
    struct cat_attr attr;
    struct cat_desc cndesc;
    int stillvalid = 0;
    int lockflags;

    /* System files are always valid */
    if (cnid < kHFSFirstUserCatalogNodeID)
        return (1);

    /* XXX optimization: check write count in dvp */

    lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

    if (dvp && cnp) {
        bzero(&cndesc, sizeof(cndesc));
        cndesc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
        cndesc.cd_namelen = cnp->cn_namelen;
        cndesc.cd_parentcnid = VTOC(dvp)->c_fileid;
        cndesc.cd_hint = VTOC(dvp)->c_childhint;

        if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
            (cnid == attr.ca_fileid)) {
            stillvalid = 1;
        }
    } else {
        if (cat_idlookup(hfsmp, cnid, 0, NULL, NULL, NULL) == 0) {
            stillvalid = 1;
        }
    }
    hfs_systemfile_unlock(hfsmp, lockflags);

    return (stillvalid);
}

/*
 * Touch cnode times based on c_touch_xxx flags
 *
 * cnode must be locked exclusive
 *
 * This will also update the volume modify time
 */
__private_extern__
void
hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
{
    /* don't modify times if volume is read-only */
    if (hfsmp->hfs_flags & HFS_READ_ONLY) {
        cp->c_touch_acctime = FALSE;
        cp->c_touch_chgtime = FALSE;
        cp->c_touch_modtime = FALSE;
    }
    else if (hfsmp->hfs_flags & HFS_STANDARD) {
        /* HFS Standard doesn't support access times */
        cp->c_touch_acctime = FALSE;
    }

    /*
     * Skip access time updates if:
     *	. MNT_NOATIME is set
     *	. a file system freeze is in progress
     *	. a file system resize is in progress
     */
    if (cp->c_touch_acctime) {
        if ((vfs_flags(hfsmp->hfs_mp) & MNT_NOATIME) ||
            (hfsmp->hfs_freezing_proc != NULL) ||
            (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS))
            cp->c_touch_acctime = FALSE;
    }
    if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
        struct timeval tv;
        int touchvol = 0;

        microtime(&tv);

        if (cp->c_touch_acctime) {
            cp->c_atime = tv.tv_sec;
            /*
             * When the access time is the only thing changing
             * then make sure it's sufficiently newer before
             * committing it to disk.
             */
            if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
                 ATIME_ONDISK_ACCURACY)) {
                cp->c_flag |= C_MODIFIED;
            }
            cp->c_touch_acctime = FALSE;
        }
        if (cp->c_touch_modtime) {
            cp->c_mtime = tv.tv_sec;
            cp->c_touch_modtime = FALSE;
            cp->c_flag |= C_MODIFIED;
            touchvol = 1;
#if 1
            /*
             * HFS dates that WE set must be adjusted for DST
             */
            if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
                cp->c_mtime += 3600;
            }
#endif
        }
        if (cp->c_touch_chgtime) {
            cp->c_ctime = tv.tv_sec;
            cp->c_touch_chgtime = FALSE;
            cp->c_flag |= C_MODIFIED;
            touchvol = 1;
        }

        /* Touch the volume modtime if needed */
        if (touchvol) {
            MarkVCBDirty(hfsmp);
            HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
        }
    }
}

/*
 * Lock a cnode.
 */
__private_extern__
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
    void * thread = current_thread();

    if (cp->c_lockowner == thread) {
        /*
         * Only the extents and bitmap files support lock recursion.
         */
        if ((cp->c_fileid == kHFSExtentsFileID) ||
            (cp->c_fileid == kHFSAllocationFileID)) {
            cp->c_syslockcount++;
        } else {
            panic("hfs_lock: locking against myself!");
        }
    } else if (locktype == HFS_SHARED_LOCK) {
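        /*
         * Shared locks have no single owning thread; tag the owner field
         * with a marker so hfs_unlock() knows to drop the lock shared
         * rather than exclusive.
         */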
        lck_rw_lock_shared(&cp->c_rwlock);
        cp->c_lockowner = HFS_SHARED_OWNER;

    } else /* HFS_EXCLUSIVE_LOCK */ {
        lck_rw_lock_exclusive(&cp->c_rwlock);
        cp->c_lockowner = thread;

        /*
         * Only the extents and bitmap files support lock recursion.
         */
        if ((cp->c_fileid == kHFSExtentsFileID) ||
            (cp->c_fileid == kHFSAllocationFileID)) {
            cp->c_syslockcount = 1;
        }
    }

#ifdef HFS_CHECK_LOCK_ORDER
    /*
     * Regular cnodes (non-system files) cannot be locked
     * while holding the journal lock or a system file lock.
     */
    if (!(cp->c_desc.cd_flags & CD_ISMETA) &&
        ((cp->c_fileid > kHFSFirstUserCatalogNodeID) || (cp->c_fileid == kHFSRootFolderID))) {
        vnode_t vp = NULLVP;

        /* Find corresponding vnode. */
        if (cp->c_vp != NULLVP && VTOC(cp->c_vp) == cp) {
            vp = cp->c_vp;
        } else if (cp->c_rsrc_vp != NULLVP && VTOC(cp->c_rsrc_vp) == cp) {
            vp = cp->c_rsrc_vp;
        }
        if (vp != NULLVP) {
            struct hfsmount *hfsmp = VTOHFS(vp);

            if (hfsmp->jnl && (journal_owner(hfsmp->jnl) == thread)) {
                /* This will eventually be a panic here. */
                printf("hfs_lock: bad lock order (cnode after journal)\n");
            }
            if (hfsmp->hfs_catalog_cp && hfsmp->hfs_catalog_cp->c_lockowner == thread) {
                panic("hfs_lock: bad lock order (cnode after catalog)");
            }
            if (hfsmp->hfs_attribute_cp && hfsmp->hfs_attribute_cp->c_lockowner == thread) {
                panic("hfs_lock: bad lock order (cnode after attribute)");
            }
            if (hfsmp->hfs_extents_cp && hfsmp->hfs_extents_cp->c_lockowner == thread) {
                panic("hfs_lock: bad lock order (cnode after extents)");
            }
        }
    }
#endif /* HFS_CHECK_LOCK_ORDER */

    /*
     * Skip cnodes that no longer exist (were deleted).
     */
    if ((locktype != HFS_FORCE_LOCK) &&
        ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
        (cp->c_flag & C_NOEXISTS)) {
        hfs_unlock(cp);
        return (ENOENT);
    }
    return (0);
}

/*
 * Lock a pair of cnodes.
 */
__private_extern__
int
hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
{
    struct cnode *first, *last;
    int error;

    /*
     * If cnodes match then just lock one.
     */
    if (cp1 == cp2) {
        return hfs_lock(cp1, locktype);
    }

    /*
     * Lock in cnode address order.
     */
    if (cp1 < cp2) {
        first = cp1;
        last = cp2;
    } else {
        first = cp2;
        last = cp1;
    }

    if ( (error = hfs_lock(first, locktype))) {
        return (error);
    }
    if ( (error = hfs_lock(last, locktype))) {
        hfs_unlock(first);
        return (error);
    }
    return (0);
}
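
/*
 * Typical caller pattern (a sketch; the variable names are illustrative,
 * not taken from this file): lock a parent/child pair, do the catalog
 * work, then unlock both:
 *
 *	if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK)))
 *		return (error);
 *	... update the cnodes / catalog records ...
 *	hfs_unlockpair(dcp, cp);
 */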

/*
 * Check ordering of two cnodes.  Return true if they are in order.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
    if (cp1 == cp2)
        return (0);
    if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
        return (1);
    if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
        return (0);
    /*
     * Locking order is cnode address order.
     */
    return (cp1 < cp2);
}

/*
 * Acquire 4 cnode locks.
 *   - locked in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
__private_extern__
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype)
{
    struct cnode * a[3];
    struct cnode * b[3];
    struct cnode * list[4];
    struct cnode * tmp;
    int i, j, k;
    int error;

    if (hfs_isordered(cp1, cp2)) {
        a[0] = cp1; a[1] = cp2;
    } else {
        a[0] = cp2; a[1] = cp1;
    }
    if (hfs_isordered(cp3, cp4)) {
        b[0] = cp3; b[1] = cp4;
    } else {
        b[0] = cp4; b[1] = cp3;
    }
    a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
    b[2] = (struct cnode *)0xffffffff;  /* sentinel value */
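
    /*
     * Merge the two sorted pairs into one ordered list.  hfs_isordered()
     * treats NULL as sorting first and the 0xffffffff sentinel as sorting
     * last, so the two-pointer merge below consumes both pairs without
     * running past either array.
     */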
    /*
     * Build the lock list, skipping over duplicates
     */
    for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
        tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
        if (k == 0 || tmp != list[k-1])
            list[k++] = tmp;
    }

    /*
     * Now we can lock using list[0..k-1].
     * Skip over NULL entries.
     */
    for (i = 0; i < k; ++i) {
        if (list[i])
            if ((error = hfs_lock(list[i], locktype))) {
                /* Drop any locks we acquired. */
                while (--i >= 0) {
                    if (list[i])
                        hfs_unlock(list[i]);
                }
                return (error);
            }
    }
    return (0);
}


/*
 * Unlock a cnode.
 */
__private_extern__
void
hfs_unlock(struct cnode *cp)
{
    vnode_t rvp = NULLVP;
    vnode_t vp = NULLVP;
    u_int32_t c_flag;
    void *lockowner;

    /*
     * Only the extents and bitmap files support lock recursion.
     */
    if ((cp->c_fileid == kHFSExtentsFileID) ||
        (cp->c_fileid == kHFSAllocationFileID)) {
        if (--cp->c_syslockcount > 0) {
            return;
        }
    }
    c_flag = cp->c_flag;
    cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);

    if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
        vp = cp->c_vp;
    }
    if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
        rvp = cp->c_rsrc_vp;
    }

    lockowner = cp->c_lockowner;
    if (lockowner == current_thread()) {
        cp->c_lockowner = NULL;
        lck_rw_unlock_exclusive(&cp->c_rwlock);
    } else {
        lck_rw_unlock_shared(&cp->c_rwlock);
    }

    /* Perform any vnode post processing after cnode lock is dropped. */
    if (vp) {
        if (c_flag & C_NEED_DATA_SETSIZE)
            ubc_setsize(vp, 0);
        if (c_flag & C_NEED_DVNODE_PUT)
            vnode_put(vp);
    }
    if (rvp) {
        if (c_flag & C_NEED_RSRC_SETSIZE)
            ubc_setsize(rvp, 0);
        if (c_flag & C_NEED_RVNODE_PUT)
            vnode_put(rvp);
    }
}

/*
 * Unlock a pair of cnodes.
 */
__private_extern__
void
hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
{
    hfs_unlock(cp1);
    if (cp2 != cp1)
        hfs_unlock(cp2);
}

/*
 * Unlock a group of cnodes.
 */
__private_extern__
void
hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
{
    struct cnode * list[4];
    int i, k = 0;

    if (cp1) {
        hfs_unlock(cp1);
        list[k++] = cp1;
    }
    if (cp2) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp2)
                goto skip1;
        }
        hfs_unlock(cp2);
        list[k++] = cp2;
    }
skip1:
    if (cp3) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp3)
                goto skip2;
        }
        hfs_unlock(cp3);
        list[k++] = cp3;
    }
skip2:
    if (cp4) {
        for (i = 0; i < k; ++i) {
            if (list[i] == cp4)
                return;
        }
        hfs_unlock(cp4);
    }
}


/*
 * Protect a cnode against a truncation.
 *
 * Used mainly by read/write since they don't hold the
 * cnode lock across calls to the cluster layer.
 *
 * The process doing a truncation must take the lock
 * exclusive.  The read/write processes can take it
 * non-exclusive.
 */
__private_extern__
void
hfs_lock_truncate(struct cnode *cp, int exclusive)
{
#ifdef HFS_CHECK_LOCK_ORDER
    if (cp->c_lockowner == current_thread())
        panic("hfs_lock_truncate: cnode %p locked!", cp);
#endif /* HFS_CHECK_LOCK_ORDER */

    if (exclusive)
        lck_rw_lock_exclusive(&cp->c_truncatelock);
    else
        lck_rw_lock_shared(&cp->c_truncatelock);
}

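/*
 * Sketch of the intended usage (illustrative only; see the actual
 * read/write vnops for the real sequence): readers and writers hold the
 * truncate lock shared around cluster I/O, while truncation takes it
 * exclusive:
 *
 *	hfs_lock_truncate(cp, FALSE);
 *	... cluster_read()/cluster_write() without the cnode lock held ...
 *	hfs_unlock_truncate(cp, FALSE);
 */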
__private_extern__
void
hfs_unlock_truncate(struct cnode *cp, int exclusive)
{
    if (exclusive) {
        lck_rw_unlock_exclusive(&cp->c_truncatelock);
    } else {
        lck_rw_unlock_shared(&cp->c_truncatelock);
    }
}