]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_cnode.c
xnu-792.12.6.tar.gz
[apple/xnu.git] / bsd / hfs / hfs_cnode.c
1 /*
2 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/proc.h>
33 #include <sys/vnode.h>
34 #include <sys/mount.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/time.h>
38 #include <sys/ubc.h>
39 #include <sys/quota.h>
40 #include <sys/kdebug.h>
41
42 #include <kern/locks.h>
43
44 #include <miscfs/specfs/specdev.h>
45 #include <miscfs/fifofs/fifo.h>
46
47 #include <hfs/hfs.h>
48 #include <hfs/hfs_catalog.h>
49 #include <hfs/hfs_cnode.h>
50 #include <hfs/hfs_quota.h>
51
52 extern int prtactive;
53
54 extern lck_attr_t * hfs_lock_attr;
55 extern lck_grp_t * hfs_mutex_group;
56 extern lck_grp_t * hfs_rwlock_group;
57
58 static int hfs_filedone(struct vnode *vp, vfs_context_t context);
59
60 static void hfs_reclaim_cnode(struct cnode *);
61
62 static int hfs_valid_cnode(struct hfsmount *, struct vnode *, struct componentname *, cnid_t);
63
64 static int hfs_isordered(struct cnode *, struct cnode *);
65
66 int hfs_vnop_inactive(struct vnop_inactive_args *);
67
68 int hfs_vnop_reclaim(struct vnop_reclaim_args *);
69
70
/*
 * Last reference to a cnode.  If necessary, write or delete it.
 *
 * Called when the last usecount on the vnode is released.  For a file
 * marked C_DELETED this truncates the fork and, once the last fork goes
 * inactive, removes the catalog record inside a single transaction.
 * Otherwise it just flushes any pending timestamp/attribute updates.
 */
__private_extern__
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	int recycle = 0;
	int forkcount = 0;
	int truncated = 0;
	int started_tr = 0;
	int took_trunc_lock = 0;
	cat_cookie_t cookie;
	int cat_reserve = 0;
	int lockflags;
	enum vtype v_type;

	v_type = vnode_vtype(vp);
	cp = VTOC(vp);

	/*
	 * Nothing to do on read-only mounts, for system vnodes, or while
	 * this process has the volume frozen.
	 */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
	    (hfsmp->hfs_freezing_proc == p)) {
		return (0);
	}

	/*
	 * Ignore nodes related to stale file handles.
	 */
	if (cp->c_mode == 0) {
		vnode_recycle(vp);
		return (0);
	}

	/*
	 * A truncate may follow below; lock ordering requires the truncate
	 * lock to be taken before the cnode lock.
	 */
	if ((v_type == VREG) &&
	    (ISSET(cp->c_flag, C_DELETED) || VTOF(vp)->ff_blocks)) {
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;
	}

	/*
	 * We do the ubc_setsize before we take the cnode
	 * lock and before the hfs_truncate (since we'll
	 * be inside a transaction).
	 */
	if ((v_type == VREG || v_type == VLNK) &&
	    (cp->c_flag & C_DELETED) &&
	    (VTOF(vp)->ff_blocks != 0)) {
		ubc_setsize(vp, 0);
	}

	(void) hfs_lock(cp, HFS_FORCE_LOCK);

	/* Zero-fill invalid ranges / trim over-allocation for live files. */
	if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
		hfs_filedone(vp, ap->a_context);
	}
	/*
	 * Remove any directory hints
	 */
	if (v_type == VDIR)
		hfs_reldirhints(cp, 0);

	/* Count the forks still attached; the cnode is deleted on the last. */
	if (cp->c_datafork)
		++forkcount;
	if (cp->c_rsrcfork)
		++forkcount;

	/* If needed, get rid of any fork's data for a deleted file */
	if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
		if (VTOF(vp)->ff_blocks != 0) {
			// start the transaction out here so that
			// the truncate and the removal of the file
			// are all in one transaction.  otherwise
			// because this cnode is marked for deletion
			// the truncate won't cause the catalog entry
			// to get updated which means that we could
			// free blocks but still keep a reference to
			// them in the catalog entry and then double
			// free them later.
			//
			// if (hfs_start_transaction(hfsmp) != 0) {
			//     error = EINVAL;
			//     goto out;
			// }
			// started_tr = 1;

			/*
			 * Since we're already inside a transaction,
			 * tell hfs_truncate to skip the ubc_setsize.
			 */
			error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
			if (error)
				goto out;
			truncated = 1;
		}
		recycle = 1;
	}

	/*
	 * Check for a postponed deletion.
	 * (only delete cnode when the last fork goes inactive)
	 */
	if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
		/*
		 * Mark cnode in transit so that no one can get this
		 * cnode from cnode hash.
		 */
		hfs_chash_mark_in_transit(cp);

		cp->c_flag &= ~C_DELETED;
		cp->c_flag |= C_NOEXISTS;	// XXXdbg
		cp->c_rdev = 0;

		if (started_tr == 0) {
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				goto out;
			}
			started_tr = 1;
		}

		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
		cat_reserve = 1;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

		/* The forks should have been emptied by the truncate above. */
		if (cp->c_blocks > 0)
			printf("hfs_inactive: attempting to delete a non-empty file!");


		//
		// release the name pointer in the descriptor so that
		// cat_delete() will use the file-id to do the deletion.
		// in the case of hard links this is imperative (in the
		// case of regular files the fileid and cnid are the
		// same so it doesn't matter).
		//
		cat_releasedesc(&cp->c_desc);

		/*
		 * The descriptor name may be zero,
		 * in which case the fileid is used.
		 */
		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && truncated && (error != ENXIO))
			printf("hfs_inactive: couldn't delete a truncated file!");

		/* Update HFS Private Data dir (the hidden dir holding deleted-but-open files) */
		if (error == 0) {
			hfsmp->hfs_privdir_attr.ca_entries--;
			(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
				&hfsmp->hfs_privdir_attr, NULL, NULL);
		}

		if (error == 0) {
			/* Delete any attributes, ignore errors */
			(void) hfs_removeallattr(hfsmp, cp->c_fileid);
		}

		hfs_systemfile_unlock(hfsmp, lockflags);

		if (error)
			goto out;

#if QUOTA
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

		/* Mark the cnode gone; mode 0 is the "stale" marker checked above. */
		cp->c_mode = 0;
		cp->c_flag |= C_NOEXISTS;
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;

		if (error == 0)
			hfs_volupdate(hfsmp, VOL_RMFILE, 0);
	}

	/* Push any pending attribute/timestamp changes to the catalog. */
	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		hfs_update(vp, 0);
	}
out:
	if (cat_reserve)
		cat_postflight(hfsmp, &cookie, p);

	// XXXdbg - have to do this because a goto could have come here
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	hfs_unlock(cp);

	if (took_trunc_lock)
		hfs_unlock_truncate(cp);

	/*
	 * If we are done with the vnode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (cp->c_mode == 0 || recycle)
		vnode_recycle(vp);

	return (error);
}
286
/*
 * File clean-up (zero fill and shrink peof).
 *
 * Flushes dirty pages, explicitly zero-fills any ranges still marked
 * invalid, then shrinks the physical EOF (allocated blocks) down to
 * just cover the logical EOF and pushes the result to disk.
 *
 * The cnode lock is held on entry and exit, but is dropped and
 * re-acquired around each cluster call since the cluster layer can
 * call back into HFS.
 */
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	off_t leof;
	u_long blks, blocksize;

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	leof = fp->ff_size;

	/* Nothing to do on read-only volumes or for empty forks. */
	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
		return (0);

	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * Explicitly zero out the areas of file
	 * that are currently marked invalid.
	 */
	while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
		struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
		off_t start = invalid_range->rl_start;
		off_t end = invalid_range->rl_end;

		/* The range about to be written must be validated
		 * first, so that VNOP_BLOCKMAP() will return the
		 * appropriate mapping for the cluster code:
		 */
		rl_remove(start, end, &fp->ff_invalidranges);

		/* Drop the cnode lock across the zero-fill write. */
		hfs_unlock(cp);
		(void) cluster_write(vp, (struct uio *) 0,
				leof, end + 1, start, (off_t)0,
				IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
		hfs_lock(cp, HFS_FORCE_LOCK);
		cp->c_flag |= C_MODIFIED;
	}
	cp->c_flag &= ~C_ZFWANTSYNC;
	cp->c_zftimeout = 0;
	blocksize = VTOVCB(vp)->blockSize;
	/* Round the logical EOF up to a whole number of allocation blocks. */
	blks = leof / blocksize;
	if (((off_t)blks * (off_t)blocksize) != leof)
		blks++;
	/*
	 * Shrink the peof to the smallest size necessary to contain the leof.
	 */
	if (blks < fp->ff_blocks)
		(void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * If the hfs_truncate didn't happen to flush the vnode's
	 * information out to disk, force it to be updated now that
	 * all invalid ranges have been zero-filled and validated:
	 */
	if (cp->c_flag & C_MODIFIED) {
		hfs_update(vp, 0);
	}
	return (0);
}
358
359
/*
 * Reclaim a cnode so that it can be used for other purposes.
 *
 * Detaches this vnode from its cnode, frees the associated file fork,
 * and — when no other fork vnode remains — removes the cnode from the
 * hash and destroys it.  Always returns 0.
 */
__private_extern__
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp = NULL;
	struct filefork *altfp = NULL;
	int reclaim_cnode = 0;

	(void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
	cp = VTOC(vp);

	/*
	 * Keep track of an inactive hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_addhotfile(vp);

	vnode_removefsref(vp);

	/*
	 * Find file fork for this vnode (if any)
	 * Also check if another fork is active
	 */
	if (cp->c_vp == vp) {
		/* Reclaiming the data-fork vnode. */
		fp = cp->c_datafork;
		altfp = cp->c_rsrcfork;

		cp->c_datafork = NULL;
		cp->c_vp = NULL;
	} else if (cp->c_rsrc_vp == vp) {
		/* Reclaiming the resource-fork vnode. */
		fp = cp->c_rsrcfork;
		altfp = cp->c_datafork;

		cp->c_rsrcfork = NULL;
		cp->c_rsrc_vp = NULL;
	} else {
		panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
	}
	/*
	 * On the last fork, remove the cnode from its hash chain.
	 */
	if (altfp == NULL) {
		/* If we can't remove it then the cnode must persist! */
		if (hfs_chashremove(cp) == 0)
			reclaim_cnode = 1;
		/*
		 * Remove any directory hints
		 */
		if (vnode_isdir(vp)) {
			hfs_reldirhints(cp, 0);
		}
	}
	/* Release the file fork and related data */
	if (fp) {
		/* Dump cached symlink data */
		if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
			FREE(fp->ff_symlinkptr, M_TEMP);
		}
		FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
	}

	/*
	 * If there was only one active fork then we can release the cnode.
	 */
	if (reclaim_cnode) {
		hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
		hfs_reclaim_cnode(cp);
	} else /* cnode in use */ {
		hfs_unlock(cp);
	}

	vnode_clearfsnode(vp);
	return (0);
}
439
440
441 extern int (**hfs_vnodeop_p) (void *);
442 extern int (**hfs_specop_p) (void *);
443 extern int (**hfs_fifoop_p) (void *);
444
/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked.
 *
 * Looks up (or allocates) the cnode for attrp->ca_fileid in the cnode
 * hash, attaches a file fork described by forkp (unless it is a
 * directory), and creates a vnode for either the data fork or — when
 * wantrsrc is non-zero — the resource fork.  On failure *vpp is set to
 * NULL and an errno-style code is returned.
 */
__private_extern__
int
hfs_getnewvnode(
	struct hfsmount *hfsmp,
	struct vnode *dvp,
	struct componentname *cnp,
	struct cat_desc *descp,
	int wantrsrc,
	struct cat_attr *attrp,
	struct cat_fork *forkp,
	struct vnode **vpp)
{
	struct mount *mp = HFSTOVFS(hfsmp);
	struct vnode *vp = NULL;
	struct vnode **cvpp;
	struct vnode *tvp = NULLVP;
	struct cnode *cp = NULL;
	struct filefork *fp = NULL;
	int i;
	int retval;
	int issystemfile;
	struct vnode_fsparam vfsp;
	enum vtype vtype;

	/* A zero fileid never names a valid catalog object. */
	if (attrp->ca_fileid == 0) {
		*vpp = NULL;
		return (ENOENT);
	}

#if !FIFO
	if (IFTOVT(attrp->ca_mode) == VFIFO) {
		*vpp = NULL;
		return (ENOTSUP);
	}
#endif
	vtype = IFTOVT(attrp->ca_mode);
	issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);

	/*
	 * Get a cnode (new or existing)
	 * skip getting the cnode lock if we are getting resource fork (wantrsrc == 2)
	 */
	cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (wantrsrc == 2));

	/* Hardlinks may need an updated catalog descriptor */
	if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
		replace_desc(cp, descp);
	}
	/* Check if we found a matching vnode */
	if (*vpp != NULL)
		return (0);

	/*
	 * If this is a new cnode then initialize it.
	 */
	if (ISSET(cp->c_hflag, H_ALLOC)) {
		lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

		/* Make sure its still valid (ie exists on disk). */
		if (!hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
			*vpp = NULL;
			return (ENOENT);
		}
		bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
		bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

		/* The name was inherited so clear descriptor state... */
		descp->cd_namelen = 0;
		descp->cd_nameptr = NULL;
		descp->cd_flags &= ~CD_HASBUF;

		/* Tag hardlinks (cnid differing from fileid implies a link). */
		if (IFTOVT(cp->c_mode) == VREG &&
		    (descp->cd_cnid != attrp->ca_fileid)) {
			cp->c_flag |= C_HARDLINK;
		}

		/* Take one dev reference for each non-directory cnode */
		if (IFTOVT(cp->c_mode) != VDIR) {
			cp->c_devvp = hfsmp->hfs_devvp;
			vnode_ref(cp->c_devvp);
		}
#if QUOTA
		for (i = 0; i < MAXQUOTAS; i++)
			cp->c_dquot[i] = NODQUOT;
#endif /* QUOTA */
	}

	if (IFTOVT(cp->c_mode) == VDIR) {
		/* Directories have no forks; the vnode hangs off c_vp. */
		if (cp->c_vp != NULL)
			panic("hfs_getnewvnode: orphaned vnode (data)");
		cvpp = &cp->c_vp;
	} else {
		if (forkp && attrp->ca_blocks < forkp->cf_blocks)
			panic("hfs_getnewvnode: bad ca_blocks (too small)");
		/*
		 * Allocate and initialize a file fork...
		 */
		MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
			M_HFSFORK, M_WAITOK);
		fp->ff_cp = cp;
		if (forkp)
			bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
		else
			bzero(&fp->ff_data, sizeof(struct cat_fork));
		rl_init(&fp->ff_invalidranges);
		fp->ff_sysfileinfo = 0;

		if (wantrsrc) {
			if (cp->c_rsrcfork != NULL)
				panic("hfs_getnewvnode: orphaned rsrc fork");
			if (cp->c_rsrc_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (rsrc)");
			cp->c_rsrcfork = fp;
			cvpp = &cp->c_rsrc_vp;
			/* Remember the sibling (data) vnode for the iocount dance below. */
			if ( (tvp = cp->c_vp) != NULLVP )
				cp->c_flag |= C_NEED_DVNODE_PUT;
		} else {
			if (cp->c_datafork != NULL)
				panic("hfs_getnewvnode: orphaned data fork");
			if (cp->c_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (data)");
			cp->c_datafork = fp;
			cvpp = &cp->c_vp;
			/* Remember the sibling (rsrc) vnode for the iocount dance below. */
			if ( (tvp = cp->c_rsrc_vp) != NULLVP)
				cp->c_flag |= C_NEED_RVNODE_PUT;
		}
	}
	if (tvp != NULLVP) {
		/*
		 * grab an iocount on the vnode we weren't
		 * interested in (i.e. we want the resource fork
		 * but the cnode already has the data fork)
		 * to prevent it from being
		 * recycled by us when we call vnode_create
		 * which will result in a deadlock when we
		 * try to take the cnode lock in hfs_vnop_fsync or
		 * hfs_vnop_reclaim... vnode_get can be called here
		 * because we already hold the cnode lock which will
		 * prevent the vnode from changing identity until
		 * we drop it.. vnode_get will not block waiting for
		 * a change of state... however, it will return an
		 * error if the current iocount == 0 and we've already
		 * started to terminate the vnode... we don't need/want to
		 * grab an iocount in the case since we can't cause
		 * the fileystem to be re-entered on this thread for this vp
		 *
		 * the matching vnode_put will happen in hfs_unlock
		 * after we've dropped the cnode lock
		 */
		if ( vnode_get(tvp) != 0)
			cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
	}
	/* Fill in the creation parameters for vnode_create(). */
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "hfs";
	vfsp.vnfs_dvp = dvp;
	vfsp.vnfs_fsnode = cp;
	vfsp.vnfs_cnp = cnp;
	if (vtype == VFIFO )
		vfsp.vnfs_vops = hfs_fifoop_p;
	else if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_vops = hfs_specop_p;
	else
		vfsp.vnfs_vops = hfs_vnodeop_p;

	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_rdev = attrp->ca_rdev;
	else
		vfsp.vnfs_rdev = 0;

	if (forkp)
		vfsp.vnfs_filesize = forkp->cf_size;
	else
		vfsp.vnfs_filesize = 0;

	/* Only enter the name cache when the caller asked for it. */
	if (dvp && cnp && (cnp->cn_flags & MAKEENTRY))
		vfsp.vnfs_flags = 0;
	else
		vfsp.vnfs_flags = VNFS_NOCACHE;

	/* Tag system files */
	vfsp.vnfs_marksystem = issystemfile;

	/* Tag root directory */
	if (descp->cd_cnid == kHFSRootFolderID)
		vfsp.vnfs_markroot = 1;
	else
		vfsp.vnfs_markroot = 0;

	if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
		/* Undo the fork attachment made above. */
		if (fp) {
			if (fp == cp->c_datafork)
				cp->c_datafork = NULL;
			else
				cp->c_rsrcfork = NULL;

			FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
		}
		/*
		 * If this is a newly created cnode or a vnode reclaim
		 * occurred during the attachment, then cleanup the cnode.
		 */
		if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
		} else {
			hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
			hfs_unlock(cp);
		}
		*vpp = NULL;
		return (retval);
	}
	vp = *cvpp;
	vnode_addfsref(vp);
	vnode_settag(vp, VT_HFS);
	if (cp->c_flag & C_HARDLINK)
		vnode_set_hard_link(vp);
	/* Wake anyone waiting on the cnode hash entry. */
	hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);

	/*
	 * Stop tracking an active hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_removehotfile(vp);

	*vpp = vp;
	return (0);
}
681
682
683 static void
684 hfs_reclaim_cnode(struct cnode *cp)
685 {
686 #if QUOTA
687 int i;
688
689 for (i = 0; i < MAXQUOTAS; i++) {
690 if (cp->c_dquot[i] != NODQUOT) {
691 dqreclaim(cp->c_dquot[i]);
692 cp->c_dquot[i] = NODQUOT;
693 }
694 }
695 #endif /* QUOTA */
696
697 if (cp->c_devvp) {
698 struct vnode *tmp_vp = cp->c_devvp;
699
700 cp->c_devvp = NULL;
701 vnode_rele(tmp_vp);
702 }
703
704 /*
705 * If the descriptor has a name then release it
706 */
707 if (cp->c_desc.cd_flags & CD_HASBUF) {
708 char *nameptr;
709
710 nameptr = cp->c_desc.cd_nameptr;
711 cp->c_desc.cd_nameptr = 0;
712 cp->c_desc.cd_flags &= ~CD_HASBUF;
713 cp->c_desc.cd_namelen = 0;
714 vfs_removename(nameptr);
715 }
716
717 lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
718 lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
719 bzero(cp, sizeof(struct cnode));
720 FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
721 }
722
723
724 static int
725 hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
726 {
727 struct cat_attr attr;
728 struct cat_desc cndesc;
729 int stillvalid = 0;
730 int lockflags;
731
732 /* System files are always valid */
733 if (cnid < kHFSFirstUserCatalogNodeID)
734 return (1);
735
736 /* XXX optimization: check write count in dvp */
737
738 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
739
740 if (dvp && cnp) {
741 bzero(&cndesc, sizeof(cndesc));
742 cndesc.cd_nameptr = cnp->cn_nameptr;
743 cndesc.cd_namelen = cnp->cn_namelen;
744 cndesc.cd_parentcnid = VTOC(dvp)->c_cnid;
745 cndesc.cd_hint = VTOC(dvp)->c_childhint;
746
747 if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
748 (cnid == attr.ca_fileid)) {
749 stillvalid = 1;
750 }
751 } else {
752 if (cat_idlookup(hfsmp, cnid, NULL, NULL, NULL) == 0) {
753 stillvalid = 1;
754 }
755 }
756 hfs_systemfile_unlock(hfsmp, lockflags);
757
758 return (stillvalid);
759 }
760
/*
 * Touch cnode times based on c_touch_xxx flags
 *
 * cnode must be locked exclusive
 *
 * This will also update the volume modify time
 */
__private_extern__
void
hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
{
	/* HFS Standard doesn't support access times */
	if (hfsmp->hfs_flags & HFS_STANDARD) {
		cp->c_touch_acctime = FALSE;
	}

	if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		struct timeval tv;
		int touchvol = 0;

		microtime(&tv);

		if (cp->c_touch_acctime) {
			cp->c_atime = tv.tv_sec;
			/*
			 * When the access time is the only thing changing
			 * then make sure its sufficiently newer before
			 * committing it to disk.
			 */
			if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
			      ATIME_ONDISK_ACCURACY)) {
				cp->c_flag |= C_MODIFIED;
			}
			cp->c_touch_acctime = FALSE;
		}
		if (cp->c_touch_modtime) {
			cp->c_mtime = tv.tv_sec;
			cp->c_touch_modtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
#if 1
			/*
			 * HFS dates that WE set must be adjusted for DST
			 */
			if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
				cp->c_mtime += 3600;
			}
#endif
		}
		if (cp->c_touch_chgtime) {
			cp->c_ctime = tv.tv_sec;
			cp->c_touch_chgtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
		}

		/* Touch the volume modtime if needed */
		if (touchvol) {
			/* NOTE(review): 0xFF00 appears to mark the VCB dirty — confirm the flag semantics. */
			HFSTOVCB(hfsmp)->vcbFlags |= 0xFF00;
			HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
		}
	}
}
824
/*
 * Lock a cnode.
 *
 * HFS_SHARED_LOCK takes the rwlock shared; anything else takes it
 * exclusive.  The extents and allocation (bitmap) file cnodes support
 * recursive exclusive locking by the owning thread via c_syslockcount.
 *
 * Unless locktype is HFS_FORCE_LOCK, locking a non-meta cnode that is
 * marked C_NOEXISTS (deleted) fails with ENOENT and leaves it unlocked.
 */
__private_extern__
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
	void * thread = current_thread();

	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (locktype != HFS_SHARED_LOCK)) {

		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			if (cp->c_lockowner == thread) {
				/* Recursive acquisition: just bump the count. */
				cp->c_syslockcount++;
			} else {
				lck_rw_lock_exclusive(&cp->c_rwlock);
				cp->c_lockowner = thread;
				cp->c_syslockcount = 1;
			}
		} else {
			lck_rw_lock_exclusive(&cp->c_rwlock);
			cp->c_lockowner = thread;
		}
	} else if (locktype == HFS_SHARED_LOCK) {
		lck_rw_lock_shared(&cp->c_rwlock);
		cp->c_lockowner = HFS_SHARED_OWNER;
	} else {
		lck_rw_lock_exclusive(&cp->c_rwlock);
		cp->c_lockowner = thread;
	}
	/*
	 * Skip cnodes that no longer exist (were deleted).
	 */
	if ((locktype != HFS_FORCE_LOCK) &&
	    ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
	    (cp->c_flag & C_NOEXISTS)) {
		hfs_unlock(cp);
		return (ENOENT);
	}
	return (0);
}
874
875 /*
876 * Lock a pair of cnodes.
877 */
878 __private_extern__
879 int
880 hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
881 {
882 struct cnode *first, *last;
883 int error;
884
885 /*
886 * If cnodes match then just lock one.
887 */
888 if (cp1 == cp2) {
889 return hfs_lock(cp1, locktype);
890 }
891
892 /*
893 * Lock in cnode parent-child order (if there is a relationship);
894 * otherwise lock in cnode address order.
895 */
896 if ((IFTOVT(cp1->c_mode) == VDIR) && (cp1->c_fileid == cp2->c_parentcnid)) {
897 first = cp1;
898 last = cp2;
899 } else if (cp1 < cp2) {
900 first = cp1;
901 last = cp2;
902 } else {
903 first = cp2;
904 last = cp1;
905 }
906
907 if ( (error = hfs_lock(first, locktype))) {
908 return (error);
909 }
910 if ( (error = hfs_lock(last, locktype))) {
911 hfs_unlock(first);
912 return (error);
913 }
914 return (0);
915 }
916
917 /*
918 * Check ordering of two cnodes. Return true if they are are in-order.
919 */
920 static int
921 hfs_isordered(struct cnode *cp1, struct cnode *cp2)
922 {
923 if (cp1 == cp2)
924 return (0);
925 if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
926 return (1);
927 if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
928 return (0);
929 if (cp1->c_fileid == cp2->c_parentcnid)
930 return (1); /* cp1 is the parent and should go first */
931 if (cp2->c_fileid == cp1->c_parentcnid)
932 return (0); /* cp1 is the child and should go last */
933
934 return (cp1 < cp2); /* fall-back is to use address order */
935 }
936
/*
 * Acquire 4 cnode locks.
 *   - locked in cnode parent-child order (if there is a relationship)
 *     otherwise lock in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
__private_extern__
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype)
{
	struct cnode * a[3];
	struct cnode * b[3];
	struct cnode * list[4];
	struct cnode * tmp;
	int i, j, k;
	int error;

	/* Sort each input pair per hfs_isordered(). */
	if (hfs_isordered(cp1, cp2)) {
		a[0] = cp1; a[1] = cp2;
	} else {
		a[0] = cp2; a[1] = cp1;
	}
	if (hfs_isordered(cp3, cp4)) {
		b[0] = cp3; b[1] = cp4;
	} else {
		b[0] = cp4; b[1] = cp3;
	}
	/* Sentinels compare after any real cnode, terminating the merge. */
	a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	b[2] = (struct cnode *)0xffffffff;  /* sentinel value */

	/*
	 * Build the lock list, skipping over duplicates
	 * (a standard merge of the two sorted pairs).
	 */
	for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
		tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
		if (k == 0 || tmp != list[k-1])
			list[k++] = tmp;
	}

	/*
	 * Now we can lock using list[0 - k].
	 * Skip over NULL entries.
	 */
	for (i = 0; i < k; ++i) {
		if (list[i])
			if ((error = hfs_lock(list[i], locktype))) {
				/* Drop any locks we acquired. */
				while (--i >= 0) {
					if (list[i])
						hfs_unlock(list[i]);
				}
				return (error);
			}
	}
	return (0);
}
996
997
/*
 * Unlock a cnode.
 *
 * Handles the recursive exclusive locks on the extents/allocation file
 * cnodes (only the outermost unlock releases the rwlock), clears the
 * lock owner, and performs any deferred vnode_put on a sibling fork
 * vnode (the C_NEED_DVNODE_PUT / C_NEED_RVNODE_PUT flags are set in
 * hfs_getnewvnode while the cnode lock is held).
 */
__private_extern__
void
hfs_unlock(struct cnode *cp)
{
	vnode_t rvp = NULLVP;
	vnode_t dvp = NULLVP;

	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (cp->c_datafork != NULL)) {
		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			/* Inner recursive unlock: just drop the count. */
			if (--cp->c_syslockcount > 0) {
				return;
			}
		}
	}
	/* Capture deferred puts while we still hold the lock. */
	if (cp->c_flag & C_NEED_DVNODE_PUT)
		dvp = cp->c_vp;

	if (cp->c_flag & C_NEED_RVNODE_PUT)
		rvp = cp->c_rsrc_vp;

	cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT);

	cp-> c_lockowner = NULL;
	lck_rw_done(&cp->c_rwlock);

	/* Drop the iocounts only after releasing the cnode lock. */
	if (dvp)
		vnode_put(dvp);
	if (rvp)
		vnode_put(rvp);
}
1039
1040 /*
1041 * Unlock a pair of cnodes.
1042 */
1043 __private_extern__
1044 void
1045 hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
1046 {
1047 hfs_unlock(cp1);
1048 if (cp2 != cp1)
1049 hfs_unlock(cp2);
1050 }
1051
1052 /*
1053 * Unlock a group of cnodes.
1054 */
1055 __private_extern__
1056 void
1057 hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
1058 {
1059 struct cnode * list[4];
1060 int i, k = 0;
1061
1062 if (cp1) {
1063 hfs_unlock(cp1);
1064 list[k++] = cp1;
1065 }
1066 if (cp2) {
1067 for (i = 0; i < k; ++i) {
1068 if (list[i] == cp2)
1069 goto skip1;
1070 }
1071 hfs_unlock(cp2);
1072 list[k++] = cp2;
1073 }
1074 skip1:
1075 if (cp3) {
1076 for (i = 0; i < k; ++i) {
1077 if (list[i] == cp3)
1078 goto skip2;
1079 }
1080 hfs_unlock(cp3);
1081 list[k++] = cp3;
1082 }
1083 skip2:
1084 if (cp4) {
1085 for (i = 0; i < k; ++i) {
1086 if (list[i] == cp4)
1087 return;
1088 }
1089 hfs_unlock(cp4);
1090 }
1091 }
1092
1093
1094 /*
1095 * Protect a cnode against a truncation.
1096 *
1097 * Used mainly by read/write since they don't hold the
1098 * cnode lock across calls to the cluster layer.
1099 *
1100 * The process doing a truncation must take the lock
1101 * exclusive. The read/write processes can take it
1102 * non-exclusive.
1103 */
1104 __private_extern__
1105 void
1106 hfs_lock_truncate(struct cnode *cp, int exclusive)
1107 {
1108 if (cp->c_lockowner == current_thread())
1109 panic("hfs_lock_truncate: cnode 0x%08x locked!", cp);
1110
1111 if (exclusive)
1112 lck_rw_lock_exclusive(&cp->c_truncatelock);
1113 else
1114 lck_rw_lock_shared(&cp->c_truncatelock);
1115 }
1116
/*
 * Release the truncate lock taken by hfs_lock_truncate
 * (shared or exclusive; lck_rw_done handles either).
 */
__private_extern__
void
hfs_unlock_truncate(struct cnode *cp)
{
	lck_rw_done(&cp->c_truncatelock);
}
1123
1124
1125
1126