/*
 * Copyright (c) 2002-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/ubc.h>
#include <sys/quota.h>
#include <sys/kdebug.h>

#include <kern/locks.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <hfs/hfs.h>
#include <hfs/hfs_catalog.h>
#include <hfs/hfs_cnode.h>
#include <hfs/hfs_quota.h>

extern int prtactive;

extern lck_attr_t * hfs_lock_attr;
extern lck_grp_t * hfs_mutex_group;
extern lck_grp_t * hfs_rwlock_group;

static int hfs_filedone(struct vnode *vp, vfs_context_t context);

static void hfs_reclaim_cnode(struct cnode *);

static int hfs_valid_cnode(struct hfsmount *, struct vnode *, struct componentname *, cnid_t);

static int hfs_isordered(struct cnode *, struct cnode *);

int hfs_vnop_inactive(struct vnop_inactive_args *);

int hfs_vnop_reclaim(struct vnop_reclaim_args *);

/*
 * Last reference to a cnode. If necessary, write or delete it.
 */
__private_extern__
int
hfs_vnop_inactive(struct vnop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	int recycle = 0;
	int forkcount = 0;
	int truncated = 0;
	int started_tr = 0;
	int took_trunc_lock = 0;
	cat_cookie_t cookie;
	int cat_reserve = 0;
	int lockflags;
	enum vtype v_type;

	v_type = vnode_vtype(vp);
	cp = VTOC(vp);

	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || vnode_issystem(vp) ||
	    (hfsmp->hfs_freezing_proc == p)) {
		return (0);
	}

	/*
	 * Ignore nodes related to stale file handles.
	 */
	if (cp->c_mode == 0) {
		vnode_recycle(vp);
		return (0);
	}

	if ((v_type == VREG) &&
	    (ISSET(cp->c_flag, C_DELETED) || VTOF(vp)->ff_blocks)) {
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;
	}

	/*
	 * We do the ubc_setsize before we take the cnode
	 * lock and before the hfs_truncate (since we'll
	 * be inside a transaction).
	 */
	if ((v_type == VREG || v_type == VLNK) &&
	    (cp->c_flag & C_DELETED) &&
	    (VTOF(vp)->ff_blocks != 0)) {
		ubc_setsize(vp, 0);
	}

	(void) hfs_lock(cp, HFS_FORCE_LOCK);

	if (v_type == VREG && !ISSET(cp->c_flag, C_DELETED) && VTOF(vp)->ff_blocks) {
		hfs_filedone(vp, ap->a_context);
	}
	/*
	 * Remove any directory hints
	 */
	if (v_type == VDIR)
		hfs_reldirhints(cp, 0);

	if (cp->c_datafork)
		++forkcount;
	if (cp->c_rsrcfork)
		++forkcount;

	/* If needed, get rid of any fork's data for a deleted file */
	if ((v_type == VREG || v_type == VLNK) && (cp->c_flag & C_DELETED)) {
		if (VTOF(vp)->ff_blocks != 0) {
			// start the transaction out here so that
			// the truncate and the removal of the file
			// are all in one transaction.  otherwise
			// because this cnode is marked for deletion
			// the truncate won't cause the catalog entry
			// to get updated which means that we could
			// free blocks but still keep a reference to
			// them in the catalog entry and then double
			// free them later.
			//
			// if (hfs_start_transaction(hfsmp) != 0) {
			//	error = EINVAL;
			//	goto out;
			// }
			// started_tr = 1;

			/*
			 * Since we're already inside a transaction,
			 * tell hfs_truncate to skip the ubc_setsize.
			 */
			error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ap->a_context);
			if (error)
				goto out;
			truncated = 1;
		}
		recycle = 1;
	}

	/*
	 * Check for a postponed deletion.
	 * (only delete the cnode when the last fork goes inactive)
	 */
	if ((cp->c_flag & C_DELETED) && (forkcount <= 1)) {
		/*
		 * Mark the cnode in transit so that no one can get this
		 * cnode from the cnode hash.
		 */
		hfs_chash_mark_in_transit(cp);

		cp->c_flag &= ~C_DELETED;
		cp->c_flag |= C_NOEXISTS;	// XXXdbg
		cp->c_rdev = 0;

		if (started_tr == 0) {
			if (hfs_start_transaction(hfsmp) != 0) {
				error = EINVAL;
				goto out;
			}
			started_tr = 1;
		}

		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
		cat_reserve = 1;

		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

		if (cp->c_blocks > 0)
			printf("hfs_inactive: attempting to delete a non-empty file!");

		//
		// release the name pointer in the descriptor so that
		// cat_delete() will use the file-id to do the deletion.
		// in the case of hard links this is imperative (in the
		// case of regular files the fileid and cnid are the
		// same so it doesn't matter).
		//
		cat_releasedesc(&cp->c_desc);

		/*
		 * The descriptor name may be zero,
		 * in which case the fileid is used.
		 */
		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && truncated && (error != ENXIO))
			printf("hfs_inactive: couldn't delete a truncated file!");

		/* Update HFS Private Data dir */
		if (error == 0) {
			hfsmp->hfs_privdir_attr.ca_entries--;
			(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
					&hfsmp->hfs_privdir_attr, NULL, NULL);
		}

		if (error == 0) {
			/* Delete any attributes, ignore errors */
			(void) hfs_removeallattr(hfsmp, cp->c_fileid);
		}

		hfs_systemfile_unlock(hfsmp, lockflags);

		if (error)
			goto out;

#if QUOTA
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

		cp->c_mode = 0;
		cp->c_flag |= C_NOEXISTS;
		cp->c_touch_chgtime = TRUE;
		cp->c_touch_modtime = TRUE;

		if (error == 0)
			hfs_volupdate(hfsmp, VOL_RMFILE, 0);
	}

	if ((cp->c_flag & C_MODIFIED) ||
	    cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		hfs_update(vp, 0);
	}
out:
	if (cat_reserve)
		cat_postflight(hfsmp, &cookie, p);

	// XXXdbg - have to do this because a goto could have come here
	if (started_tr) {
		hfs_end_transaction(hfsmp);
		started_tr = 0;
	}

	hfs_unlock(cp);

	if (took_trunc_lock)
		hfs_unlock_truncate(cp);

	/*
	 * If we are done with the vnode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (cp->c_mode == 0 || recycle)
		vnode_recycle(vp);

	return (error);
}
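
/*
 * Illustrative sketch (not compiled): the usual userland sequence that
 * drives the deferred-delete path above is open-unlink-close.  The cnode
 * is only marked C_DELETED at unlink time; the truncate and cat_delete()
 * happen here once the last iocount is dropped:
 *
 *	fd = open("scratch", O_CREAT | O_RDWR, 0644);
 *	unlink("scratch");		// name gone, storage retained
 *	write(fd, buf, nbytes);		// still usable through the open fd
 *	close(fd);			// last reference: hfs_vnop_inactive
 *					// truncates the fork and deletes
 *					// the catalog entry
 */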

/*
 * File clean-up (zero fill and shrink peof).
 */
static int
hfs_filedone(struct vnode *vp, vfs_context_t context)
{
	struct cnode *cp;
	struct filefork *fp;
	struct hfsmount *hfsmp;
	off_t leof;
	u_long blks, blocksize;

	cp = VTOC(vp);
	fp = VTOF(vp);
	hfsmp = VTOHFS(vp);
	leof = fp->ff_size;

	if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (fp->ff_blocks == 0))
		return (0);

	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * Explicitly zero out the areas of the file
	 * that are currently marked invalid.
	 */
	while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
		struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
		off_t start = invalid_range->rl_start;
		off_t end = invalid_range->rl_end;

		/* The range about to be written must be validated
		 * first, so that VNOP_BLOCKMAP() will return the
		 * appropriate mapping for the cluster code:
		 */
		rl_remove(start, end, &fp->ff_invalidranges);

		hfs_unlock(cp);
		(void) cluster_write(vp, (struct uio *) 0,
				leof, end + 1, start, (off_t)0,
				IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
		hfs_lock(cp, HFS_FORCE_LOCK);
		cp->c_flag |= C_MODIFIED;
	}
	cp->c_flag &= ~C_ZFWANTSYNC;
	cp->c_zftimeout = 0;
	blocksize = VTOVCB(vp)->blockSize;
	blks = leof / blocksize;
	if (((off_t)blks * (off_t)blocksize) != leof)
		blks++;
	/*
	 * Shrink the peof to the smallest size necessary to contain the leof.
	 */
	if (blks < fp->ff_blocks)
		(void) hfs_truncate(vp, leof, IO_NDELAY, 0, context);
	hfs_unlock(cp);
	(void) cluster_push(vp, IO_CLOSE);
	hfs_lock(cp, HFS_FORCE_LOCK);

	/*
	 * If the hfs_truncate didn't happen to flush the vnode's
	 * information out to disk, force it to be updated now that
	 * all invalid ranges have been zero-filled and validated:
	 */
	if (cp->c_flag & C_MODIFIED) {
		hfs_update(vp, 0);
	}
	return (0);
}
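
/*
 * Worked example of the peof/leof rounding above (a sketch, assuming a
 * 4096-byte allocation block): leof = 5000 gives blks = 5000 / 4096 = 1,
 * and since 1 * 4096 != 5000 we round up to blks = 2.  If the fork held
 * 8 allocation blocks (peof = 32768), blks < ff_blocks and hfs_truncate()
 * trims the fork back to the 2 blocks the leof actually needs.
 */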


/*
 * Reclaim a cnode so that it can be used for other purposes.
 */
__private_extern__
int
hfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp = NULL;
	struct filefork *altfp = NULL;
	int reclaim_cnode = 0;

	(void) hfs_lock(VTOC(vp), HFS_FORCE_LOCK);
	cp = VTOC(vp);

	/*
	 * Keep track of an inactive hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_addhotfile(vp);

	vnode_removefsref(vp);

	/*
	 * Find the file fork for this vnode (if any).
	 * Also check if another fork is active.
	 */
	if (cp->c_vp == vp) {
		fp = cp->c_datafork;
		altfp = cp->c_rsrcfork;

		cp->c_datafork = NULL;
		cp->c_vp = NULL;
	} else if (cp->c_rsrc_vp == vp) {
		fp = cp->c_rsrcfork;
		altfp = cp->c_datafork;

		cp->c_rsrcfork = NULL;
		cp->c_rsrc_vp = NULL;
	} else {
		panic("hfs_vnop_reclaim: vp points to wrong cnode\n");
	}
	/*
	 * On the last fork, remove the cnode from its hash chain.
	 */
	if (altfp == NULL) {
		/* If we can't remove it then the cnode must persist! */
		if (hfs_chashremove(cp) == 0)
			reclaim_cnode = 1;
		/*
		 * Remove any directory hints
		 */
		if (vnode_isdir(vp)) {
			hfs_reldirhints(cp, 0);
		}
	}
	/* Release the file fork and related data */
	if (fp) {
		/* Dump cached symlink data */
		if (vnode_islnk(vp) && (fp->ff_symlinkptr != NULL)) {
			FREE(fp->ff_symlinkptr, M_TEMP);
		}
		FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
	}

	/*
	 * If there was only one active fork then we can release the cnode.
	 */
	if (reclaim_cnode) {
		hfs_chashwakeup(cp, H_ALLOC | H_TRANSIT);
		hfs_reclaim_cnode(cp);
	} else /* cnode in use */ {
		hfs_unlock(cp);
	}

	vnode_clearfsnode(vp);
	return (0);
}


extern int (**hfs_vnodeop_p) (void *);
extern int (**hfs_specop_p) (void *);
extern int (**hfs_fifoop_p) (void *);

/*
 * hfs_getnewvnode - get new default vnode
 *
 * The vnode is returned with an iocount and the cnode locked.
 */
__private_extern__
int
hfs_getnewvnode(
	struct hfsmount *hfsmp,
	struct vnode *dvp,
	struct componentname *cnp,
	struct cat_desc *descp,
	int wantrsrc,
	struct cat_attr *attrp,
	struct cat_fork *forkp,
	struct vnode **vpp)
{
	struct mount *mp = HFSTOVFS(hfsmp);
	struct vnode *vp = NULL;
	struct vnode **cvpp;
	struct vnode *tvp = NULLVP;
	struct cnode *cp = NULL;
	struct filefork *fp = NULL;
	int i;
	int retval;
	int issystemfile;
	struct vnode_fsparam vfsp;
	enum vtype vtype;

	if (attrp->ca_fileid == 0) {
		*vpp = NULL;
		return (ENOENT);
	}

#if !FIFO
	if (IFTOVT(attrp->ca_mode) == VFIFO) {
		*vpp = NULL;
		return (ENOTSUP);
	}
#endif
	vtype = IFTOVT(attrp->ca_mode);
	issystemfile = (descp->cd_flags & CD_ISMETA) && (vtype == VREG);

	/*
	 * Get a cnode (new or existing).
	 * Skip getting the cnode lock if we are getting the resource fork (wantrsrc == 2).
	 */
	cp = hfs_chash_getcnode(hfsmp->hfs_raw_dev, attrp->ca_fileid, vpp, wantrsrc, (wantrsrc == 2));

	/* Hardlinks may need an updated catalog descriptor */
	if ((cp->c_flag & C_HARDLINK) && descp->cd_nameptr && descp->cd_namelen > 0) {
		replace_desc(cp, descp);
	}
	/* Check if we found a matching vnode */
	if (*vpp != NULL)
		return (0);

	/*
	 * If this is a new cnode then initialize it.
	 */
	if (ISSET(cp->c_hflag, H_ALLOC)) {
		lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);

		/* Make sure it's still valid (i.e. exists on disk). */
		if (!hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
			*vpp = NULL;
			return (ENOENT);
		}
		bcopy(attrp, &cp->c_attr, sizeof(struct cat_attr));
		bcopy(descp, &cp->c_desc, sizeof(struct cat_desc));

		/* The name was inherited so clear descriptor state... */
		descp->cd_namelen = 0;
		descp->cd_nameptr = NULL;
		descp->cd_flags &= ~CD_HASBUF;

		/* Tag hardlinks */
		if (IFTOVT(cp->c_mode) == VREG &&
		    (descp->cd_cnid != attrp->ca_fileid)) {
			cp->c_flag |= C_HARDLINK;
		}

		/* Take one dev reference for each non-directory cnode */
		if (IFTOVT(cp->c_mode) != VDIR) {
			cp->c_devvp = hfsmp->hfs_devvp;
			vnode_ref(cp->c_devvp);
		}
#if QUOTA
		for (i = 0; i < MAXQUOTAS; i++)
			cp->c_dquot[i] = NODQUOT;
#endif /* QUOTA */
	}

	if (IFTOVT(cp->c_mode) == VDIR) {
		if (cp->c_vp != NULL)
			panic("hfs_getnewvnode: orphaned vnode (data)");
		cvpp = &cp->c_vp;
	} else {
		if (forkp && attrp->ca_blocks < forkp->cf_blocks)
			panic("hfs_getnewvnode: bad ca_blocks (too small)");
		/*
		 * Allocate and initialize a file fork...
		 */
		MALLOC_ZONE(fp, struct filefork *, sizeof(struct filefork),
			M_HFSFORK, M_WAITOK);
		fp->ff_cp = cp;
		if (forkp)
			bcopy(forkp, &fp->ff_data, sizeof(struct cat_fork));
		else
			bzero(&fp->ff_data, sizeof(struct cat_fork));
		rl_init(&fp->ff_invalidranges);
		fp->ff_sysfileinfo = 0;

		if (wantrsrc) {
			if (cp->c_rsrcfork != NULL)
				panic("hfs_getnewvnode: orphaned rsrc fork");
			if (cp->c_rsrc_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (rsrc)");
			cp->c_rsrcfork = fp;
			cvpp = &cp->c_rsrc_vp;
			if ( (tvp = cp->c_vp) != NULLVP )
				cp->c_flag |= C_NEED_DVNODE_PUT;
		} else {
			if (cp->c_datafork != NULL)
				panic("hfs_getnewvnode: orphaned data fork");
			if (cp->c_vp != NULL)
				panic("hfs_getnewvnode: orphaned vnode (data)");
			cp->c_datafork = fp;
			cvpp = &cp->c_vp;
			if ( (tvp = cp->c_rsrc_vp) != NULLVP)
				cp->c_flag |= C_NEED_RVNODE_PUT;
		}
	}
	if (tvp != NULLVP) {
		/*
		 * Grab an iocount on the vnode we weren't
		 * interested in (i.e. we want the resource fork
		 * but the cnode already has the data fork)
		 * to prevent it from being
		 * recycled by us when we call vnode_create,
		 * which would result in a deadlock when we
		 * try to take the cnode lock in hfs_vnop_fsync or
		 * hfs_vnop_reclaim... vnode_get can be called here
		 * because we already hold the cnode lock, which will
		 * prevent the vnode from changing identity until
		 * we drop it... vnode_get will not block waiting for
		 * a change of state... however, it will return an
		 * error if the current iocount == 0 and we've already
		 * started to terminate the vnode... we don't need/want to
		 * grab an iocount in that case since we can't cause
		 * the filesystem to be re-entered on this thread for this vp.
		 *
		 * The matching vnode_put will happen in hfs_unlock
		 * after we've dropped the cnode lock.
		 */
		if ( vnode_get(tvp) != 0)
			cp->c_flag &= ~(C_NEED_RVNODE_PUT | C_NEED_DVNODE_PUT);
	}
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = vtype;
	vfsp.vnfs_str = "hfs";
	vfsp.vnfs_dvp = dvp;
	vfsp.vnfs_fsnode = cp;
	vfsp.vnfs_cnp = cnp;
	if (vtype == VFIFO )
		vfsp.vnfs_vops = hfs_fifoop_p;
	else if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_vops = hfs_specop_p;
	else
		vfsp.vnfs_vops = hfs_vnodeop_p;

	if (vtype == VBLK || vtype == VCHR)
		vfsp.vnfs_rdev = attrp->ca_rdev;
	else
		vfsp.vnfs_rdev = 0;

	if (forkp)
		vfsp.vnfs_filesize = forkp->cf_size;
	else
		vfsp.vnfs_filesize = 0;

	if (dvp && cnp && (cnp->cn_flags & MAKEENTRY))
		vfsp.vnfs_flags = 0;
	else
		vfsp.vnfs_flags = VNFS_NOCACHE;

	/* Tag system files */
	vfsp.vnfs_marksystem = issystemfile;

	/* Tag root directory */
	if (descp->cd_cnid == kHFSRootFolderID)
		vfsp.vnfs_markroot = 1;
	else
		vfsp.vnfs_markroot = 0;

	if ((retval = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, cvpp))) {
		if (fp) {
			if (fp == cp->c_datafork)
				cp->c_datafork = NULL;
			else
				cp->c_rsrcfork = NULL;

			FREE_ZONE(fp, sizeof(struct filefork), M_HFSFORK);
		}
		/*
		 * If this is a newly created cnode or a vnode reclaim
		 * occurred during the attachment, then clean up the cnode.
		 */
		if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
			hfs_chash_abort(cp);
			hfs_reclaim_cnode(cp);
		} else {
			hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
			hfs_unlock(cp);
		}
		*vpp = NULL;
		return (retval);
	}
	vp = *cvpp;
	vnode_addfsref(vp);
	vnode_settag(vp, VT_HFS);
	if (cp->c_flag & C_HARDLINK)
		vnode_set_hard_link(vp);
	hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);

	/*
	 * Stop tracking an active hot file.
	 */
	if (!vnode_isdir(vp) && !vnode_issystem(vp))
		(void) hfs_removehotfile(vp);

	*vpp = vp;
	return (0);
}
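
/*
 * Caller sketch (hypothetical; error handling elided): per the contract
 * above, a successful return leaves the vnode with an iocount held and
 * the cnode locked, so a typical caller does:
 *
 *	struct vnode *vp;
 *
 *	if (hfs_getnewvnode(hfsmp, dvp, cnp, &desc, 0, &attr, &fork, &vp) == 0) {
 *		... use VTOC(vp) while the cnode lock is held ...
 *		hfs_unlock(VTOC(vp));	// drop the cnode lock
 *		vnode_put(vp);		// drop the iocount when done
 *	}
 */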


static void
hfs_reclaim_cnode(struct cnode *cp)
{
#if QUOTA
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		if (cp->c_dquot[i] != NODQUOT) {
			dqreclaim(cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
#endif /* QUOTA */

	if (cp->c_devvp) {
		struct vnode *tmp_vp = cp->c_devvp;

		cp->c_devvp = NULL;
		vnode_rele(tmp_vp);
	}

	/*
	 * If the descriptor has a name then release it
	 */
	if (cp->c_desc.cd_flags & CD_HASBUF) {
		char *nameptr;

		nameptr = cp->c_desc.cd_nameptr;
		cp->c_desc.cd_nameptr = 0;
		cp->c_desc.cd_flags &= ~CD_HASBUF;
		cp->c_desc.cd_namelen = 0;
		vfs_removename(nameptr);
	}

	lck_rw_destroy(&cp->c_rwlock, hfs_rwlock_group);
	lck_rw_destroy(&cp->c_truncatelock, hfs_rwlock_group);
	bzero(cp, sizeof(struct cnode));
	FREE_ZONE(cp, sizeof(struct cnode), M_HFSNODE);
}


static int
hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp, cnid_t cnid)
{
	struct cat_attr attr;
	struct cat_desc cndesc;
	int stillvalid = 0;
	int lockflags;

	/* System files are always valid */
	if (cnid < kHFSFirstUserCatalogNodeID)
		return (1);

	/* XXX optimization: check write count in dvp */

	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);

	if (dvp && cnp) {
		bzero(&cndesc, sizeof(cndesc));
		cndesc.cd_nameptr = cnp->cn_nameptr;
		cndesc.cd_namelen = cnp->cn_namelen;
		cndesc.cd_parentcnid = VTOC(dvp)->c_cnid;
		cndesc.cd_hint = VTOC(dvp)->c_childhint;

		if ((cat_lookup(hfsmp, &cndesc, 0, NULL, &attr, NULL, NULL) == 0) &&
		    (cnid == attr.ca_fileid)) {
			stillvalid = 1;
		}
	} else {
		if (cat_idlookup(hfsmp, cnid, NULL, NULL, NULL) == 0) {
			stillvalid = 1;
		}
	}
	hfs_systemfile_unlock(hfsmp, lockflags);

	return (stillvalid);
}

/*
 * Touch cnode times based on the c_touch_xxx flags.
 *
 * The cnode must be locked exclusive.
 *
 * This will also update the volume modify time.
 */
__private_extern__
void
hfs_touchtimes(struct hfsmount *hfsmp, struct cnode* cp)
{
	/* HFS Standard doesn't support access times */
	if (hfsmp->hfs_flags & HFS_STANDARD) {
		cp->c_touch_acctime = FALSE;
	}

	if (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
		struct timeval tv;
		int touchvol = 0;

		microtime(&tv);

		if (cp->c_touch_acctime) {
			cp->c_atime = tv.tv_sec;
			/*
			 * When the access time is the only thing changing,
			 * make sure it's sufficiently newer before
			 * committing it to disk.
			 */
			if ((((u_int32_t)cp->c_atime - (u_int32_t)(cp)->c_attr.ca_atimeondisk) >
			      ATIME_ONDISK_ACCURACY)) {
				cp->c_flag |= C_MODIFIED;
			}
			cp->c_touch_acctime = FALSE;
		}
		if (cp->c_touch_modtime) {
			cp->c_mtime = tv.tv_sec;
			cp->c_touch_modtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
#if 1
			/*
			 * HFS dates that WE set must be adjusted for DST
			 */
			if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
				cp->c_mtime += 3600;
			}
#endif
		}
		if (cp->c_touch_chgtime) {
			cp->c_ctime = tv.tv_sec;
			cp->c_touch_chgtime = FALSE;
			cp->c_flag |= C_MODIFIED;
			touchvol = 1;
		}

		/* Touch the volume modtime if needed */
		if (touchvol) {
			HFSTOVCB(hfsmp)->vcbFlags |= 0xFF00;
			HFSTOVCB(hfsmp)->vcbLsMod = tv.tv_sec;
		}
	}
}
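
/*
 * Usage sketch (assumes the caller holds the cnode lock exclusive, as
 * required above): callers record time side effects in the c_touch_xxx
 * flags and let this routine fold them into the cnode, e.g.:
 *
 *	cp->c_touch_modtime = TRUE;	// data was written
 *	cp->c_touch_chgtime = TRUE;	// metadata changed
 *	hfs_touchtimes(hfsmp, cp);	// commits both, sets C_MODIFIED
 *					// and bumps the volume mod time
 */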

/*
 * Lock a cnode.
 */
__private_extern__
int
hfs_lock(struct cnode *cp, enum hfslocktype locktype)
{
	void * thread = current_thread();

	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (locktype != HFS_SHARED_LOCK)) {

		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			if (cp->c_lockowner == thread) {
				cp->c_syslockcount++;
			} else {
				lck_rw_lock_exclusive(&cp->c_rwlock);
				cp->c_lockowner = thread;
				cp->c_syslockcount = 1;
			}
		} else {
			lck_rw_lock_exclusive(&cp->c_rwlock);
			cp->c_lockowner = thread;
		}
	} else if (locktype == HFS_SHARED_LOCK) {
		lck_rw_lock_shared(&cp->c_rwlock);
		cp->c_lockowner = HFS_SHARED_OWNER;
	} else {
		lck_rw_lock_exclusive(&cp->c_rwlock);
		cp->c_lockowner = thread;
	}
	/*
	 * Skip cnodes that no longer exist (were deleted).
	 */
	if ((locktype != HFS_FORCE_LOCK) &&
	    ((cp->c_desc.cd_flags & CD_ISMETA) == 0) &&
	    (cp->c_flag & C_NOEXISTS)) {
		hfs_unlock(cp);
		return (ENOENT);
	}
	return (0);
}
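
/*
 * Recursion sketch (illustrative; extents_cp stands for the extents
 * B-tree's cnode): the same thread may re-enter hfs_lock on the extents
 * or allocation file, so the lock counts instead of self-deadlocking:
 *
 *	hfs_lock(extents_cp, HFS_EXCLUSIVE_LOCK);	// c_syslockcount = 1
 *	hfs_lock(extents_cp, HFS_EXCLUSIVE_LOCK);	// recursed: count = 2
 *	hfs_unlock(extents_cp);				// count drops to 1
 *	hfs_unlock(extents_cp);				// lock released
 */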

/*
 * Lock a pair of cnodes.
 */
__private_extern__
int
hfs_lockpair(struct cnode *cp1, struct cnode *cp2, enum hfslocktype locktype)
{
	struct cnode *first, *last;
	int error;

	/*
	 * If cnodes match then just lock one.
	 */
	if (cp1 == cp2) {
		return hfs_lock(cp1, locktype);
	}

	/*
	 * Lock in cnode parent-child order (if there is a relationship);
	 * otherwise lock in cnode address order.
	 */
	if ((IFTOVT(cp1->c_mode) == VDIR) && (cp1->c_fileid == cp2->c_parentcnid)) {
		first = cp1;
		last = cp2;
	} else if (cp1 < cp2) {
		first = cp1;
		last = cp2;
	} else {
		first = cp2;
		last = cp1;
	}

	if ( (error = hfs_lock(first, locktype))) {
		return (error);
	}
	if ( (error = hfs_lock(last, locktype))) {
		hfs_unlock(first);
		return (error);
	}
	return (0);
}

/*
 * Check the ordering of two cnodes. Return true if they are in order.
 */
static int
hfs_isordered(struct cnode *cp1, struct cnode *cp2)
{
	if (cp1 == cp2)
		return (0);
	if (cp1 == NULL || cp2 == (struct cnode *)0xffffffff)
		return (1);
	if (cp2 == NULL || cp1 == (struct cnode *)0xffffffff)
		return (0);
	if (cp1->c_fileid == cp2->c_parentcnid)
		return (1);  /* cp1 is the parent and should go first */
	if (cp2->c_fileid == cp1->c_parentcnid)
		return (0);  /* cp1 is the child and should go last */

	return (cp1 < cp2);  /* fall back to address order */
}

/*
 * Acquire 4 cnode locks.
 *   - locked in cnode parent-child order (if there is a relationship)
 *     otherwise lock in cnode address order (lesser address first).
 *   - all or none of the locks are taken
 *   - only one lock taken per cnode (dup cnodes are skipped)
 *   - some of the cnode pointers may be null
 */
__private_extern__
int
hfs_lockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3,
             struct cnode *cp4, enum hfslocktype locktype)
{
	struct cnode * a[3];
	struct cnode * b[3];
	struct cnode * list[4];
	struct cnode * tmp;
	int i, j, k;
	int error;

	if (hfs_isordered(cp1, cp2)) {
		a[0] = cp1; a[1] = cp2;
	} else {
		a[0] = cp2; a[1] = cp1;
	}
	if (hfs_isordered(cp3, cp4)) {
		b[0] = cp3; b[1] = cp4;
	} else {
		b[0] = cp4; b[1] = cp3;
	}
	a[2] = (struct cnode *)0xffffffff;  /* sentinel value */
	b[2] = (struct cnode *)0xffffffff;  /* sentinel value */

	/*
	 * Build the lock list, skipping over duplicates
	 */
	for (i = 0, j = 0, k = 0; (i < 2 || j < 2); ) {
		tmp = hfs_isordered(a[i], b[j]) ? a[i++] : b[j++];
		if (k == 0 || tmp != list[k-1])
			list[k++] = tmp;
	}

	/*
	 * Now we can lock using list[0..k-1].
	 * Skip over NULL entries.
	 */
	for (i = 0; i < k; ++i) {
		if (list[i])
			if ((error = hfs_lock(list[i], locktype))) {
				/* Drop any locks we acquired. */
				while (--i >= 0) {
					if (list[i])
						hfs_unlock(list[i]);
				}
				return (error);
			}
	}
	return (0);
}
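
/*
 * Worked example (hypothetical addresses, no parent/child relations):
 * cp1 = 0x300, cp2 = 0x100, cp3 = 0x200, cp4 = NULL sorts the pairs to
 * a = {0x100, 0x300, sentinel} and b = {NULL, 0x200, sentinel}.  The
 * merge produces list = {NULL, 0x100, 0x200, 0x300}; the NULL entry is
 * skipped by the locking loop, so locks are taken in address order.
 */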


/*
 * Unlock a cnode.
 */
__private_extern__
void
hfs_unlock(struct cnode *cp)
{
	vnode_t rvp = NULLVP;
	vnode_t vp = NULLVP;
	u_int32_t c_flag;

	/* System files need to keep track of owner */
	if ((cp->c_fileid < kHFSFirstUserCatalogNodeID) &&
	    (cp->c_fileid > kHFSRootFolderID) &&
	    (cp->c_datafork != NULL)) {
		/*
		 * The extents and bitmap file locks support
		 * recursion and are always taken exclusive.
		 */
		if (cp->c_fileid == kHFSExtentsFileID ||
		    cp->c_fileid == kHFSAllocationFileID) {
			if (--cp->c_syslockcount > 0) {
				return;
			}
		}
	}
	c_flag = cp->c_flag;
	cp->c_flag &= ~(C_NEED_DVNODE_PUT | C_NEED_RVNODE_PUT | C_NEED_DATA_SETSIZE | C_NEED_RSRC_SETSIZE);
	if (c_flag & (C_NEED_DVNODE_PUT | C_NEED_DATA_SETSIZE)) {
		vp = cp->c_vp;
	}
	if (c_flag & (C_NEED_RVNODE_PUT | C_NEED_RSRC_SETSIZE)) {
		rvp = cp->c_rsrc_vp;
	}

	cp->c_lockowner = NULL;
	lck_rw_done(&cp->c_rwlock);

	/* Perform any vnode post processing after cnode lock is dropped. */
	if (vp) {
		if (c_flag & C_NEED_DATA_SETSIZE)
			ubc_setsize(vp, 0);
		if (c_flag & C_NEED_DVNODE_PUT)
			vnode_put(vp);
	}
	if (rvp) {
		if (c_flag & C_NEED_RSRC_SETSIZE)
			ubc_setsize(rvp, 0);
		if (c_flag & C_NEED_RVNODE_PUT)
			vnode_put(rvp);
	}
}

/*
 * Unlock a pair of cnodes.
 */
__private_extern__
void
hfs_unlockpair(struct cnode *cp1, struct cnode *cp2)
{
	hfs_unlock(cp1);
	if (cp2 != cp1)
		hfs_unlock(cp2);
}

/*
 * Unlock a group of cnodes.
 */
__private_extern__
void
hfs_unlockfour(struct cnode *cp1, struct cnode *cp2, struct cnode *cp3, struct cnode *cp4)
{
	struct cnode * list[4];
	int i, k = 0;

	if (cp1) {
		hfs_unlock(cp1);
		list[k++] = cp1;
	}
	if (cp2) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp2)
				goto skip1;
		}
		hfs_unlock(cp2);
		list[k++] = cp2;
	}
skip1:
	if (cp3) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp3)
				goto skip2;
		}
		hfs_unlock(cp3);
		list[k++] = cp3;
	}
skip2:
	if (cp4) {
		for (i = 0; i < k; ++i) {
			if (list[i] == cp4)
				return;
		}
		hfs_unlock(cp4);
	}
}


/*
 * Protect a cnode against a truncation.
 *
 * Used mainly by read/write since they don't hold the
 * cnode lock across calls to the cluster layer.
 *
 * The process doing a truncation must take the lock
 * exclusive. The read/write processes can take it
 * non-exclusive.
 */
__private_extern__
void
hfs_lock_truncate(struct cnode *cp, int exclusive)
{
	if (cp->c_lockowner == current_thread())
		panic("hfs_lock_truncate: cnode 0x%08x locked!", cp);

	if (exclusive)
		lck_rw_lock_exclusive(&cp->c_truncatelock);
	else
		lck_rw_lock_shared(&cp->c_truncatelock);
}

__private_extern__
void
hfs_unlock_truncate(struct cnode *cp)
{
	lck_rw_done(&cp->c_truncatelock);
}
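
/*
 * Usage sketch (illustrative, not taken from a specific caller): a read
 * path takes the truncate lock shared so a concurrent truncation (which
 * takes it exclusive) cannot move the EOF mid-transfer:
 *
 *	hfs_lock_truncate(cp, FALSE);	// shared: reader
 *	... call into the cluster layer without the cnode lock held ...
 *	hfs_unlock_truncate(cp);
 */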